diff --git a/Dockerfile b/Dockerfile index f47bbee002..90cf633bc1 100644 --- a/Dockerfile +++ b/Dockerfile @@ -4,7 +4,6 @@ COPY . . RUN NO_DOCKER=1 make build FROM registry.svc.ci.openshift.org/openshift/origin-v4.0:base -COPY --from=builder /go/src/github.com/openshift/machine-api-operator/owned-manifests owned-manifests COPY --from=builder /go/src/github.com/openshift/machine-api-operator/install manifests COPY --from=builder /go/src/github.com/openshift/machine-api-operator/bin/machine-api-operator . COPY --from=builder /go/src/github.com/openshift/machine-api-operator/bin/nodelink-controller . diff --git a/Dockerfile.rhel7 b/Dockerfile.rhel7 index 4b78b39d18..b1f6949b6c 100644 --- a/Dockerfile.rhel7 +++ b/Dockerfile.rhel7 @@ -4,7 +4,6 @@ COPY . . RUN NO_DOCKER=1 make build FROM registry.svc.ci.openshift.org/ocp/4.0:base -COPY --from=builder /go/src/github.com/openshift/machine-api-operator/owned-manifests owned-manifests COPY --from=builder /go/src/github.com/openshift/machine-api-operator/install manifests COPY --from=builder /go/src/github.com/openshift/machine-api-operator/bin/machine-api-operator . COPY --from=builder /go/src/github.com/openshift/machine-api-operator/bin/nodelink-controller . diff --git a/Gopkg.lock b/Gopkg.lock index 129164c603..aeffe8d95a 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -379,13 +379,10 @@ [[projects]] branch = "master" - digest = "1:8f6432915758d422dd8a1e023c6f11ed58feb2f79fc42ad43971829886263788" + digest = "1:35475e90186b9102c7ba52b505f3289156fc745d15bccbd34bcea6caf7f31632" name = "github.com/openshift/api" packages = [ "config/v1", - "image/docker10", - "image/dockerpre012", - "image/v1", "security/v1", ] pruneopts = "NUT" @@ -393,7 +390,7 @@ [[projects]] branch = "master" - digest = "1:5b7eb2e76c3cfbcb30ce08cb53cb9668cedd55b00d03f80de1e9c6a1833564c7" + digest = "1:192abee7a3aa1ad1e84356e6fa4c14e9161ed107b44ff8fa80bbf1015c5ef426" name = "github.com/openshift/client-go" packages = [ "config/clientset/versioned", @@ -401,6 +398,10 @@ "config/clientset/versioned/scheme", "config/clientset/versioned/typed/config/v1", "config/clientset/versioned/typed/config/v1/fake", + "config/informers/externalversions", + "config/informers/externalversions/config", + "config/informers/externalversions/config/v1", + "config/informers/externalversions/internalinterfaces", "config/listers/config/v1", "security/clientset/versioned/scheme", "security/clientset/versioned/typed/security/v1", @@ -466,12 +467,11 @@ [[projects]] branch = "master" - digest = "1:40e9666d2102490edc5c3cfcecbd91677895a7d4560be5a79219df11511155dc" + digest = "1:b885968838fbb70f55036947919e3e65168639ab42deee923c9aef0d3730b447" name = "github.com/openshift/cluster-version-operator" packages = [ "lib/resourceapply", "lib/resourcemerge", - "lib/resourceread", ] pruneopts = "NUT" revision = "d8611ec571d0c201c47044a729e0cf673ccd1a68" @@ -1271,6 +1271,9 @@ "github.com/openshift/api/config/v1", "github.com/openshift/client-go/config/clientset/versioned", "github.com/openshift/client-go/config/clientset/versioned/fake", + "github.com/openshift/client-go/config/informers/externalversions", + "github.com/openshift/client-go/config/informers/externalversions/config/v1", + "github.com/openshift/client-go/config/listers/config/v1", "github.com/openshift/cluster-api-actuator-pkg/pkg/e2e", "github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/autoscaler", "github.com/openshift/cluster-api-actuator-pkg/pkg/e2e/infra", @@ -1284,7 +1287,6 @@ "github.com/openshift/cluster-autoscaler-operator/pkg/apis", 
"github.com/openshift/cluster-version-operator/lib/resourceapply", "github.com/openshift/cluster-version-operator/lib/resourcemerge", - "github.com/openshift/cluster-version-operator/lib/resourceread", "github.com/operator-framework/operator-sdk/version", "github.com/spf13/cobra", "github.com/spf13/pflag", @@ -1292,6 +1294,7 @@ "k8s.io/api/apps/v1", "k8s.io/api/core/v1", "k8s.io/apimachinery/pkg/api/errors", + "k8s.io/apimachinery/pkg/api/resource", "k8s.io/apimachinery/pkg/apis/meta/v1", "k8s.io/apimachinery/pkg/labels", "k8s.io/apimachinery/pkg/runtime", diff --git a/cmd/machine-api-operator/controller_context.go b/cmd/machine-api-operator/controller_context.go index a3174f4161..7bf0e9d079 100644 --- a/cmd/machine-api-operator/controller_context.go +++ b/cmd/machine-api-operator/controller_context.go @@ -3,6 +3,9 @@ package main import ( "time" + configinformersv1 "github.com/openshift/client-go/config/informers/externalversions" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/informers" ) @@ -11,7 +14,8 @@ import ( type ControllerContext struct { ClientBuilder *ClientBuilder - KubeNamespacedInformerFactory informers.SharedInformerFactory + KubeNamespacedInformerFactory informers.SharedInformerFactory + ConfigNamespacedInformerFactory configinformersv1.SharedInformerFactory AvailableResources map[schema.GroupVersionResource]bool @@ -25,12 +29,15 @@ type ControllerContext struct { // CreateControllerContext creates the ControllerContext with the ClientBuilder. func CreateControllerContext(cb *ClientBuilder, stop <-chan struct{}, targetNamespace string) *ControllerContext { kubeClient := cb.KubeClientOrDie("kube-shared-informer") + openshiftClient := cb.OpenshiftClientOrDie("openshift-shared-informer") - kubeNamespacedSharedInformer := informers.NewFilteredSharedInformerFactory(kubeClient, resyncPeriod()(), targetNamespace, nil) + kubeNamespacedSharedInformer := informers.NewSharedInformerFactoryWithOptions(kubeClient, resyncPeriod()(), informers.WithNamespace(targetNamespace)) + configNamespacesShareInformer := configinformersv1.NewSharedInformerFactoryWithOptions(openshiftClient, resyncPeriod()(), configinformersv1.WithNamespace(metav1.NamespaceNone)) return &ControllerContext{ - ClientBuilder: cb, - KubeNamespacedInformerFactory: kubeNamespacedSharedInformer, + ClientBuilder: cb, + KubeNamespacedInformerFactory: kubeNamespacedSharedInformer, + ConfigNamespacedInformerFactory: configNamespacesShareInformer, Stop: stop, InformersStarted: make(chan struct{}), ResyncPeriod: resyncPeriod(), diff --git a/cmd/machine-api-operator/start.go b/cmd/machine-api-operator/start.go index 21ea4ab89d..9ed0418891 100644 --- a/cmd/machine-api-operator/start.go +++ b/cmd/machine-api-operator/start.go @@ -81,6 +81,7 @@ func startControllers(ctx *ControllerContext) error { config, ctx.KubeNamespacedInformerFactory.Apps().V1().Deployments(), + ctx.ConfigNamespacedInformerFactory.Config().V1().FeatureGates(), ctx.ClientBuilder.KubeClientOrDie(componentName), ctx.ClientBuilder.OpenshiftClientOrDie(componentName), diff --git a/owned-manifests/clusterapi-manager-controllers.yaml b/owned-manifests/clusterapi-manager-controllers.yaml deleted file mode 100644 index 989547f937..0000000000 --- a/owned-manifests/clusterapi-manager-controllers.yaml +++ /dev/null @@ -1,83 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: clusterapi-manager-controllers - namespace: {{ .TargetNamespace }} - labels: - api: clusterapi - k8s-app: controller -spec: - 
-  securityContext:
-    runAsNonRoot: true
-    runAsUser: 65534
-  selector:
-    matchLabels:
-      api: clusterapi
-      k8s-app: controller
-  replicas: 1
-  template:
-    metadata:
-      labels:
-        api: clusterapi
-        k8s-app: controller
-    spec:
-      priorityClassName: system-node-critical
-      nodeSelector:
-        node-role.kubernetes.io/master: ""
-      tolerations:
-      - effect: NoSchedule
-        key: node-role.kubernetes.io/master
-      - key: CriticalAddonsOnly
-        operator: Exists
-      - effect: NoExecute
-        key: node.alpha.kubernetes.io/notReady
-        operator: Exists
-      - effect: NoExecute
-        key: node.alpha.kubernetes.io/unreachable
-        operator: Exists
-      containers:
-      - name: controller-manager
-        image: {{ .Controllers.Provider }}
-        command:
-        - "./manager"
-        args:
-        - --logtostderr=true
-        - --v=3
-        resources:
-          requests:
-            cpu: 10m
-            memory: 20Mi
-      - name: machine-controller
-        image: {{ .Controllers.Provider }}
-        env:
-        - name: NODE_NAME
-          valueFrom:
-            fieldRef:
-              fieldPath: spec.nodeName
-        command:
-        - /machine-controller-manager
-        args:
-        - --logtostderr=true
-        - --v=3
-      - name: nodelink-controller
-        image: {{ .Controllers.NodeLink }}
-        command:
-        - /nodelink-controller
-        args:
-        - --logtostderr=true
-        - --v=3
-        resources:
-          requests:
-            cpu: 10m
-            memory: 20Mi
-      - name: machine-healthcheck
-        image: {{ .Controllers.MachineHealthCheck }}
-        command:
-        - /machine-healthcheck
-        args:
-        - --logtostderr=true
-        - --v=3
-        resources:
-          requests:
-            cpu: 10m
-            memory: 20Mi
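The manifest above is replaced by Go builders in pkg/operator/sync.go (newDeployment and friends, further down), and the machine-healthcheck container is now driven by the cluster FeatureGate named in featuresgate.go rather than by editing this template. A minimal sketch of that FeatureGate object follows; the FeatureGateSpec type name is assumed from github.com/openshift/api/config/v1 (this diff only shows the Spec.FeatureSet access), so treat it as illustrative:

```go
package example

import (
	osev1 "github.com/openshift/api/config/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// machineHealthCheckGate sketches the cluster-scoped FeatureGate the operator
// looks up by the name defined in featuresgate.go ("machine-api-operator").
// Selecting TechPreviewNoUpgrade is what turns the machine-health-check gate on
// according to the feature set map added below.
func machineHealthCheckGate() *osev1.FeatureGate {
	return &osev1.FeatureGate{
		ObjectMeta: metav1.ObjectMeta{
			// Cluster-scoped: no namespace, matching the informer registered
			// with metav1.NamespaceNone in controller_context.go.
			Name: "machine-api-operator",
		},
		// Spec struct name assumed; the diff only exercises Spec.FeatureSet.
		Spec: osev1.FeatureGateSpec{
			FeatureSet: osev1.TechPreviewNoUpgrade,
		},
	}
}
```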
diff --git a/pkg/operator/featuresgate.go b/pkg/operator/featuresgate.go
new file mode 100644
index 0000000000..f54de8b0fb
--- /dev/null
+++ b/pkg/operator/featuresgate.go
@@ -0,0 +1,54 @@
+package operator
+
+import (
+	"fmt"
+
+	osev1 "github.com/openshift/api/config/v1"
+)
+
+const (
+	// MachineAPIOperatorFeatureGate contains the name of the machine-api-operator FeatureGate object
+	MachineAPIOperatorFeatureGate = "machine-api-operator"
+
+	// FeatureGateMachineHealthCheck contains the name of the MachineHealthCheck feature gate
+	FeatureGateMachineHealthCheck = "machine-health-check"
+)
+
+// MachineAPIOperatorFeatureSets maps each FeatureSet to the machine-api-operator
+// features it enables and disables.
+//
+// NOTE: Callers must check for the existence of a value using the two-value
+// (comma, ok) map lookup. A possible scenario is an upgrade where new
+// FeatureSets are added and a controller has not been upgraded with a newer
+// version of this file; in that case the lookup finds no entry.
+//
+// example:
+//	if featureSet, ok := MachineAPIOperatorFeatureSets["SomeNewFeature"]; ok { }
+//
+// If you put an item in either of these lists, put your area and name on it so we can find owners.
+var MachineAPIOperatorFeatureSets = map[osev1.FeatureSet]*osev1.FeatureGateEnabledDisabled{
+	osev1.Default: {
+		Disabled: []string{
+			FeatureGateMachineHealthCheck, // machine-api-operator, alukiano
+		},
+	},
+	osev1.TechPreviewNoUpgrade: {
+		Enabled: []string{
+			FeatureGateMachineHealthCheck, // machine-api-operator, alukiano
+		},
+	},
+}
+
+func generateFeatureMap(featureSet osev1.FeatureSet) (map[string]bool, error) {
+	rv := map[string]bool{}
+	set, ok := MachineAPIOperatorFeatureSets[featureSet]
+	if !ok {
+		return nil, fmt.Errorf("enabled FeatureSet %v does not have a corresponding config", featureSet)
+	}
+	for _, featEnabled := range set.Enabled {
+		rv[featEnabled] = true
+	}
+	for _, featDisabled := range set.Disabled {
+		rv[featDisabled] = false
+	}
+	return rv, nil
+}
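A minimal sketch of how the map and helper above are meant to be consumed, using the two-value lookup the comment asks for; the helper below is illustrative only and not part of this change:

```go
package operator

import (
	"fmt"

	osev1 "github.com/openshift/api/config/v1"
)

// machineHealthCheckEnabled reports whether the machine-health-check gate is on
// for the given feature set, using the (value, ok) form so missing entries
// (e.g. a feature set this binary does not know about) read as disabled.
func machineHealthCheckEnabled(featureSet osev1.FeatureSet) (bool, error) {
	features, err := generateFeatureMap(featureSet)
	if err != nil {
		// Unknown feature sets have no entry in MachineAPIOperatorFeatureSets.
		return false, fmt.Errorf("unable to resolve feature set %q: %v", featureSet, err)
	}
	enabled, ok := features[FeatureGateMachineHealthCheck]
	return ok && enabled, nil
}
```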
diff --git a/pkg/operator/operator.go b/pkg/operator/operator.go
index ba7956c561..3995f3cf52 100644
--- a/pkg/operator/operator.go
+++ b/pkg/operator/operator.go
@@ -6,9 +6,11 @@ import (
 	"time"
 
 	"github.com/golang/glog"
+	osconfigv1 "github.com/openshift/api/config/v1"
 	osclientset "github.com/openshift/client-go/config/clientset/versioned"
+	configinformersv1 "github.com/openshift/client-go/config/informers/externalversions/config/v1"
+	configlistersv1 "github.com/openshift/client-go/config/listers/config/v1"
 
-	osconfigv1 "github.com/openshift/api/config/v1"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -29,8 +31,7 @@ const (
 	// a machineconfig pool is going to be requeued:
 	//
 	// 5ms, 10ms, 20ms, 40ms, 80ms, 160ms, 320ms, 640ms, 1.3s, 2.6s, 5.1s, 10.2s, 20.4s, 41s, 82s
-	maxRetries        = 15
-	ownedManifestsDir = "owned-manifests"
+	maxRetries = 15
 )
 
 // Operator defines machine api operator.
@@ -49,6 +50,9 @@ type Operator struct {
 	deployLister       appslisterv1.DeploymentLister
 	deployListerSynced cache.InformerSynced
 
+	featureGateLister    configlistersv1.FeatureGateLister
+	featureGateCacheSync cache.InformerSynced
+
 	// queue only ever has one item, but it has nice error handling backoff/retry semantics
 	queue workqueue.RateLimitingInterface
 	operandVersions []osconfigv1.OperandVersion
@@ -62,6 +66,7 @@ func New(
 	config string,
 
 	deployInformer appsinformersv1.DeploymentInformer,
+	featureGateInformer configinformersv1.FeatureGateInformer,
 
 	kubeClient kubernetes.Interface,
 	osClient osclientset.Interface,
@@ -91,6 +96,7 @@ func New(
 	}
 
 	deployInformer.Informer().AddEventHandler(optr.eventHandler())
+	featureGateInformer.Informer().AddEventHandler(optr.eventHandler())
 
 	optr.config = config
 	optr.syncHandler = optr.sync
@@ -98,6 +104,9 @@ func New(
 	optr.deployLister = deployInformer.Lister()
 	optr.deployListerSynced = deployInformer.Informer().HasSynced
 
+	optr.featureGateLister = featureGateInformer.Lister()
+	optr.featureGateCacheSync = featureGateInformer.Informer().HasSynced
+
 	return optr
 }
diff --git a/pkg/operator/sync.go b/pkg/operator/sync.go
index 4d7be4549c..3cf3628418 100644
--- a/pkg/operator/sync.go
+++ b/pkg/operator/sync.go
@@ -5,15 +5,17 @@ import (
 	"time"
 
 	"github.com/golang/glog"
+	appsv1 "k8s.io/api/apps/v1"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 
-	"path/filepath"
-
+	osev1 "github.com/openshift/api/config/v1"
 	"github.com/openshift/cluster-version-operator/lib/resourceapply"
-	"github.com/openshift/cluster-version-operator/lib/resourceread"
 )
 
 const (
@@ -48,11 +50,25 @@ func (optr *Operator) syncAll(config OperatorConfig) error {
 }
 
 func (optr *Operator) syncClusterAPIController(config OperatorConfig) error {
-	controllerBytes, err := PopulateTemplate(&config, filepath.Join(ownedManifestsDir, "clusterapi-manager-controllers.yaml"))
+	// Fetch the FeatureGate; fall back to the Default feature set when it does not exist yet.
+	featureGate, err := optr.featureGateLister.Get(MachineAPIOperatorFeatureGate)
+
+	var featureSet osev1.FeatureSet
+	if err != nil {
+		if !errors.IsNotFound(err) {
+			return err
+		}
+		glog.V(2).Infof("failed to find feature gate %s, will use default feature set", MachineAPIOperatorFeatureGate)
+		featureSet = osev1.Default
+	} else {
+		featureSet = featureGate.Spec.FeatureSet
+	}
+
+	features, err := generateFeatureMap(featureSet)
 	if err != nil {
 		return err
 	}
-	controller := resourceread.ReadDeploymentV1OrDie(controllerBytes)
+
+	controller := newDeployment(config, features)
 	_, updated, err := resourceapply.ApplyDeployment(optr.kubeClient.AppsV1(), controller)
 	if err != nil {
 		return err
@@ -90,3 +106,132 @@ func (optr *Operator) waitForDeploymentRollout(resource *appsv1.Deployment) error {
 		return false, nil
 	})
 }
+
+func newDeployment(config OperatorConfig, features map[string]bool) *appsv1.Deployment {
+	replicas := int32(1)
+	template := newPodTemplateSpec(config, features)
+
+	return &appsv1.Deployment{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "clusterapi-manager-controllers",
+			Namespace: config.TargetNamespace,
+			Labels: map[string]string{
+				"api":     "clusterapi",
+				"k8s-app": "controller",
+			},
+		},
+		Spec: appsv1.DeploymentSpec{
+			Replicas: &replicas,
+			Selector: &metav1.LabelSelector{
+				MatchLabels: map[string]string{
+					"api":     "clusterapi",
+					"k8s-app": "controller",
+				},
+			},
+			Template: *template,
+		},
+	}
+}
+
+func newPodTemplateSpec(config OperatorConfig, features map[string]bool) *corev1.PodTemplateSpec {
+	containers := newContainers(config, features)
+	tolerations := []corev1.Toleration{
+		{
+			Key:    "node-role.kubernetes.io/master",
+			Effect: corev1.TaintEffectNoSchedule,
+		},
+		{
+			Key:      "CriticalAddonsOnly",
+			Operator: corev1.TolerationOpExists,
+		},
+		{
+			Key:      "node.alpha.kubernetes.io/notReady",
+			Effect:   corev1.TaintEffectNoExecute,
+			Operator: corev1.TolerationOpExists,
+		},
+		{
+			Key:      "node.alpha.kubernetes.io/unreachable",
+			Effect:   corev1.TaintEffectNoExecute,
+			Operator: corev1.TolerationOpExists,
+		},
+	}
+
+	_true := true
+	user := int64(65534)
+	return &corev1.PodTemplateSpec{
+		ObjectMeta: metav1.ObjectMeta{
+			Labels: map[string]string{
+				"api":     "clusterapi",
+				"k8s-app": "controller",
+			},
+		},
+		Spec: corev1.PodSpec{
+			Containers:        containers,
+			PriorityClassName: "system-node-critical",
+			NodeSelector:      map[string]string{"node-role.kubernetes.io/master": ""},
+			SecurityContext: &corev1.PodSecurityContext{
+				RunAsNonRoot: &_true,
+				RunAsUser:    &user,
+			},
+			Tolerations: tolerations,
+		},
+	}
+}
+
+func newContainers(config OperatorConfig, features map[string]bool) []corev1.Container {
+	controllerManagerMemory := resource.MustParse("20Mi")
+	controllerManagerCPU := resource.MustParse("10m")
+	resources := corev1.ResourceRequirements{
+		Requests: map[corev1.ResourceName]resource.Quantity{
+			corev1.ResourceMemory: controllerManagerMemory,
+			corev1.ResourceCPU:    controllerManagerCPU,
+		},
+	}
+	args := []string{"--logtostderr=true", "--v=3"}
+
+	containers := []corev1.Container{
+		{
+			Name:      "controller-manager",
+			Image:     config.Controllers.Provider,
+			Command:   []string{"/manager"},
+			Args:      args,
+			Resources: resources,
+		},
+		{
+			Name:    "machine-controller",
+			Image:   config.Controllers.Provider,
+			Command: []string{"/machine-controller-manager"},
+			Args:    args,
+			Env: []corev1.EnvVar{
+				{
+					Name: "NODE_NAME",
+					ValueFrom: &corev1.EnvVarSource{
+						FieldRef: &corev1.ObjectFieldSelector{
+							FieldPath: "spec.nodeName",
+						},
+					},
+				},
+			},
+		},
+		{
+			Name:      "nodelink-controller",
+			Image:     config.Controllers.NodeLink,
+			Command:   []string{"/nodelink-controller"},
+			Args:      args,
+			Resources: resources,
+		},
+	}
+
+	// add the machine-healthcheck controller container only when the feature gate enables it
+	if enabled, ok := features[FeatureGateMachineHealthCheck]; ok && enabled {
+		c := corev1.Container{
+			Name:      "machine-healthcheck",
+			Image:     config.Controllers.MachineHealthCheck,
+			Command:   []string{"/machine-healthcheck"},
+			Args:      args,
+			Resources: resources,
+		}
+		containers = append(containers, c)
+	}
+	return containers
+}
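The net effect of the gating in newContainers is that the deployment grows from three containers to four when machine-health-check is enabled. A small illustrative sketch (not part of this change), reusing only the identifiers added above:

```go
package operator

// containerGatingSketch is illustrative only: it shows the container count with
// the machine-health-check gate off versus on, passing the caller's config
// straight through to newContainers.
func containerGatingSketch(config OperatorConfig) (gatedOff, gatedOn int) {
	gatedOff = len(newContainers(config, map[string]bool{FeatureGateMachineHealthCheck: false}))
	gatedOn = len(newContainers(config, map[string]bool{FeatureGateMachineHealthCheck: true}))
	return gatedOff, gatedOn // expected: 3, 4
}
```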
diff --git a/vendor/github.com/openshift/api/image/docker10/doc.go b/vendor/github.com/openshift/api/image/docker10/doc.go
deleted file mode 100644
index cc194d24db..0000000000
--- a/vendor/github.com/openshift/api/image/docker10/doc.go
+++ /dev/null
@@ -1,4 +0,0 @@
-// +k8s:deepcopy-gen=package,register
-
-// Package docker10 is the docker10 version of the API.
-package docker10
diff --git a/vendor/github.com/openshift/api/image/docker10/dockertypes.go b/vendor/github.com/openshift/api/image/docker10/dockertypes.go
deleted file mode 100644
index a985553db4..0000000000
--- a/vendor/github.com/openshift/api/image/docker10/dockertypes.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package docker10
-
-import (
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// DockerImage is the type representing a container image and its various properties when
-// retrieved from the Docker client API.
-type DockerImage struct {
-	metav1.TypeMeta `json:",inline"`
-
-	ID              string        `json:"Id"`
-	Parent          string        `json:"Parent,omitempty"`
-	Comment         string        `json:"Comment,omitempty"`
-	Created         metav1.Time   `json:"Created,omitempty"`
-	Container       string        `json:"Container,omitempty"`
-	ContainerConfig DockerConfig  `json:"ContainerConfig,omitempty"`
-	DockerVersion   string        `json:"DockerVersion,omitempty"`
-	Author          string        `json:"Author,omitempty"`
-	Config          *DockerConfig `json:"Config,omitempty"`
-	Architecture    string        `json:"Architecture,omitempty"`
-	Size            int64         `json:"Size,omitempty"`
-}
-
-// DockerConfig is the list of configuration options used when creating a container.
-type DockerConfig struct { - Hostname string `json:"Hostname,omitempty"` - Domainname string `json:"Domainname,omitempty"` - User string `json:"User,omitempty"` - Memory int64 `json:"Memory,omitempty"` - MemorySwap int64 `json:"MemorySwap,omitempty"` - CPUShares int64 `json:"CpuShares,omitempty"` - CPUSet string `json:"Cpuset,omitempty"` - AttachStdin bool `json:"AttachStdin,omitempty"` - AttachStdout bool `json:"AttachStdout,omitempty"` - AttachStderr bool `json:"AttachStderr,omitempty"` - PortSpecs []string `json:"PortSpecs,omitempty"` - ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"` - Tty bool `json:"Tty,omitempty"` - OpenStdin bool `json:"OpenStdin,omitempty"` - StdinOnce bool `json:"StdinOnce,omitempty"` - Env []string `json:"Env,omitempty"` - Cmd []string `json:"Cmd,omitempty"` - DNS []string `json:"Dns,omitempty"` // For Docker API v1.9 and below only - Image string `json:"Image,omitempty"` - Volumes map[string]struct{} `json:"Volumes,omitempty"` - VolumesFrom string `json:"VolumesFrom,omitempty"` - WorkingDir string `json:"WorkingDir,omitempty"` - Entrypoint []string `json:"Entrypoint,omitempty"` - NetworkDisabled bool `json:"NetworkDisabled,omitempty"` - SecurityOpts []string `json:"SecurityOpts,omitempty"` - OnBuild []string `json:"OnBuild,omitempty"` - Labels map[string]string `json:"Labels,omitempty"` -} diff --git a/vendor/github.com/openshift/api/image/docker10/register.go b/vendor/github.com/openshift/api/image/docker10/register.go deleted file mode 100644 index 31d616a06c..0000000000 --- a/vendor/github.com/openshift/api/image/docker10/register.go +++ /dev/null @@ -1,38 +0,0 @@ -package docker10 - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -const ( - GroupName = "image.openshift.io" - LegacyGroupName = "" -) - -// SchemeGroupVersion is group version used to register these objects -var ( - SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "1.0"} - LegacySchemeGroupVersion = schema.GroupVersion{Group: LegacyGroupName, Version: "1.0"} - - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - LegacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes) - - AddToScheme = SchemeBuilder.AddToScheme - AddToSchemeInCoreGroup = LegacySchemeBuilder.AddToScheme -) - -// Adds the list of known types to api.Scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &DockerImage{}, - ) - return nil -} - -func addLegacyKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(LegacySchemeGroupVersion, - &DockerImage{}, - ) - return nil -} diff --git a/vendor/github.com/openshift/api/image/docker10/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/image/docker10/zz_generated.deepcopy.go deleted file mode 100644 index b59f75ac21..0000000000 --- a/vendor/github.com/openshift/api/image/docker10/zz_generated.deepcopy.go +++ /dev/null @@ -1,113 +0,0 @@ -// +build !ignore_autogenerated - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package docker10 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *DockerConfig) DeepCopyInto(out *DockerConfig) { - *out = *in - if in.PortSpecs != nil { - in, out := &in.PortSpecs, &out.PortSpecs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ExposedPorts != nil { - in, out := &in.ExposedPorts, &out.ExposedPorts - *out = make(map[string]struct{}, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Cmd != nil { - in, out := &in.Cmd, &out.Cmd - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.DNS != nil { - in, out := &in.DNS, &out.DNS - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make(map[string]struct{}, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Entrypoint != nil { - in, out := &in.Entrypoint, &out.Entrypoint - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.SecurityOpts != nil { - in, out := &in.SecurityOpts, &out.SecurityOpts - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.OnBuild != nil { - in, out := &in.OnBuild, &out.OnBuild - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Labels != nil { - in, out := &in.Labels, &out.Labels - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerConfig. -func (in *DockerConfig) DeepCopy() *DockerConfig { - if in == nil { - return nil - } - out := new(DockerConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DockerImage) DeepCopyInto(out *DockerImage) { - *out = *in - out.TypeMeta = in.TypeMeta - in.Created.DeepCopyInto(&out.Created) - in.ContainerConfig.DeepCopyInto(&out.ContainerConfig) - if in.Config != nil { - in, out := &in.Config, &out.Config - *out = new(DockerConfig) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerImage. -func (in *DockerImage) DeepCopy() *DockerImage { - if in == nil { - return nil - } - out := new(DockerImage) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *DockerImage) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} diff --git a/vendor/github.com/openshift/api/image/dockerpre012/deepcopy.go b/vendor/github.com/openshift/api/image/dockerpre012/deepcopy.go deleted file mode 100644 index ddeb4403c4..0000000000 --- a/vendor/github.com/openshift/api/image/dockerpre012/deepcopy.go +++ /dev/null @@ -1,18 +0,0 @@ -package dockerpre012 - -// DeepCopyInto is manually built to copy the (probably bugged) time.Time -func (in *ImagePre012) DeepCopyInto(out *ImagePre012) { - *out = *in - out.Created = in.Created - in.ContainerConfig.DeepCopyInto(&out.ContainerConfig) - if in.Config != nil { - in, out := &in.Config, &out.Config - if *in == nil { - *out = nil - } else { - *out = new(Config) - (*in).DeepCopyInto(*out) - } - } - return -} diff --git a/vendor/github.com/openshift/api/image/dockerpre012/doc.go b/vendor/github.com/openshift/api/image/dockerpre012/doc.go deleted file mode 100644 index e4a56260f1..0000000000 --- a/vendor/github.com/openshift/api/image/dockerpre012/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// +k8s:deepcopy-gen=package,register - -// Package dockerpre012 is the dockerpre012 version of the API. -package dockerpre012 diff --git a/vendor/github.com/openshift/api/image/dockerpre012/dockertypes.go b/vendor/github.com/openshift/api/image/dockerpre012/dockertypes.go deleted file mode 100644 index 685e0b68c5..0000000000 --- a/vendor/github.com/openshift/api/image/dockerpre012/dockertypes.go +++ /dev/null @@ -1,136 +0,0 @@ -package dockerpre012 - -import ( - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DockerImage is for earlier versions of the Docker API (pre-012 to be specific). It is also the -// version of metadata that the container image registry uses to persist metadata. -type DockerImage struct { - metav1.TypeMeta `json:",inline"` - - ID string `json:"id"` - Parent string `json:"parent,omitempty"` - Comment string `json:"comment,omitempty"` - Created metav1.Time `json:"created"` - Container string `json:"container,omitempty"` - ContainerConfig DockerConfig `json:"container_config,omitempty"` - DockerVersion string `json:"docker_version,omitempty"` - Author string `json:"author,omitempty"` - Config *DockerConfig `json:"config,omitempty"` - Architecture string `json:"architecture,omitempty"` - Size int64 `json:"size,omitempty"` -} - -// DockerConfig is the list of configuration options used when creating a container. 
-type DockerConfig struct { - Hostname string `json:"Hostname,omitempty"` - Domainname string `json:"Domainname,omitempty"` - User string `json:"User,omitempty"` - Memory int64 `json:"Memory,omitempty"` - MemorySwap int64 `json:"MemorySwap,omitempty"` - CPUShares int64 `json:"CpuShares,omitempty"` - CPUSet string `json:"Cpuset,omitempty"` - AttachStdin bool `json:"AttachStdin,omitempty"` - AttachStdout bool `json:"AttachStdout,omitempty"` - AttachStderr bool `json:"AttachStderr,omitempty"` - PortSpecs []string `json:"PortSpecs,omitempty"` - ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"` - Tty bool `json:"Tty,omitempty"` - OpenStdin bool `json:"OpenStdin,omitempty"` - StdinOnce bool `json:"StdinOnce,omitempty"` - Env []string `json:"Env,omitempty"` - Cmd []string `json:"Cmd,omitempty"` - DNS []string `json:"Dns,omitempty"` // For Docker API v1.9 and below only - Image string `json:"Image,omitempty"` - Volumes map[string]struct{} `json:"Volumes,omitempty"` - VolumesFrom string `json:"VolumesFrom,omitempty"` - WorkingDir string `json:"WorkingDir,omitempty"` - Entrypoint []string `json:"Entrypoint,omitempty"` - NetworkDisabled bool `json:"NetworkDisabled,omitempty"` - SecurityOpts []string `json:"SecurityOpts,omitempty"` - OnBuild []string `json:"OnBuild,omitempty"` - // This field is not supported in pre012 and will always be empty. - Labels map[string]string `json:"Labels,omitempty"` -} - -// ImagePre012 serves the same purpose as the Image type except that it is for -// earlier versions of the Docker API (pre-012 to be specific) -// Exists only for legacy conversion, copy of type from fsouza/go-dockerclient -type ImagePre012 struct { - ID string `json:"id"` - Parent string `json:"parent,omitempty"` - Comment string `json:"comment,omitempty"` - Created time.Time `json:"created"` - Container string `json:"container,omitempty"` - ContainerConfig Config `json:"container_config,omitempty"` - DockerVersion string `json:"docker_version,omitempty"` - Author string `json:"author,omitempty"` - Config *Config `json:"config,omitempty"` - Architecture string `json:"architecture,omitempty"` - Size int64 `json:"size,omitempty"` -} - -// Config is the list of configuration options used when creating a container. -// Config does not contain the options that are specific to starting a container on a -// given host. 
Those are contained in HostConfig -// Exists only for legacy conversion, copy of type from fsouza/go-dockerclient -type Config struct { - Hostname string `json:"Hostname,omitempty" yaml:"Hostname,omitempty"` - Domainname string `json:"Domainname,omitempty" yaml:"Domainname,omitempty"` - User string `json:"User,omitempty" yaml:"User,omitempty"` - Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty"` - MemorySwap int64 `json:"MemorySwap,omitempty" yaml:"MemorySwap,omitempty"` - MemoryReservation int64 `json:"MemoryReservation,omitempty" yaml:"MemoryReservation,omitempty"` - KernelMemory int64 `json:"KernelMemory,omitempty" yaml:"KernelMemory,omitempty"` - PidsLimit int64 `json:"PidsLimit,omitempty" yaml:"PidsLimit,omitempty"` - CPUShares int64 `json:"CpuShares,omitempty" yaml:"CpuShares,omitempty"` - CPUSet string `json:"Cpuset,omitempty" yaml:"Cpuset,omitempty"` - AttachStdin bool `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty"` - AttachStdout bool `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty"` - AttachStderr bool `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty"` - PortSpecs []string `json:"PortSpecs,omitempty" yaml:"PortSpecs,omitempty"` - ExposedPorts map[Port]struct{} `json:"ExposedPorts,omitempty" yaml:"ExposedPorts,omitempty"` - StopSignal string `json:"StopSignal,omitempty" yaml:"StopSignal,omitempty"` - Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"` - OpenStdin bool `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty"` - StdinOnce bool `json:"StdinOnce,omitempty" yaml:"StdinOnce,omitempty"` - Env []string `json:"Env,omitempty" yaml:"Env,omitempty"` - Cmd []string `json:"Cmd" yaml:"Cmd"` - DNS []string `json:"Dns,omitempty" yaml:"Dns,omitempty"` // For Docker API v1.9 and below only - Image string `json:"Image,omitempty" yaml:"Image,omitempty"` - Volumes map[string]struct{} `json:"Volumes,omitempty" yaml:"Volumes,omitempty"` - VolumeDriver string `json:"VolumeDriver,omitempty" yaml:"VolumeDriver,omitempty"` - VolumesFrom string `json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty"` - WorkingDir string `json:"WorkingDir,omitempty" yaml:"WorkingDir,omitempty"` - MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty"` - Entrypoint []string `json:"Entrypoint" yaml:"Entrypoint"` - NetworkDisabled bool `json:"NetworkDisabled,omitempty" yaml:"NetworkDisabled,omitempty"` - SecurityOpts []string `json:"SecurityOpts,omitempty" yaml:"SecurityOpts,omitempty"` - OnBuild []string `json:"OnBuild,omitempty" yaml:"OnBuild,omitempty"` - Mounts []Mount `json:"Mounts,omitempty" yaml:"Mounts,omitempty"` - Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"` -} - -// Mount represents a mount point in the container. -// -// It has been added in the version 1.20 of the Docker API, available since -// Docker 1.8. -// Exists only for legacy conversion, copy of type from fsouza/go-dockerclient -type Mount struct { - Name string - Source string - Destination string - Driver string - Mode string - RW bool -} - -// Port represents the port number and the protocol, in the form -// /. For example: 80/tcp. 
-// Exists only for legacy conversion, copy of type from fsouza/go-dockerclient -type Port string diff --git a/vendor/github.com/openshift/api/image/dockerpre012/register.go b/vendor/github.com/openshift/api/image/dockerpre012/register.go deleted file mode 100644 index 469806dbe7..0000000000 --- a/vendor/github.com/openshift/api/image/dockerpre012/register.go +++ /dev/null @@ -1,37 +0,0 @@ -package dockerpre012 - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -const ( - GroupName = "image.openshift.io" - LegacyGroupName = "" -) - -var ( - SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "pre012"} - LegacySchemeGroupVersion = schema.GroupVersion{Group: LegacyGroupName, Version: "pre012"} - - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme - - LegacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes) - AddToSchemeInCoreGroup = LegacySchemeBuilder.AddToScheme -) - -// Adds the list of known types to api.Scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &DockerImage{}, - ) - return nil -} - -func addLegacyKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(LegacySchemeGroupVersion, - &DockerImage{}, - ) - return nil -} diff --git a/vendor/github.com/openshift/api/image/dockerpre012/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/image/dockerpre012/zz_generated.deepcopy.go deleted file mode 100644 index d9042704ad..0000000000 --- a/vendor/github.com/openshift/api/image/dockerpre012/zz_generated.deepcopy.go +++ /dev/null @@ -1,216 +0,0 @@ -// +build !ignore_autogenerated - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package dockerpre012 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Config) DeepCopyInto(out *Config) { - *out = *in - if in.PortSpecs != nil { - in, out := &in.PortSpecs, &out.PortSpecs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ExposedPorts != nil { - in, out := &in.ExposedPorts, &out.ExposedPorts - *out = make(map[Port]struct{}, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Cmd != nil { - in, out := &in.Cmd, &out.Cmd - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.DNS != nil { - in, out := &in.DNS, &out.DNS - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make(map[string]struct{}, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Entrypoint != nil { - in, out := &in.Entrypoint, &out.Entrypoint - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.SecurityOpts != nil { - in, out := &in.SecurityOpts, &out.SecurityOpts - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.OnBuild != nil { - in, out := &in.OnBuild, &out.OnBuild - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Mounts != nil { - in, out := &in.Mounts, &out.Mounts - *out = make([]Mount, len(*in)) - copy(*out, *in) - } - if in.Labels != nil { - in, out := &in.Labels, &out.Labels - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config. -func (in *Config) DeepCopy() *Config { - if in == nil { - return nil - } - out := new(Config) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DockerConfig) DeepCopyInto(out *DockerConfig) { - *out = *in - if in.PortSpecs != nil { - in, out := &in.PortSpecs, &out.PortSpecs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ExposedPorts != nil { - in, out := &in.ExposedPorts, &out.ExposedPorts - *out = make(map[string]struct{}, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Cmd != nil { - in, out := &in.Cmd, &out.Cmd - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.DNS != nil { - in, out := &in.DNS, &out.DNS - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make(map[string]struct{}, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Entrypoint != nil { - in, out := &in.Entrypoint, &out.Entrypoint - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.SecurityOpts != nil { - in, out := &in.SecurityOpts, &out.SecurityOpts - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.OnBuild != nil { - in, out := &in.OnBuild, &out.OnBuild - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Labels != nil { - in, out := &in.Labels, &out.Labels - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerConfig. 
-func (in *DockerConfig) DeepCopy() *DockerConfig { - if in == nil { - return nil - } - out := new(DockerConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DockerImage) DeepCopyInto(out *DockerImage) { - *out = *in - out.TypeMeta = in.TypeMeta - in.Created.DeepCopyInto(&out.Created) - in.ContainerConfig.DeepCopyInto(&out.ContainerConfig) - if in.Config != nil { - in, out := &in.Config, &out.Config - *out = new(DockerConfig) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerImage. -func (in *DockerImage) DeepCopy() *DockerImage { - if in == nil { - return nil - } - out := new(DockerImage) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *DockerImage) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePre012. -func (in *ImagePre012) DeepCopy() *ImagePre012 { - if in == nil { - return nil - } - out := new(ImagePre012) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Mount) DeepCopyInto(out *Mount) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Mount. -func (in *Mount) DeepCopy() *Mount { - if in == nil { - return nil - } - out := new(Mount) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/github.com/openshift/api/image/v1/doc.go b/vendor/github.com/openshift/api/image/v1/doc.go deleted file mode 100644 index e57d45bbf9..0000000000 --- a/vendor/github.com/openshift/api/image/v1/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -// +k8s:deepcopy-gen=package,register -// +k8s:conversion-gen=github.com/openshift/origin/pkg/image/apis/image -// +k8s:defaulter-gen=TypeMeta -// +k8s:openapi-gen=true - -// +groupName=image.openshift.io -// Package v1 is the v1 version of the API. -package v1 diff --git a/vendor/github.com/openshift/api/image/v1/generated.pb.go b/vendor/github.com/openshift/api/image/v1/generated.pb.go deleted file mode 100644 index ae113b7d35..0000000000 --- a/vendor/github.com/openshift/api/image/v1/generated.pb.go +++ /dev/null @@ -1,8646 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: github.com/openshift/api/image/v1/generated.proto - -/* - Package v1 is a generated protocol buffer package. 
- - It is generated from these files: - github.com/openshift/api/image/v1/generated.proto - - It has these top-level messages: - DockerImageReference - Image - ImageBlobReferences - ImageImportSpec - ImageImportStatus - ImageLayer - ImageLayerData - ImageList - ImageLookupPolicy - ImageSignature - ImageStream - ImageStreamImage - ImageStreamImport - ImageStreamImportSpec - ImageStreamImportStatus - ImageStreamLayers - ImageStreamList - ImageStreamMapping - ImageStreamSpec - ImageStreamStatus - ImageStreamTag - ImageStreamTagList - NamedTagEventList - RepositoryImportSpec - RepositoryImportStatus - SignatureCondition - SignatureGenericEntity - SignatureIssuer - SignatureSubject - TagEvent - TagEventCondition - TagImportPolicy - TagReference - TagReferencePolicy -*/ -package v1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import k8s_io_api_core_v1 "k8s.io/api/core/v1" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -func (m *DockerImageReference) Reset() { *m = DockerImageReference{} } -func (*DockerImageReference) ProtoMessage() {} -func (*DockerImageReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *Image) Reset() { *m = Image{} } -func (*Image) ProtoMessage() {} -func (*Image) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *ImageBlobReferences) Reset() { *m = ImageBlobReferences{} } -func (*ImageBlobReferences) ProtoMessage() {} -func (*ImageBlobReferences) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *ImageImportSpec) Reset() { *m = ImageImportSpec{} } -func (*ImageImportSpec) ProtoMessage() {} -func (*ImageImportSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func (m *ImageImportStatus) Reset() { *m = ImageImportStatus{} } -func (*ImageImportStatus) ProtoMessage() {} -func (*ImageImportStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } - -func (m *ImageLayer) Reset() { *m = ImageLayer{} } -func (*ImageLayer) ProtoMessage() {} -func (*ImageLayer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } - -func (m *ImageLayerData) Reset() { *m = ImageLayerData{} } -func (*ImageLayerData) ProtoMessage() {} -func (*ImageLayerData) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } - -func (m *ImageList) Reset() { *m = ImageList{} } -func (*ImageList) ProtoMessage() {} -func (*ImageList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } - -func (m *ImageLookupPolicy) Reset() { *m = ImageLookupPolicy{} } -func (*ImageLookupPolicy) ProtoMessage() {} -func (*ImageLookupPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } - -func (m *ImageSignature) Reset() { *m = ImageSignature{} } -func (*ImageSignature) ProtoMessage() {} -func 
(*ImageSignature) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } - -func (m *ImageStream) Reset() { *m = ImageStream{} } -func (*ImageStream) ProtoMessage() {} -func (*ImageStream) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } - -func (m *ImageStreamImage) Reset() { *m = ImageStreamImage{} } -func (*ImageStreamImage) ProtoMessage() {} -func (*ImageStreamImage) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } - -func (m *ImageStreamImport) Reset() { *m = ImageStreamImport{} } -func (*ImageStreamImport) ProtoMessage() {} -func (*ImageStreamImport) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } - -func (m *ImageStreamImportSpec) Reset() { *m = ImageStreamImportSpec{} } -func (*ImageStreamImportSpec) ProtoMessage() {} -func (*ImageStreamImportSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } - -func (m *ImageStreamImportStatus) Reset() { *m = ImageStreamImportStatus{} } -func (*ImageStreamImportStatus) ProtoMessage() {} -func (*ImageStreamImportStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{14} -} - -func (m *ImageStreamLayers) Reset() { *m = ImageStreamLayers{} } -func (*ImageStreamLayers) ProtoMessage() {} -func (*ImageStreamLayers) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } - -func (m *ImageStreamList) Reset() { *m = ImageStreamList{} } -func (*ImageStreamList) ProtoMessage() {} -func (*ImageStreamList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } - -func (m *ImageStreamMapping) Reset() { *m = ImageStreamMapping{} } -func (*ImageStreamMapping) ProtoMessage() {} -func (*ImageStreamMapping) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } - -func (m *ImageStreamSpec) Reset() { *m = ImageStreamSpec{} } -func (*ImageStreamSpec) ProtoMessage() {} -func (*ImageStreamSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } - -func (m *ImageStreamStatus) Reset() { *m = ImageStreamStatus{} } -func (*ImageStreamStatus) ProtoMessage() {} -func (*ImageStreamStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } - -func (m *ImageStreamTag) Reset() { *m = ImageStreamTag{} } -func (*ImageStreamTag) ProtoMessage() {} -func (*ImageStreamTag) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } - -func (m *ImageStreamTagList) Reset() { *m = ImageStreamTagList{} } -func (*ImageStreamTagList) ProtoMessage() {} -func (*ImageStreamTagList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } - -func (m *NamedTagEventList) Reset() { *m = NamedTagEventList{} } -func (*NamedTagEventList) ProtoMessage() {} -func (*NamedTagEventList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } - -func (m *RepositoryImportSpec) Reset() { *m = RepositoryImportSpec{} } -func (*RepositoryImportSpec) ProtoMessage() {} -func (*RepositoryImportSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } - -func (m *RepositoryImportStatus) Reset() { *m = RepositoryImportStatus{} } -func (*RepositoryImportStatus) ProtoMessage() {} -func (*RepositoryImportStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } - -func (m *SignatureCondition) Reset() { *m = SignatureCondition{} } -func (*SignatureCondition) ProtoMessage() {} -func (*SignatureCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, 
[]int{25} } - -func (m *SignatureGenericEntity) Reset() { *m = SignatureGenericEntity{} } -func (*SignatureGenericEntity) ProtoMessage() {} -func (*SignatureGenericEntity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } - -func (m *SignatureIssuer) Reset() { *m = SignatureIssuer{} } -func (*SignatureIssuer) ProtoMessage() {} -func (*SignatureIssuer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } - -func (m *SignatureSubject) Reset() { *m = SignatureSubject{} } -func (*SignatureSubject) ProtoMessage() {} -func (*SignatureSubject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } - -func (m *TagEvent) Reset() { *m = TagEvent{} } -func (*TagEvent) ProtoMessage() {} -func (*TagEvent) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } - -func (m *TagEventCondition) Reset() { *m = TagEventCondition{} } -func (*TagEventCondition) ProtoMessage() {} -func (*TagEventCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } - -func (m *TagImportPolicy) Reset() { *m = TagImportPolicy{} } -func (*TagImportPolicy) ProtoMessage() {} -func (*TagImportPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } - -func (m *TagReference) Reset() { *m = TagReference{} } -func (*TagReference) ProtoMessage() {} -func (*TagReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } - -func (m *TagReferencePolicy) Reset() { *m = TagReferencePolicy{} } -func (*TagReferencePolicy) ProtoMessage() {} -func (*TagReferencePolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } - -func init() { - proto.RegisterType((*DockerImageReference)(nil), "github.com.openshift.api.image.v1.DockerImageReference") - proto.RegisterType((*Image)(nil), "github.com.openshift.api.image.v1.Image") - proto.RegisterType((*ImageBlobReferences)(nil), "github.com.openshift.api.image.v1.ImageBlobReferences") - proto.RegisterType((*ImageImportSpec)(nil), "github.com.openshift.api.image.v1.ImageImportSpec") - proto.RegisterType((*ImageImportStatus)(nil), "github.com.openshift.api.image.v1.ImageImportStatus") - proto.RegisterType((*ImageLayer)(nil), "github.com.openshift.api.image.v1.ImageLayer") - proto.RegisterType((*ImageLayerData)(nil), "github.com.openshift.api.image.v1.ImageLayerData") - proto.RegisterType((*ImageList)(nil), "github.com.openshift.api.image.v1.ImageList") - proto.RegisterType((*ImageLookupPolicy)(nil), "github.com.openshift.api.image.v1.ImageLookupPolicy") - proto.RegisterType((*ImageSignature)(nil), "github.com.openshift.api.image.v1.ImageSignature") - proto.RegisterType((*ImageStream)(nil), "github.com.openshift.api.image.v1.ImageStream") - proto.RegisterType((*ImageStreamImage)(nil), "github.com.openshift.api.image.v1.ImageStreamImage") - proto.RegisterType((*ImageStreamImport)(nil), "github.com.openshift.api.image.v1.ImageStreamImport") - proto.RegisterType((*ImageStreamImportSpec)(nil), "github.com.openshift.api.image.v1.ImageStreamImportSpec") - proto.RegisterType((*ImageStreamImportStatus)(nil), "github.com.openshift.api.image.v1.ImageStreamImportStatus") - proto.RegisterType((*ImageStreamLayers)(nil), "github.com.openshift.api.image.v1.ImageStreamLayers") - proto.RegisterType((*ImageStreamList)(nil), "github.com.openshift.api.image.v1.ImageStreamList") - proto.RegisterType((*ImageStreamMapping)(nil), "github.com.openshift.api.image.v1.ImageStreamMapping") - proto.RegisterType((*ImageStreamSpec)(nil), 
"github.com.openshift.api.image.v1.ImageStreamSpec") - proto.RegisterType((*ImageStreamStatus)(nil), "github.com.openshift.api.image.v1.ImageStreamStatus") - proto.RegisterType((*ImageStreamTag)(nil), "github.com.openshift.api.image.v1.ImageStreamTag") - proto.RegisterType((*ImageStreamTagList)(nil), "github.com.openshift.api.image.v1.ImageStreamTagList") - proto.RegisterType((*NamedTagEventList)(nil), "github.com.openshift.api.image.v1.NamedTagEventList") - proto.RegisterType((*RepositoryImportSpec)(nil), "github.com.openshift.api.image.v1.RepositoryImportSpec") - proto.RegisterType((*RepositoryImportStatus)(nil), "github.com.openshift.api.image.v1.RepositoryImportStatus") - proto.RegisterType((*SignatureCondition)(nil), "github.com.openshift.api.image.v1.SignatureCondition") - proto.RegisterType((*SignatureGenericEntity)(nil), "github.com.openshift.api.image.v1.SignatureGenericEntity") - proto.RegisterType((*SignatureIssuer)(nil), "github.com.openshift.api.image.v1.SignatureIssuer") - proto.RegisterType((*SignatureSubject)(nil), "github.com.openshift.api.image.v1.SignatureSubject") - proto.RegisterType((*TagEvent)(nil), "github.com.openshift.api.image.v1.TagEvent") - proto.RegisterType((*TagEventCondition)(nil), "github.com.openshift.api.image.v1.TagEventCondition") - proto.RegisterType((*TagImportPolicy)(nil), "github.com.openshift.api.image.v1.TagImportPolicy") - proto.RegisterType((*TagReference)(nil), "github.com.openshift.api.image.v1.TagReference") - proto.RegisterType((*TagReferencePolicy)(nil), "github.com.openshift.api.image.v1.TagReferencePolicy") -} -func (m *DockerImageReference) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DockerImageReference) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Registry))) - i += copy(dAtA[i:], m.Registry) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Tag))) - i += copy(dAtA[i:], m.Tag) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ID))) - i += copy(dAtA[i:], m.ID) - return i, nil -} - -func (m *Image) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Image) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageReference))) - i += copy(dAtA[i:], m.DockerImageReference) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DockerImageMetadata.Size())) - n2, err := m.DockerImageMetadata.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageMetadataVersion))) - i += copy(dAtA[i:], m.DockerImageMetadataVersion) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.DockerImageManifest))) - i += copy(dAtA[i:], m.DockerImageManifest) - if len(m.DockerImageLayers) > 0 { - for _, msg := range m.DockerImageLayers { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Signatures) > 0 { - for _, msg := range m.Signatures { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.DockerImageSignatures) > 0 { - for _, b := range m.DockerImageSignatures { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(b))) - i += copy(dAtA[i:], b) - } - } - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageManifestMediaType))) - i += copy(dAtA[i:], m.DockerImageManifestMediaType) - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageConfig))) - i += copy(dAtA[i:], m.DockerImageConfig) - return i, nil -} - -func (m *ImageBlobReferences) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ImageBlobReferences) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Layers) > 0 { - for _, s := range m.Layers { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if m.Config != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Config))) - i += copy(dAtA[i:], *m.Config) - } - dAtA[i] = 0x18 - i++ - if m.ImageMissing { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - return i, nil -} - -func (m *ImageImportSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ImageImportSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.From.Size())) - n3, err := m.From.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - if m.To != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.To.Size())) - n4, err := m.To.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ImportPolicy.Size())) - n5, err := m.ImportPolicy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - dAtA[i] = 0x20 - i++ - if m.IncludeManifest { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReferencePolicy.Size())) - n6, err := m.ReferencePolicy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - return i, nil -} - -func (m *ImageImportStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ImageImportStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n7, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += 
n7 - if m.Image != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Image.Size())) - n8, err := m.Image.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Tag))) - i += copy(dAtA[i:], m.Tag) - return i, nil -} - -func (m *ImageLayer) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ImageLayer) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LayerSize)) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MediaType))) - i += copy(dAtA[i:], m.MediaType) - return i, nil -} - -func (m *ImageLayerData) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ImageLayerData) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.LayerSize != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.LayerSize)) - } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MediaType))) - i += copy(dAtA[i:], m.MediaType) - return i, nil -} - -func (m *ImageList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ImageList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n9, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ImageLookupPolicy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ImageLookupPolicy) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x18 - i++ - if m.Local { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - return i, nil -} - -func (m *ImageSignature) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ImageSignature) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n10, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.Content != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Content))) - i += copy(dAtA[i:], m.Content) - } - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x22 - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImageIdentity))) - i += copy(dAtA[i:], m.ImageIdentity) - if len(m.SignedClaims) > 0 { - keysForSignedClaims := make([]string, 0, len(m.SignedClaims)) - for k := range m.SignedClaims { - keysForSignedClaims = append(keysForSignedClaims, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForSignedClaims) - for _, k := range keysForSignedClaims { - dAtA[i] = 0x32 - i++ - v := m.SignedClaims[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - if m.Created != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Created.Size())) - n11, err := m.Created.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 - } - if m.IssuedBy != nil { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.IssuedBy.Size())) - n12, err := m.IssuedBy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 - } - if m.IssuedTo != nil { - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.IssuedTo.Size())) - n13, err := m.IssuedTo.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 - } - return i, nil -} - -func (m *ImageStream) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ImageStream) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n14, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n14 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n15, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n15 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n16, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n16 - return i, nil -} - -func (m *ImageStreamImage) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ImageStreamImage) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n17, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n17 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Image.Size())) - n18, err := m.Image.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n18 - return i, nil -} - -func (m *ImageStreamImport) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ImageStreamImport) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n19, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n19 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n20, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n20 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n21, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n21 - return i, nil -} - -func (m *ImageStreamImportSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ImageStreamImportSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - if m.Import { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.Repository != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Repository.Size())) - n22, err := m.Repository.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n22 - } - if len(m.Images) > 0 { - for _, msg := range m.Images { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ImageStreamImportStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ImageStreamImportStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Import != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Import.Size())) - n23, err := m.Import.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n23 - } - if m.Repository != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Repository.Size())) - n24, err := m.Repository.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n24 - } - if len(m.Images) > 0 { - for _, msg := range m.Images { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ImageStreamLayers) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ImageStreamLayers) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n25, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n25 - if len(m.Blobs) > 0 { - keysForBlobs := make([]string, 0, len(m.Blobs)) - for k := range m.Blobs { - keysForBlobs = append(keysForBlobs, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForBlobs) - for _, k := range keysForBlobs { - dAtA[i] = 0x12 - i++ - v := m.Blobs[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ 
- i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n26, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n26 - } - } - if len(m.Images) > 0 { - keysForImages := make([]string, 0, len(m.Images)) - for k := range m.Images { - keysForImages = append(keysForImages, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForImages) - for _, k := range keysForImages { - dAtA[i] = 0x1a - i++ - v := m.Images[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n27, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n27 - } - } - return i, nil -} - -func (m *ImageStreamList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ImageStreamList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n28, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n28 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ImageStreamMapping) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ImageStreamMapping) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n29, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n29 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Image.Size())) - n30, err := m.Image.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n30 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Tag))) - i += copy(dAtA[i:], m.Tag) - return i, nil -} - -func (m *ImageStreamSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ImageStreamSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageRepository))) - i += copy(dAtA[i:], m.DockerImageRepository) - if len(m.Tags) > 0 { - for _, msg := range m.Tags { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LookupPolicy.Size())) - n31, err := m.LookupPolicy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n31 - return i, nil -} - -func (m *ImageStreamStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := 
m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ImageStreamStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageRepository))) - i += copy(dAtA[i:], m.DockerImageRepository) - if len(m.Tags) > 0 { - for _, msg := range m.Tags { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PublicDockerImageRepository))) - i += copy(dAtA[i:], m.PublicDockerImageRepository) - return i, nil -} - -func (m *ImageStreamTag) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ImageStreamTag) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n32, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n32 - if m.Tag != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Tag.Size())) - n33, err := m.Tag.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n33 - } - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Generation)) - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Image.Size())) - n34, err := m.Image.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n34 - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LookupPolicy.Size())) - n35, err := m.LookupPolicy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n35 - return i, nil -} - -func (m *ImageStreamTagList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ImageStreamTagList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n36, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n36 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *NamedTagEventList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NamedTagEventList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Tag))) - i += copy(dAtA[i:], m.Tag) - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if 
len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *RepositoryImportSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RepositoryImportSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.From.Size())) - n37, err := m.From.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n37 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ImportPolicy.Size())) - n38, err := m.ImportPolicy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n38 - dAtA[i] = 0x18 - i++ - if m.IncludeManifest { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReferencePolicy.Size())) - n39, err := m.ReferencePolicy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n39 - return i, nil -} - -func (m *RepositoryImportStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RepositoryImportStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n40, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n40 - if len(m.Images) > 0 { - for _, msg := range m.Images { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.AdditionalTags) > 0 { - for _, s := range m.AdditionalTags { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *SignatureCondition) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SignatureCondition) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastProbeTime.Size())) - n41, err := m.LastProbeTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n41 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n42, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n42 - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil -} - -func (m *SignatureGenericEntity) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, 
size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SignatureGenericEntity) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Organization))) - i += copy(dAtA[i:], m.Organization) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CommonName))) - i += copy(dAtA[i:], m.CommonName) - return i, nil -} - -func (m *SignatureIssuer) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SignatureIssuer) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SignatureGenericEntity.Size())) - n43, err := m.SignatureGenericEntity.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n43 - return i, nil -} - -func (m *SignatureSubject) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SignatureSubject) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SignatureGenericEntity.Size())) - n44, err := m.SignatureGenericEntity.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n44 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PublicKeyID))) - i += copy(dAtA[i:], m.PublicKeyID) - return i, nil -} - -func (m *TagEvent) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TagEvent) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Created.Size())) - n45, err := m.Created.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n45 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageReference))) - i += copy(dAtA[i:], m.DockerImageReference) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) - i += copy(dAtA[i:], m.Image) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Generation)) - return i, nil -} - -func (m *TagEventCondition) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TagEventCondition) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n46, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n46 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x30 - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(m.Generation)) - return i, nil -} - -func (m *TagImportPolicy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TagImportPolicy) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - if m.Insecure { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x10 - i++ - if m.Scheduled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - return i, nil -} - -func (m *TagReference) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TagReference) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - if len(m.Annotations) > 0 { - keysForAnnotations := make([]string, 0, len(m.Annotations)) - for k := range m.Annotations { - keysForAnnotations = append(keysForAnnotations, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) - for _, k := range keysForAnnotations { - dAtA[i] = 0x12 - i++ - v := m.Annotations[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - if m.From != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.From.Size())) - n47, err := m.From.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n47 - } - dAtA[i] = 0x20 - i++ - if m.Reference { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.Generation != nil { - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Generation)) - } - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ImportPolicy.Size())) - n48, err := m.ImportPolicy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n48 - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReferencePolicy.Size())) - n49, err := m.ReferencePolicy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n49 - return i, nil -} - -func (m *TagReferencePolicy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TagReferencePolicy) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - return i, nil -} - -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *DockerImageReference) Size() (n int) { - var l int - _ = l - l = len(m.Registry) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Tag) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ID) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - 
-func (m *Image) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DockerImageReference) - n += 1 + l + sovGenerated(uint64(l)) - l = m.DockerImageMetadata.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DockerImageMetadataVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DockerImageManifest) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.DockerImageLayers) > 0 { - for _, e := range m.DockerImageLayers { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Signatures) > 0 { - for _, e := range m.Signatures { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.DockerImageSignatures) > 0 { - for _, b := range m.DockerImageSignatures { - l = len(b) - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.DockerImageManifestMediaType) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DockerImageConfig) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ImageBlobReferences) Size() (n int) { - var l int - _ = l - if len(m.Layers) > 0 { - for _, s := range m.Layers { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.Config != nil { - l = len(*m.Config) - n += 1 + l + sovGenerated(uint64(l)) - } - n += 2 - return n -} - -func (m *ImageImportSpec) Size() (n int) { - var l int - _ = l - l = m.From.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.To != nil { - l = m.To.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = m.ImportPolicy.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - l = m.ReferencePolicy.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ImageImportStatus) Size() (n int) { - var l int - _ = l - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Image != nil { - l = m.Image.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.Tag) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ImageLayer) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.LayerSize)) - l = len(m.MediaType) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ImageLayerData) Size() (n int) { - var l int - _ = l - if m.LayerSize != nil { - n += 1 + sovGenerated(uint64(*m.LayerSize)) - } - l = len(m.MediaType) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ImageList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ImageLookupPolicy) Size() (n int) { - var l int - _ = l - n += 2 - return n -} - -func (m *ImageSignature) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - if m.Content != nil { - l = len(m.Content) - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.ImageIdentity) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.SignedClaims) > 0 { - for k, v := range m.SignedClaims { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if m.Created != nil { - l = m.Created.Size() - n += 1 + l + sovGenerated(uint64(l)) - } 
- if m.IssuedBy != nil { - l = m.IssuedBy.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.IssuedTo != nil { - l = m.IssuedTo.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ImageStream) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ImageStreamImage) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Image.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ImageStreamImport) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ImageStreamImportSpec) Size() (n int) { - var l int - _ = l - n += 2 - if m.Repository != nil { - l = m.Repository.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.Images) > 0 { - for _, e := range m.Images { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ImageStreamImportStatus) Size() (n int) { - var l int - _ = l - if m.Import != nil { - l = m.Import.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Repository != nil { - l = m.Repository.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.Images) > 0 { - for _, e := range m.Images { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ImageStreamLayers) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Blobs) > 0 { - for k, v := range m.Blobs { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.Images) > 0 { - for k, v := range m.Images { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - return n -} - -func (m *ImageStreamList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ImageStreamMapping) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Image.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Tag) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ImageStreamSpec) Size() (n int) { - var l int - _ = l - l = len(m.DockerImageRepository) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Tags) > 0 { - for _, e := range m.Tags { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = m.LookupPolicy.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ImageStreamStatus) Size() (n int) { - var l int - _ = l - l = len(m.DockerImageRepository) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Tags) > 0 { - for _, e := range m.Tags { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.PublicDockerImageRepository) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ImageStreamTag) Size() (n int) { - var l int - _ = l - l = 
m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Tag != nil { - l = m.Tag.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 1 + sovGenerated(uint64(m.Generation)) - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = m.Image.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LookupPolicy.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ImageStreamTagList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *NamedTagEventList) Size() (n int) { - var l int - _ = l - l = len(m.Tag) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *RepositoryImportSpec) Size() (n int) { - var l int - _ = l - l = m.From.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.ImportPolicy.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - l = m.ReferencePolicy.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *RepositoryImportStatus) Size() (n int) { - var l int - _ = l - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Images) > 0 { - for _, e := range m.Images { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.AdditionalTags) > 0 { - for _, s := range m.AdditionalTags { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *SignatureCondition) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastProbeTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *SignatureGenericEntity) Size() (n int) { - var l int - _ = l - l = len(m.Organization) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.CommonName) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *SignatureIssuer) Size() (n int) { - var l int - _ = l - l = m.SignatureGenericEntity.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *SignatureSubject) Size() (n int) { - var l int - _ = l - l = m.SignatureGenericEntity.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.PublicKeyID) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *TagEvent) Size() (n int) { - var l int - _ = l - l = m.Created.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DockerImageReference) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Image) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Generation)) - return n -} - -func (m *TagEventCondition) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + 
sovGenerated(uint64(m.Generation)) - return n -} - -func (m *TagImportPolicy) Size() (n int) { - var l int - _ = l - n += 2 - n += 2 - return n -} - -func (m *TagReference) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Annotations) > 0 { - for k, v := range m.Annotations { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if m.From != nil { - l = m.From.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 2 - if m.Generation != nil { - n += 1 + sovGenerated(uint64(*m.Generation)) - } - l = m.ImportPolicy.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.ReferencePolicy.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *TagReferencePolicy) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *DockerImageReference) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DockerImageReference{`, - `Registry:` + fmt.Sprintf("%v", this.Registry) + `,`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Tag:` + fmt.Sprintf("%v", this.Tag) + `,`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `}`, - }, "") - return s -} -func (this *Image) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Image{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `DockerImageReference:` + fmt.Sprintf("%v", this.DockerImageReference) + `,`, - `DockerImageMetadata:` + strings.Replace(strings.Replace(this.DockerImageMetadata.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, - `DockerImageMetadataVersion:` + fmt.Sprintf("%v", this.DockerImageMetadataVersion) + `,`, - `DockerImageManifest:` + fmt.Sprintf("%v", this.DockerImageManifest) + `,`, - `DockerImageLayers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.DockerImageLayers), "ImageLayer", "ImageLayer", 1), `&`, ``, 1) + `,`, - `Signatures:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Signatures), "ImageSignature", "ImageSignature", 1), `&`, ``, 1) + `,`, - `DockerImageSignatures:` + fmt.Sprintf("%v", this.DockerImageSignatures) + `,`, - `DockerImageManifestMediaType:` + fmt.Sprintf("%v", this.DockerImageManifestMediaType) + `,`, - `DockerImageConfig:` + fmt.Sprintf("%v", this.DockerImageConfig) + `,`, - `}`, - }, "") - return s -} -func (this *ImageBlobReferences) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ImageBlobReferences{`, - `Layers:` + fmt.Sprintf("%v", this.Layers) + `,`, - `Config:` + valueToStringGenerated(this.Config) + `,`, - `ImageMissing:` + fmt.Sprintf("%v", this.ImageMissing) + `,`, - `}`, - }, "") - return s -} -func (this *ImageImportSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ImageImportSpec{`, - `From:` + strings.Replace(strings.Replace(this.From.String(), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1), `&`, ``, 1) + `,`, - `To:` 
+ strings.Replace(fmt.Sprintf("%v", this.To), "LocalObjectReference", "k8s_io_api_core_v1.LocalObjectReference", 1) + `,`, - `ImportPolicy:` + strings.Replace(strings.Replace(this.ImportPolicy.String(), "TagImportPolicy", "TagImportPolicy", 1), `&`, ``, 1) + `,`, - `IncludeManifest:` + fmt.Sprintf("%v", this.IncludeManifest) + `,`, - `ReferencePolicy:` + strings.Replace(strings.Replace(this.ReferencePolicy.String(), "TagReferencePolicy", "TagReferencePolicy", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ImageImportStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ImageImportStatus{`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "Status", "k8s_io_apimachinery_pkg_apis_meta_v1.Status", 1), `&`, ``, 1) + `,`, - `Image:` + strings.Replace(fmt.Sprintf("%v", this.Image), "Image", "Image", 1) + `,`, - `Tag:` + fmt.Sprintf("%v", this.Tag) + `,`, - `}`, - }, "") - return s -} -func (this *ImageLayer) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ImageLayer{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `LayerSize:` + fmt.Sprintf("%v", this.LayerSize) + `,`, - `MediaType:` + fmt.Sprintf("%v", this.MediaType) + `,`, - `}`, - }, "") - return s -} -func (this *ImageLayerData) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ImageLayerData{`, - `LayerSize:` + valueToStringGenerated(this.LayerSize) + `,`, - `MediaType:` + fmt.Sprintf("%v", this.MediaType) + `,`, - `}`, - }, "") - return s -} -func (this *ImageList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ImageList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Image", "Image", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ImageLookupPolicy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ImageLookupPolicy{`, - `Local:` + fmt.Sprintf("%v", this.Local) + `,`, - `}`, - }, "") - return s -} -func (this *ImageSignature) String() string { - if this == nil { - return "nil" - } - keysForSignedClaims := make([]string, 0, len(this.SignedClaims)) - for k := range this.SignedClaims { - keysForSignedClaims = append(keysForSignedClaims, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForSignedClaims) - mapStringForSignedClaims := "map[string]string{" - for _, k := range keysForSignedClaims { - mapStringForSignedClaims += fmt.Sprintf("%v: %v,", k, this.SignedClaims[k]) - } - mapStringForSignedClaims += "}" - s := strings.Join([]string{`&ImageSignature{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Content:` + valueToStringGenerated(this.Content) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "SignatureCondition", "SignatureCondition", 1), `&`, ``, 1) + `,`, - `ImageIdentity:` + fmt.Sprintf("%v", this.ImageIdentity) + `,`, - `SignedClaims:` + mapStringForSignedClaims + `,`, - `Created:` + strings.Replace(fmt.Sprintf("%v", this.Created), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, - `IssuedBy:` + strings.Replace(fmt.Sprintf("%v", this.IssuedBy), "SignatureIssuer", 
"SignatureIssuer", 1) + `,`, - `IssuedTo:` + strings.Replace(fmt.Sprintf("%v", this.IssuedTo), "SignatureSubject", "SignatureSubject", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ImageStream) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ImageStream{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ImageStreamSpec", "ImageStreamSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ImageStreamStatus", "ImageStreamStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ImageStreamImage) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ImageStreamImage{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Image:` + strings.Replace(strings.Replace(this.Image.String(), "Image", "Image", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ImageStreamImport) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ImageStreamImport{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ImageStreamImportSpec", "ImageStreamImportSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ImageStreamImportStatus", "ImageStreamImportStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ImageStreamImportSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ImageStreamImportSpec{`, - `Import:` + fmt.Sprintf("%v", this.Import) + `,`, - `Repository:` + strings.Replace(fmt.Sprintf("%v", this.Repository), "RepositoryImportSpec", "RepositoryImportSpec", 1) + `,`, - `Images:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Images), "ImageImportSpec", "ImageImportSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ImageStreamImportStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ImageStreamImportStatus{`, - `Import:` + strings.Replace(fmt.Sprintf("%v", this.Import), "ImageStream", "ImageStream", 1) + `,`, - `Repository:` + strings.Replace(fmt.Sprintf("%v", this.Repository), "RepositoryImportStatus", "RepositoryImportStatus", 1) + `,`, - `Images:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Images), "ImageImportStatus", "ImageImportStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ImageStreamLayers) String() string { - if this == nil { - return "nil" - } - keysForBlobs := make([]string, 0, len(this.Blobs)) - for k := range this.Blobs { - keysForBlobs = append(keysForBlobs, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForBlobs) - mapStringForBlobs := "map[string]ImageLayerData{" - for _, k := range keysForBlobs { - mapStringForBlobs += fmt.Sprintf("%v: %v,", k, this.Blobs[k]) - } - mapStringForBlobs += "}" - keysForImages := make([]string, 0, len(this.Images)) - for k := range this.Images { - keysForImages = append(keysForImages, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForImages) - mapStringForImages := 
"map[string]ImageBlobReferences{" - for _, k := range keysForImages { - mapStringForImages += fmt.Sprintf("%v: %v,", k, this.Images[k]) - } - mapStringForImages += "}" - s := strings.Join([]string{`&ImageStreamLayers{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Blobs:` + mapStringForBlobs + `,`, - `Images:` + mapStringForImages + `,`, - `}`, - }, "") - return s -} -func (this *ImageStreamList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ImageStreamList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ImageStream", "ImageStream", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ImageStreamMapping) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ImageStreamMapping{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Image:` + strings.Replace(strings.Replace(this.Image.String(), "Image", "Image", 1), `&`, ``, 1) + `,`, - `Tag:` + fmt.Sprintf("%v", this.Tag) + `,`, - `}`, - }, "") - return s -} -func (this *ImageStreamSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ImageStreamSpec{`, - `DockerImageRepository:` + fmt.Sprintf("%v", this.DockerImageRepository) + `,`, - `Tags:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Tags), "TagReference", "TagReference", 1), `&`, ``, 1) + `,`, - `LookupPolicy:` + strings.Replace(strings.Replace(this.LookupPolicy.String(), "ImageLookupPolicy", "ImageLookupPolicy", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ImageStreamStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ImageStreamStatus{`, - `DockerImageRepository:` + fmt.Sprintf("%v", this.DockerImageRepository) + `,`, - `Tags:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Tags), "NamedTagEventList", "NamedTagEventList", 1), `&`, ``, 1) + `,`, - `PublicDockerImageRepository:` + fmt.Sprintf("%v", this.PublicDockerImageRepository) + `,`, - `}`, - }, "") - return s -} -func (this *ImageStreamTag) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ImageStreamTag{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Tag:` + strings.Replace(fmt.Sprintf("%v", this.Tag), "TagReference", "TagReference", 1) + `,`, - `Generation:` + fmt.Sprintf("%v", this.Generation) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "TagEventCondition", "TagEventCondition", 1), `&`, ``, 1) + `,`, - `Image:` + strings.Replace(strings.Replace(this.Image.String(), "Image", "Image", 1), `&`, ``, 1) + `,`, - `LookupPolicy:` + strings.Replace(strings.Replace(this.LookupPolicy.String(), "ImageLookupPolicy", "ImageLookupPolicy", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ImageStreamTagList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ImageStreamTagList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", 
"k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ImageStreamTag", "ImageStreamTag", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *NamedTagEventList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NamedTagEventList{`, - `Tag:` + fmt.Sprintf("%v", this.Tag) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "TagEvent", "TagEvent", 1), `&`, ``, 1) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "TagEventCondition", "TagEventCondition", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *RepositoryImportSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RepositoryImportSpec{`, - `From:` + strings.Replace(strings.Replace(this.From.String(), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1), `&`, ``, 1) + `,`, - `ImportPolicy:` + strings.Replace(strings.Replace(this.ImportPolicy.String(), "TagImportPolicy", "TagImportPolicy", 1), `&`, ``, 1) + `,`, - `IncludeManifest:` + fmt.Sprintf("%v", this.IncludeManifest) + `,`, - `ReferencePolicy:` + strings.Replace(strings.Replace(this.ReferencePolicy.String(), "TagReferencePolicy", "TagReferencePolicy", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *RepositoryImportStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RepositoryImportStatus{`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "Status", "k8s_io_apimachinery_pkg_apis_meta_v1.Status", 1), `&`, ``, 1) + `,`, - `Images:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Images), "ImageImportStatus", "ImageImportStatus", 1), `&`, ``, 1) + `,`, - `AdditionalTags:` + fmt.Sprintf("%v", this.AdditionalTags) + `,`, - `}`, - }, "") - return s -} -func (this *SignatureCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SignatureCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastProbeTime:` + strings.Replace(strings.Replace(this.LastProbeTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *SignatureGenericEntity) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SignatureGenericEntity{`, - `Organization:` + fmt.Sprintf("%v", this.Organization) + `,`, - `CommonName:` + fmt.Sprintf("%v", this.CommonName) + `,`, - `}`, - }, "") - return s -} -func (this *SignatureIssuer) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SignatureIssuer{`, - `SignatureGenericEntity:` + strings.Replace(strings.Replace(this.SignatureGenericEntity.String(), "SignatureGenericEntity", "SignatureGenericEntity", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *SignatureSubject) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SignatureSubject{`, - `SignatureGenericEntity:` + strings.Replace(strings.Replace(this.SignatureGenericEntity.String(), 
"SignatureGenericEntity", "SignatureGenericEntity", 1), `&`, ``, 1) + `,`, - `PublicKeyID:` + fmt.Sprintf("%v", this.PublicKeyID) + `,`, - `}`, - }, "") - return s -} -func (this *TagEvent) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TagEvent{`, - `Created:` + strings.Replace(strings.Replace(this.Created.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `DockerImageReference:` + fmt.Sprintf("%v", this.DockerImageReference) + `,`, - `Image:` + fmt.Sprintf("%v", this.Image) + `,`, - `Generation:` + fmt.Sprintf("%v", this.Generation) + `,`, - `}`, - }, "") - return s -} -func (this *TagEventCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TagEventCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `Generation:` + fmt.Sprintf("%v", this.Generation) + `,`, - `}`, - }, "") - return s -} -func (this *TagImportPolicy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TagImportPolicy{`, - `Insecure:` + fmt.Sprintf("%v", this.Insecure) + `,`, - `Scheduled:` + fmt.Sprintf("%v", this.Scheduled) + `,`, - `}`, - }, "") - return s -} -func (this *TagReference) String() string { - if this == nil { - return "nil" - } - keysForAnnotations := make([]string, 0, len(this.Annotations)) - for k := range this.Annotations { - keysForAnnotations = append(keysForAnnotations, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) - mapStringForAnnotations := "map[string]string{" - for _, k := range keysForAnnotations { - mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) - } - mapStringForAnnotations += "}" - s := strings.Join([]string{`&TagReference{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Annotations:` + mapStringForAnnotations + `,`, - `From:` + strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1) + `,`, - `Reference:` + fmt.Sprintf("%v", this.Reference) + `,`, - `Generation:` + valueToStringGenerated(this.Generation) + `,`, - `ImportPolicy:` + strings.Replace(strings.Replace(this.ImportPolicy.String(), "TagImportPolicy", "TagImportPolicy", 1), `&`, ``, 1) + `,`, - `ReferencePolicy:` + strings.Replace(strings.Replace(this.ReferencePolicy.String(), "TagReferencePolicy", "TagReferencePolicy", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *TagReferencePolicy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TagReferencePolicy{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *DockerImageReference) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 
{ - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DockerImageReference: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DockerImageReference: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Registry", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Registry = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tag", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Tag = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := 
skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Image) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Image: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Image: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DockerImageReference", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DockerImageReference = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DockerImageMetadata", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.DockerImageMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DockerImageMetadataVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DockerImageMetadataVersion = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 
5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DockerImageManifest", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DockerImageManifest = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DockerImageLayers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DockerImageLayers = append(m.DockerImageLayers, ImageLayer{}) - if err := m.DockerImageLayers[len(m.DockerImageLayers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Signatures", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Signatures = append(m.Signatures, ImageSignature{}) - if err := m.Signatures[len(m.Signatures)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DockerImageSignatures", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DockerImageSignatures = append(m.DockerImageSignatures, make([]byte, postIndex-iNdEx)) - copy(m.DockerImageSignatures[len(m.DockerImageSignatures)-1], dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DockerImageManifestMediaType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DockerImageManifestMediaType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 10: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field DockerImageConfig", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DockerImageConfig = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ImageBlobReferences) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ImageBlobReferences: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ImageBlobReferences: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Layers", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Layers = append(m.Layers, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Config = &s - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ImageMissing", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ImageMissing = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return 
io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ImageImportSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ImageImportSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ImageImportSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.To == nil { - m.To = &k8s_io_api_core_v1.LocalObjectReference{} - } - if err := m.To.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImportPolicy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ImportPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IncludeManifest", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.IncludeManifest = bool(v != 0) - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReferencePolicy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - 
return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ReferencePolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ImageImportStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ImageImportStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ImageImportStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Image == nil { - m.Image = &Image{} - } - if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tag", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Tag = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ImageLayer) Unmarshal(dAtA []byte) error { - l := 
len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ImageLayer: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ImageLayer: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field LayerSize", wireType) - } - m.LayerSize = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.LayerSize |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MediaType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MediaType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ImageLayerData) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ImageLayerData: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ImageLayerData: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field LayerSize", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF 
- } - b := dAtA[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.LayerSize = &v - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MediaType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MediaType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ImageList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ImageList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ImageList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Image{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ImageLookupPolicy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { 
- if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ImageLookupPolicy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ImageLookupPolicy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Local", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Local = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ImageSignature) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ImageSignature: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ImageSignature: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Content", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if 
byteLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Content = append(m.Content[:0], dAtA[iNdEx:postIndex]...) - if m.Content == nil { - m.Content = []byte{} - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, SignatureCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImageIdentity", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ImageIdentity = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SignedClaims", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SignedClaims == nil { - m.SignedClaims = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - 
intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.SignedClaims[mapkey] = mapvalue - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Created", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Created == nil { - m.Created = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} - } - if err := m.Created.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IssuedBy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.IssuedBy == nil { - m.IssuedBy = &SignatureIssuer{} - } - if err := m.IssuedBy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IssuedTo", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.IssuedTo == nil { - m.IssuedTo = &SignatureSubject{} - } - if err := m.IssuedTo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ImageStream) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return 
fmt.Errorf("proto: ImageStream: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ImageStream: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ImageStreamImage) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ImageStreamImage: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ImageStreamImage: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if 
postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ImageStreamImport) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ImageStreamImport: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ImageStreamImport: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - 
if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ImageStreamImportSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ImageStreamImportSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ImageStreamImportSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Import", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Import = bool(v != 0) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Repository", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Repository == nil { - m.Repository = &RepositoryImportSpec{} - } - if err := m.Repository.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Images = append(m.Images, ImageImportSpec{}) - if err := m.Images[len(m.Images)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ImageStreamImportStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ImageStreamImportStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ImageStreamImportStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Import", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Import == nil { - m.Import = &ImageStream{} - } - if err := m.Import.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Repository", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Repository == nil { - m.Repository = &RepositoryImportStatus{} - } - if err := m.Repository.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Images = append(m.Images, ImageImportStatus{}) - if err := m.Images[len(m.Images)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ImageStreamLayers) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ImageStreamLayers: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ImageStreamLayers: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType 
= %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Blobs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Blobs == nil { - m.Blobs = make(map[string]ImageLayerData) - } - var mapkey string - mapvalue := &ImageLayerData{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &ImageLayerData{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Blobs[mapkey] = *mapvalue - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } 
- postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Images == nil { - m.Images = make(map[string]ImageBlobReferences) - } - var mapkey string - mapvalue := &ImageBlobReferences{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &ImageBlobReferences{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Images[mapkey] = *mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ImageStreamList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ImageStreamList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ImageStreamList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - 
if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ImageStream{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ImageStreamMapping) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ImageStreamMapping: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ImageStreamMapping: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tag", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 
{ - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Tag = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ImageStreamSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ImageStreamSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ImageStreamSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DockerImageRepository", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DockerImageRepository = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Tags = append(m.Tags, TagReference{}) - if err := m.Tags[len(m.Tags)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LookupPolicy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LookupPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - 
return nil -} -func (m *ImageStreamStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ImageStreamStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ImageStreamStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DockerImageRepository", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DockerImageRepository = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Tags = append(m.Tags, NamedTagEventList{}) - if err := m.Tags[len(m.Tags)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PublicDockerImageRepository", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PublicDockerImageRepository = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ImageStreamTag) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: 
ImageStreamTag: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ImageStreamTag: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tag", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Tag == nil { - m.Tag = &TagReference{} - } - if err := m.Tag.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType) - } - m.Generation = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Generation |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, TagEventCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LookupPolicy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 
{ - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LookupPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ImageStreamTagList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ImageStreamTagList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ImageStreamTagList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ImageStreamTag{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NamedTagEventList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NamedTagEventList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NamedTagEventList: illegal 
tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tag", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Tag = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, TagEvent{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, TagEventCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RepositoryImportSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RepositoryImportSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RepositoryImportSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + 
msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImportPolicy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ImportPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IncludeManifest", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.IncludeManifest = bool(v != 0) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReferencePolicy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ReferencePolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RepositoryImportStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RepositoryImportStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RepositoryImportStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: 
wrong wireType = %d for field Images", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Images = append(m.Images, ImageImportStatus{}) - if err := m.Images[len(m.Images)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AdditionalTags", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AdditionalTags = append(m.AdditionalTags, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SignatureCondition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SignatureCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SignatureCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = SignatureConditionType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - 
m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastProbeTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SignatureGenericEntity) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SignatureGenericEntity: wiretype end group for non-group") - } - if fieldNum <= 0 { - 
return fmt.Errorf("proto: SignatureGenericEntity: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Organization", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Organization = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CommonName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CommonName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SignatureIssuer) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SignatureIssuer: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SignatureIssuer: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SignatureGenericEntity", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.SignatureGenericEntity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SignatureSubject) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift 
:= uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SignatureSubject: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SignatureSubject: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SignatureGenericEntity", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.SignatureGenericEntity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PublicKeyID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PublicKeyID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TagEvent) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TagEvent: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TagEvent: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Created", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Created.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field 
DockerImageReference", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DockerImageReference = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Image = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType) - } - m.Generation = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Generation |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TagEventCondition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TagEventCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TagEventCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = TagEventConditionType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType) - } - m.Generation = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Generation |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TagImportPolicy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TagImportPolicy: wiretype end group for non-group") - } - 
if fieldNum <= 0 { - return fmt.Errorf("proto: TagImportPolicy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Insecure", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Insecure = bool(v != 0) - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Scheduled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Scheduled = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TagReference) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TagReference: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TagReference: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Annotations == nil { - m.Annotations = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum 
== 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Annotations[mapkey] = mapvalue - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.From == nil { - m.From = &k8s_io_api_core_v1.ObjectReference{} - } - if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Reference", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Reference = bool(v != 0) - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Generation = &v - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImportPolicy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - 
return io.ErrUnexpectedEOF - } - if err := m.ImportPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReferencePolicy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ReferencePolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TagReferencePolicy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TagReferencePolicy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TagReferencePolicy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = TagReferencePolicyType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 
{ - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("github.com/openshift/api/image/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 2450 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x5a, 0x4f, 0x6c, 0x1c, 0x49, - 0xd5, 0x4f, 0x4f, 0xcf, 0x8c, 0xc7, 0xcf, 0x8e, 0x1d, 0x57, 0xe2, 0xec, 0xec, 0x24, 0x6b, 0x7b, - 0x3b, 0x5f, 0xa2, 0x7c, 0x90, 0xed, 0xc1, 0x26, 0xbb, 0x38, 0x41, 0x62, 0x37, 0x93, 0x09, 0xd1, - 0x80, 0x4d, 0xbc, 0xe5, 0x21, 0x87, 0x28, 0x48, 0x94, 0x7b, 0xca, 0xed, 0xc6, 0x33, 0xdd, 0x43, - 0x77, 0x8f, 0x77, 0x1d, 0x81, 0xc4, 0x01, 0xad, 0xf6, 0xc0, 0x01, 0x4e, 0x1c, 0xf6, 0x88, 0x56, - 0x88, 0x33, 0x02, 0x71, 0x07, 0xa4, 0x88, 0x0b, 0xab, 0xe5, 0xb2, 0x17, 0x2c, 0x32, 0x70, 0xe6, - 0xc6, 0x65, 0x4f, 0xa8, 0xaa, 0xab, 0xbb, 0xab, 0x7b, 0x7a, 0x9c, 0x9e, 0x10, 0x5b, 0x70, 0xf3, - 0xd4, 0x7b, 0xef, 0xf7, 0x5e, 0xbd, 0x57, 0xef, 0x4f, 0x55, 0x1b, 0x56, 0x4d, 0xcb, 0xdf, 0x1b, - 0xec, 0xe8, 0x86, 0xd3, 0xab, 0x3b, 0x7d, 0x6a, 0x7b, 0x7b, 0xd6, 0xae, 0x5f, 0x27, 0x7d, 0xab, - 0x6e, 0xf5, 0x88, 0x49, 0xeb, 0x07, 0xab, 0x75, 0x93, 0xda, 0xd4, 0x25, 0x3e, 0xed, 0xe8, 0x7d, - 0xd7, 0xf1, 0x1d, 0xf4, 0x7a, 0x2c, 0xa2, 0x47, 0x22, 0x3a, 0xe9, 0x5b, 0x3a, 0x17, 0xd1, 0x0f, - 0x56, 0x6b, 0x6f, 0x48, 0xa8, 0xa6, 0x63, 0x3a, 0x75, 0x2e, 0xb9, 0x33, 0xd8, 0xe5, 0xbf, 0xf8, - 0x0f, 0xfe, 0x57, 0x80, 0x58, 0xd3, 0xf6, 0xd7, 0x3d, 0xdd, 0x72, 0xb8, 0x5a, 0xc3, 0x71, 0xb3, - 0xb4, 0xd6, 0x6e, 0xc6, 0x3c, 0x3d, 0x62, 0xec, 0x59, 0x36, 0x75, 0x0f, 0xeb, 0xfd, 0x7d, 0x93, - 0x2d, 0x78, 0xf5, 0x1e, 0xf5, 0x49, 0x96, 0x54, 0x7d, 0x9c, 0x94, 0x3b, 0xb0, 0x7d, 0xab, 0x47, - 0x47, 0x04, 0xde, 0x7a, 0x9e, 0x80, 0x67, 0xec, 0xd1, 0x1e, 0x49, 0xcb, 0x69, 0x9f, 0x2a, 0x70, - 0xa1, 0xe9, 0x18, 0xfb, 0xd4, 0x6d, 0x31, 0x27, 0x60, 0xba, 0x4b, 0x5d, 0x6a, 0x1b, 0x14, 0xdd, - 0x80, 0x8a, 0x4b, 0x4d, 0xcb, 0xf3, 0xdd, 0xc3, 0xaa, 0xb2, 0xa2, 0x5c, 0x9f, 0x6e, 0x9c, 0x7b, - 0x7a, 0xb4, 0x7c, 0x66, 0x78, 0xb4, 0x5c, 0xc1, 0x62, 0x1d, 0x47, 0x1c, 0xa8, 0x0e, 0xd3, 0x36, - 0xe9, 0x51, 0xaf, 0x4f, 0x0c, 0x5a, 0x2d, 0x70, 0xf6, 0x05, 0xc1, 0x3e, 0xfd, 0xad, 0x90, 0x80, - 0x63, 0x1e, 0xb4, 0x02, 0x45, 0xf6, 0xa3, 0xaa, 0x72, 0xde, 0x59, 0xc1, 0x5b, 0x64, 0xbc, 0x98, - 0x53, 0xd0, 0x6b, 0xa0, 0xfa, 0xc4, 0xac, 0x16, 0x39, 0xc3, 0x8c, 0x60, 0x50, 0xdb, 0xc4, 0xc4, - 0x6c, 0x1d, 
0xd5, 0xa0, 0x60, 0x35, 0xab, 0x25, 0x4e, 0x05, 0x41, 0x2d, 0xb4, 0x9a, 0xb8, 0x60, - 0x35, 0xb5, 0x3f, 0x4f, 0x41, 0x89, 0x6f, 0x07, 0x7d, 0x17, 0x2a, 0xcc, 0xc5, 0x1d, 0xe2, 0x13, - 0xbe, 0x8b, 0x99, 0xb5, 0x2f, 0xe9, 0x81, 0xa7, 0x74, 0xd9, 0x53, 0x7a, 0x7f, 0xdf, 0x64, 0x0b, - 0x9e, 0xce, 0xb8, 0xf5, 0x83, 0x55, 0xfd, 0xc1, 0xce, 0xf7, 0xa8, 0xe1, 0x6f, 0x52, 0x9f, 0x34, - 0x90, 0x40, 0x87, 0x78, 0x0d, 0x47, 0xa8, 0x68, 0x0b, 0x2e, 0x74, 0x32, 0xfc, 0x27, 0x9c, 0x70, - 0x59, 0xc8, 0x66, 0xfa, 0x18, 0x67, 0x4a, 0xa2, 0x1f, 0xc0, 0x79, 0x69, 0x7d, 0x33, 0x34, 0x5f, - 0xe5, 0xe6, 0xbf, 0x31, 0xd6, 0x7c, 0x11, 0x68, 0x1d, 0x93, 0xf7, 0xee, 0xbd, 0xef, 0x53, 0xdb, - 0xb3, 0x1c, 0xbb, 0x71, 0x49, 0xe8, 0x3f, 0xdf, 0x1c, 0x45, 0xc4, 0x59, 0x6a, 0xd0, 0x0e, 0xd4, - 0x32, 0x96, 0x1f, 0x52, 0x97, 0xe1, 0x89, 0x68, 0x68, 0x02, 0xb5, 0xd6, 0x1c, 0xcb, 0x89, 0x8f, - 0x41, 0x41, 0x9b, 0xc9, 0x1d, 0x12, 0xdb, 0xda, 0xa5, 0x9e, 0x2f, 0x82, 0x99, 0x69, 0xb2, 0x60, - 0xc1, 0x59, 0x72, 0xe8, 0x00, 0x16, 0xa4, 0xe5, 0x0d, 0x72, 0x48, 0x5d, 0xaf, 0x5a, 0x5e, 0x51, - 0xb9, 0xbb, 0x9e, 0x9b, 0xf4, 0x7a, 0x2c, 0xd5, 0x78, 0x55, 0xe8, 0x5e, 0x68, 0xa6, 0xf1, 0xf0, - 0xa8, 0x0a, 0x44, 0x01, 0x3c, 0xcb, 0xb4, 0x89, 0x3f, 0x70, 0xa9, 0x57, 0x9d, 0xe2, 0x0a, 0x57, - 0xf3, 0x2a, 0xdc, 0x0e, 0x25, 0xe3, 0xf3, 0x15, 0x2d, 0x79, 0x58, 0x02, 0x46, 0x0f, 0x60, 0x51, - 0xd2, 0x1d, 0x33, 0x55, 0x2b, 0x2b, 0xea, 0xf5, 0xd9, 0xc6, 0xab, 0xc3, 0xa3, 0xe5, 0xc5, 0x66, - 0x16, 0x03, 0xce, 0x96, 0x43, 0x7b, 0x70, 0x39, 0xc3, 0x8d, 0x9b, 0xb4, 0x63, 0x91, 0xf6, 0x61, - 0x9f, 0x56, 0xa7, 0x79, 0x1c, 0xfe, 0x4f, 0x98, 0x75, 0xb9, 0x79, 0x0c, 0x2f, 0x3e, 0x16, 0x09, - 0xdd, 0x4f, 0x44, 0xe6, 0xae, 0x63, 0xef, 0x5a, 0x66, 0x15, 0x38, 0x7c, 0x96, 0xab, 0x03, 0x06, - 0x3c, 0x2a, 0xa3, 0xfd, 0x5c, 0x81, 0xf3, 0xfc, 0x77, 0xa3, 0xeb, 0xec, 0x44, 0xa9, 0xe2, 0x21, - 0x0d, 0xca, 0xdd, 0x20, 0xde, 0xca, 0x8a, 0xca, 0x2a, 0xc1, 0xf0, 0x68, 0xb9, 0x2c, 0x22, 0x26, - 0x28, 0x8c, 0xc7, 0x08, 0x34, 0x07, 0x39, 0xc9, 0x79, 0x84, 0x2a, 0x41, 0x41, 0xeb, 0x30, 0xcb, - 0xc3, 0xb3, 0x69, 0x79, 0x9e, 0x65, 0x9b, 0x3c, 0xd9, 0x2a, 0x8d, 0x0b, 0xc2, 0xc6, 0xd9, 0x96, - 0x44, 0xc3, 0x09, 0x4e, 0xed, 0x8f, 0x2a, 0xcc, 0x73, 0x72, 0xab, 0xd7, 0x77, 0x5c, 0x7f, 0xbb, - 0x4f, 0x0d, 0x74, 0x0f, 0x8a, 0xbb, 0xae, 0xd3, 0x13, 0x15, 0xe7, 0x8a, 0x94, 0xb2, 0x3a, 0x6b, - 0x13, 0x71, 0x7d, 0x89, 0x76, 0x12, 0x57, 0xc0, 0xaf, 0xbb, 0x4e, 0x0f, 0x73, 0x71, 0xf4, 0x0e, - 0x14, 0x7c, 0x87, 0x1b, 0x3d, 0xb3, 0x76, 0x3d, 0x0b, 0x64, 0xc3, 0x31, 0x48, 0x37, 0x8d, 0x54, - 0x66, 0x85, 0xb0, 0xed, 0xe0, 0x82, 0xef, 0xa0, 0x2e, 0xdb, 0x16, 0x33, 0x6b, 0xcb, 0xe9, 0x5a, - 0xc6, 0xa1, 0xa8, 0x21, 0x6b, 0x39, 0xce, 0x68, 0x9b, 0x98, 0x2d, 0x49, 0x52, 0x76, 0x45, 0xbc, - 0x8a, 0x13, 0xe8, 0xe8, 0x0e, 0xcc, 0x5b, 0xb6, 0xd1, 0x1d, 0x74, 0xe2, 0x94, 0x2e, 0x72, 0x3f, - 0xbe, 0x22, 0x84, 0xe7, 0x5b, 0x49, 0x32, 0x4e, 0xf3, 0xa3, 0xf7, 0x61, 0xde, 0x0d, 0x77, 0x22, - 0x6c, 0x2e, 0x71, 0x9b, 0xdf, 0xcc, 0x67, 0x33, 0x4e, 0x0a, 0xc7, 0x9a, 0x53, 0x04, 0x9c, 0x56, - 0xa3, 0xfd, 0x45, 0x81, 0x05, 0x39, 0x8e, 0x3e, 0xf1, 0x07, 0x1e, 0x6a, 0x43, 0xd9, 0xe3, 0x7f, - 0x89, 0x58, 0xde, 0xc8, 0xd7, 0x3d, 0x02, 0xe9, 0xc6, 0x9c, 0xd0, 0x5e, 0x0e, 0x7e, 0x63, 0x81, - 0x85, 0x5a, 0x50, 0xe2, 0x46, 0x47, 0xb1, 0xcd, 0x59, 0x33, 0x1a, 0xd3, 0xc3, 0xa3, 0xe5, 0xa0, - 0xb3, 0xe1, 0x00, 0x21, 0xec, 0x92, 0x6a, 0x76, 0x97, 0xd4, 0x3e, 0x50, 0x00, 0xe2, 0x92, 0x15, - 0x75, 0x5d, 0x65, 0x6c, 0xd7, 0xbd, 0x0a, 0x45, 0xcf, 0x7a, 0x12, 0x58, 0xa6, 0xc6, 0x3d, 0x9c, - 0x8b, 0x6f, 0x5b, 0x4f, 0x28, 0xe6, 
0x64, 0xd6, 0xef, 0x7b, 0x51, 0xbd, 0x50, 0x93, 0xfd, 0x3e, - 0x2e, 0x0e, 0x31, 0x8f, 0xd6, 0x81, 0xb9, 0xd8, 0x8e, 0x26, 0x6b, 0x34, 0xaf, 0x0b, 0x4d, 0x0a, - 0xd7, 0x74, 0xf6, 0xb9, 0x5a, 0x0a, 0x39, 0xb4, 0xfc, 0x4e, 0x81, 0xe9, 0x40, 0x8d, 0xe5, 0xf9, - 0xe8, 0xf1, 0x48, 0xf3, 0xd7, 0xf3, 0x85, 0x8f, 0x49, 0xf3, 0xd6, 0x1f, 0x8d, 0x3c, 0xe1, 0x8a, - 0xd4, 0xf8, 0x37, 0xa1, 0x64, 0xf9, 0xb4, 0xe7, 0x55, 0x0b, 0xbc, 0xf0, 0xe7, 0x0f, 0xe2, 0x59, - 0x01, 0x5a, 0x6a, 0x31, 0x71, 0x1c, 0xa0, 0x68, 0xeb, 0xe2, 0xf8, 0x6d, 0x38, 0xce, 0xfe, 0xa0, - 0x2f, 0x32, 0xea, 0x0a, 0x94, 0xba, 0x2c, 0xc7, 0x45, 0x3d, 0x8a, 0x24, 0x79, 0xe2, 0xe3, 0x80, - 0xa6, 0xfd, 0xba, 0x2c, 0x7c, 0x1b, 0x95, 0xf8, 0x53, 0x18, 0x7b, 0x56, 0xa0, 0xe8, 0xc7, 0x51, - 0x89, 0x4e, 0x12, 0x0f, 0x08, 0xa7, 0xa0, 0xab, 0x30, 0x65, 0x38, 0xb6, 0x4f, 0x6d, 0x9f, 0x5b, - 0x3f, 0xdb, 0x98, 0x19, 0x1e, 0x2d, 0x4f, 0xdd, 0x0d, 0x96, 0x70, 0x48, 0x43, 0x16, 0x80, 0xe1, - 0xd8, 0x1d, 0xcb, 0xb7, 0x1c, 0xdb, 0xab, 0x16, 0xb9, 0x2f, 0xf3, 0x24, 0x7b, 0xb4, 0xd9, 0xbb, - 0xa1, 0x74, 0x6c, 0x71, 0xb4, 0xe4, 0x61, 0x09, 0x1c, 0x7d, 0x15, 0xce, 0x72, 0xf1, 0x56, 0x87, - 0xda, 0xbe, 0xe5, 0x1f, 0x8a, 0x81, 0x63, 0x51, 0x88, 0x9d, 0x6d, 0xc9, 0x44, 0x9c, 0xe4, 0x45, - 0x3f, 0x84, 0x59, 0xd6, 0x93, 0x69, 0xe7, 0x6e, 0x97, 0x58, 0xbd, 0x70, 0xbe, 0xb8, 0x3b, 0x71, - 0xbb, 0xe7, 0x86, 0x87, 0x28, 0xf7, 0x6c, 0xdf, 0x95, 0x6a, 0xab, 0x4c, 0xc2, 0x09, 0x75, 0xe8, - 0x5d, 0x98, 0x32, 0x5c, 0xca, 0x06, 0xf7, 0xea, 0x14, 0x0f, 0xe8, 0x17, 0xf2, 0x05, 0xb4, 0x6d, - 0xf5, 0xa8, 0xf0, 0x7c, 0x20, 0x8e, 0x43, 0x1c, 0x96, 0x1e, 0x96, 0xe7, 0x0d, 0x68, 0xa7, 0x71, - 0x58, 0xad, 0xe4, 0x6e, 0x0c, 0xd1, 0x46, 0x5a, 0x4c, 0xd6, 0x6d, 0xcc, 0xb2, 0xf4, 0x68, 0x09, - 0x1c, 0x1c, 0x21, 0xa2, 0xef, 0x84, 0xe8, 0x6d, 0x87, 0x0f, 0x14, 0x33, 0x6b, 0x5f, 0x9e, 0x04, - 0x7d, 0x7b, 0xc0, 0x4f, 0x9d, 0x0c, 0xdf, 0x76, 0x70, 0x04, 0x59, 0x7b, 0x1b, 0x16, 0x46, 0x1c, - 0x89, 0xce, 0x81, 0xba, 0x4f, 0xc5, 0x75, 0x05, 0xb3, 0x3f, 0xd1, 0x05, 0x28, 0x1d, 0x90, 0xee, - 0x40, 0x9c, 0x53, 0x1c, 0xfc, 0xb8, 0x5d, 0x58, 0x57, 0xb4, 0x5f, 0x14, 0x60, 0x26, 0x88, 0x8c, - 0xef, 0x52, 0xd2, 0x3b, 0x85, 0x94, 0x69, 0x43, 0xd1, 0xeb, 0x53, 0x43, 0x14, 0xfd, 0xb5, 0xdc, - 0x27, 0x87, 0xdb, 0xc7, 0xe6, 0x8a, 0x38, 0xcd, 0xd8, 0x2f, 0xcc, 0xd1, 0xd0, 0xe3, 0xa8, 0x43, - 0x05, 0xcd, 0xfd, 0xe6, 0x84, 0xb8, 0xc7, 0x76, 0x2a, 0xed, 0xf7, 0x0a, 0x9c, 0x93, 0xb8, 0x4f, - 0xeb, 0x52, 0xb5, 0xf9, 0xa2, 0x0d, 0x32, 0xae, 0xad, 0x52, 0x93, 0xd4, 0x7e, 0x53, 0x10, 0xc5, - 0x35, 0xdc, 0x05, 0xeb, 0xf0, 0xa7, 0xb0, 0x8d, 0x47, 0x89, 0x88, 0xaf, 0x4f, 0x16, 0x99, 0x78, - 0x9e, 0xcc, 0x8c, 0xfb, 0x4e, 0x2a, 0xee, 0xb7, 0x5f, 0x08, 0xfd, 0xf8, 0xe8, 0xff, 0xb8, 0x00, - 0x8b, 0x99, 0x16, 0xa1, 0x6b, 0x50, 0x0e, 0x46, 0x3f, 0xee, 0xb9, 0x4a, 0x8c, 0x10, 0xf0, 0x60, - 0x41, 0x45, 0x26, 0x80, 0x4b, 0xfb, 0x8e, 0x67, 0xf9, 0x8e, 0x7b, 0x28, 0xfc, 0xf0, 0x95, 0x1c, - 0x96, 0xe2, 0x48, 0x48, 0x72, 0xc3, 0x1c, 0x73, 0x74, 0x4c, 0xc1, 0x12, 0x34, 0x7a, 0xc4, 0x0c, - 0x22, 0x26, 0x65, 0xee, 0x50, 0x27, 0x49, 0x2f, 0x19, 0x3f, 0xde, 0x04, 0x43, 0xc2, 0x02, 0x51, - 0xfb, 0x6d, 0x01, 0x5e, 0x19, 0xe3, 0x3a, 0x84, 0x13, 0x8e, 0x60, 0x13, 0xc6, 0x44, 0x61, 0x08, - 0x2e, 0x23, 0x29, 0xa7, 0x59, 0x19, 0x4e, 0xbb, 0xf5, 0x22, 0x4e, 0x13, 0xd1, 0x3d, 0xc6, 0x6d, - 0x8f, 0x53, 0x6e, 0xbb, 0x39, 0xa1, 0xdb, 0x52, 0xe7, 0x27, 0xe5, 0xb8, 0x8f, 0x8b, 0x89, 0xbc, - 0x13, 0xd7, 0xe6, 0x93, 0xcf, 0xbb, 0x0e, 0x94, 0x76, 0xba, 0xce, 0x4e, 0x38, 0x9a, 0xbd, 0x3d, - 0x59, 0x4c, 0x02, 0x33, 0x75, 0x76, 0xd3, 0x14, 0x0d, 0x3a, 
0xaa, 0x2a, 0x7c, 0x0d, 0x07, 0xe0, - 0x68, 0x2f, 0xe5, 0xbb, 0x77, 0x5e, 0x48, 0x4d, 0xe0, 0xb2, 0x40, 0xcf, 0x18, 0x3f, 0xd6, 0xf6, - 0x01, 0x62, 0x6b, 0x32, 0xba, 0xdc, 0x7d, 0xb9, 0xcb, 0x4d, 0xf0, 0x06, 0x11, 0x0d, 0xe3, 0x52, - 0x63, 0xac, 0x7d, 0x5f, 0xf4, 0xc5, 0xb1, 0xda, 0x36, 0x92, 0xda, 0xde, 0xca, 0x5d, 0x9c, 0x13, - 0x57, 0x77, 0xb9, 0x17, 0xff, 0x41, 0x11, 0x77, 0x68, 0xe1, 0x99, 0x93, 0x1f, 0xde, 0xb7, 0x93, - 0xc3, 0xfb, 0xa4, 0x59, 0x9b, 0x3d, 0xc2, 0xff, 0x43, 0x01, 0x24, 0x71, 0x6d, 0x92, 0x7e, 0xdf, - 0xb2, 0xcd, 0xff, 0xb9, 0x76, 0xf9, 0xbc, 0x3b, 0xe5, 0xaf, 0x0a, 0x89, 0x68, 0xf1, 0x7e, 0xb0, - 0x9d, 0x78, 0xa3, 0x8a, 0x8b, 0x8d, 0xb8, 0x69, 0xbe, 0x26, 0x40, 0x16, 0x9b, 0x59, 0x4c, 0x38, - 0x5b, 0x16, 0xbd, 0x0b, 0x45, 0x9f, 0x98, 0x61, 0x8c, 0xea, 0x13, 0xbe, 0x00, 0x48, 0x97, 0x12, - 0x62, 0x7a, 0x98, 0x43, 0x21, 0x1b, 0x66, 0xbb, 0xd2, 0x05, 0x6b, 0xd2, 0x99, 0x49, 0xbe, 0x9c, - 0xc5, 0x63, 0xbb, 0xbc, 0x8a, 0x13, 0xf8, 0xda, 0x2f, 0x93, 0x93, 0x87, 0x68, 0x1a, 0x27, 0xe2, - 0xad, 0x87, 0x09, 0x6f, 0xe5, 0xd9, 0x12, 0xbb, 0xf6, 0x77, 0xda, 0xc4, 0xbc, 0x77, 0x40, 0x6d, - 0x9f, 0x25, 0x49, 0xa6, 0xcb, 0x28, 0x5c, 0xea, 0x0f, 0x76, 0xba, 0x96, 0x91, 0x69, 0x8d, 0x38, - 0x25, 0x57, 0x84, 0xe0, 0xa5, 0xad, 0xf1, 0xac, 0xf8, 0x38, 0x1c, 0xed, 0xa3, 0x62, 0x78, 0x8b, - 0xe5, 0x9e, 0x6a, 0x93, 0xd3, 0x48, 0x9c, 0x6f, 0x04, 0x27, 0x3d, 0x48, 0x9b, 0x89, 0x0f, 0xd8, - 0x54, 0xe2, 0x83, 0xc4, 0x1a, 0x80, 0xf8, 0xb8, 0x62, 0x39, 0x36, 0x77, 0x8b, 0x1a, 0x6b, 0xbf, - 0x1f, 0x51, 0xb0, 0xc4, 0x85, 0xf6, 0x32, 0x2e, 0xbf, 0x37, 0xf3, 0x99, 0xc1, 0x83, 0x96, 0xff, - 0xee, 0x1b, 0x95, 0x88, 0xd2, 0x4b, 0x29, 0x11, 0xe9, 0x3c, 0x2a, 0x9f, 0x70, 0x1e, 0xfd, 0x29, - 0x59, 0x5a, 0xdb, 0xc4, 0x3c, 0x85, 0x26, 0xf1, 0x30, 0xd9, 0x24, 0x56, 0x27, 0x6b, 0x12, 0x6d, - 0x62, 0x8e, 0xe9, 0x13, 0xff, 0x54, 0x60, 0x61, 0x24, 0xf7, 0xc2, 0xaa, 0xab, 0x8c, 0xf9, 0xde, - 0xb5, 0x95, 0x34, 0xe6, 0x8b, 0x13, 0x9c, 0x92, 0x6c, 0x33, 0x52, 0x87, 0x4f, 0x3d, 0xb9, 0xc3, - 0xa7, 0x7d, 0xa8, 0xc2, 0x85, 0xac, 0x89, 0xfe, 0x65, 0x3d, 0x94, 0xa7, 0x9f, 0xb9, 0x0b, 0xa7, - 0xfd, 0xcc, 0xad, 0xfe, 0xe7, 0xcf, 0xdc, 0xc5, 0xd3, 0x79, 0xe6, 0xfe, 0xb0, 0x00, 0x17, 0xb3, - 0xef, 0x09, 0x27, 0xf4, 0xd6, 0x1d, 0xdf, 0x30, 0x0a, 0x2f, 0xff, 0x86, 0x81, 0x6e, 0xc3, 0x1c, - 0xe9, 0x04, 0xc7, 0x8c, 0x74, 0x59, 0xd3, 0xe2, 0xe7, 0x78, 0xba, 0x81, 0x86, 0x47, 0xcb, 0x73, - 0x77, 0x12, 0x14, 0x9c, 0xe2, 0xd4, 0x3e, 0x55, 0x01, 0x8d, 0xbe, 0x22, 0xa2, 0xdb, 0xe2, 0x65, - 0x33, 0x48, 0xc4, 0x6b, 0xf2, 0xcb, 0xe6, 0xe7, 0x47, 0xcb, 0x17, 0x47, 0x25, 0xa4, 0x37, 0xcf, - 0x8d, 0xc8, 0x85, 0xc1, 0xbb, 0xe8, 0xcd, 0xa4, 0x53, 0x3e, 0x3f, 0x5a, 0xce, 0xf8, 0x97, 0x01, - 0x3d, 0x42, 0x4a, 0xb9, 0xce, 0x84, 0xb3, 0x5d, 0xe2, 0xf9, 0x5b, 0xae, 0xb3, 0x43, 0xdb, 0x96, - 0xf8, 0x58, 0x3e, 0xd9, 0xcb, 0x5f, 0xf4, 0xb6, 0xb9, 0x21, 0x03, 0xe1, 0x24, 0x2e, 0x3a, 0x00, - 0xc4, 0x16, 0xda, 0x2e, 0xb1, 0xbd, 0x60, 0x4b, 0x4c, 0x5b, 0x71, 0x62, 0x6d, 0x35, 0xa1, 0x0d, - 0x6d, 0x8c, 0xa0, 0xe1, 0x0c, 0x0d, 0xe8, 0x1a, 0x94, 0x5d, 0x4a, 0x3c, 0xc7, 0x16, 0x2f, 0xb1, - 0x51, 0x94, 0x31, 0x5f, 0xc5, 0x82, 0x8a, 0xfe, 0x1f, 0xa6, 0x7a, 0xd4, 0xf3, 0x58, 0xfb, 0x2a, - 0x73, 0xc6, 0x79, 0xc1, 0x38, 0xb5, 0x19, 0x2c, 0xe3, 0x90, 0xae, 0x7d, 0xa0, 0x40, 0x1c, 0x22, - 0xde, 0x75, 0x2d, 0xe3, 0x5e, 0xf0, 0x82, 0xbb, 0x0e, 0xb3, 0x8e, 0x6b, 0x12, 0xdb, 0x7a, 0x12, - 0xb4, 0xe8, 0x20, 0xc0, 0x51, 0xc6, 0x3f, 0x90, 0x68, 0x38, 0xc1, 0xc9, 0x5a, 0xbb, 0xe1, 0xf4, - 0x7a, 0x8e, 0xcd, 0xaa, 0xb6, 0x08, 0xad, 0x54, 0xf3, 0x42, 0x0a, 0x96, 0xb8, 0xb4, 
0x8f, 0x15, - 0x98, 0x4f, 0xbd, 0x95, 0xa2, 0x9f, 0x29, 0x70, 0xd1, 0xcb, 0x34, 0x4e, 0xa4, 0xdc, 0xad, 0x49, - 0x9e, 0x48, 0x13, 0x00, 0x8d, 0x25, 0x61, 0xcf, 0x98, 0xdd, 0xe3, 0x31, 0x8a, 0xb5, 0xbf, 0x2a, - 0x70, 0x2e, 0xfd, 0xea, 0xfa, 0xdf, 0x68, 0x28, 0x7a, 0x13, 0x66, 0x82, 0xf9, 0xf1, 0x9b, 0xf4, - 0xb0, 0xd5, 0x14, 0x51, 0x38, 0x2f, 0xc0, 0x66, 0xb6, 0x62, 0x12, 0x96, 0xf9, 0xb4, 0x9f, 0x14, - 0xa0, 0x12, 0x76, 0x2c, 0xf4, 0xed, 0xf8, 0x15, 0x5d, 0x99, 0xf8, 0x74, 0x47, 0x87, 0x6e, 0xe4, - 0x25, 0xfd, 0xe5, 0xff, 0x0f, 0xc8, 0x95, 0x70, 0x5c, 0x0b, 0xc6, 0xeb, 0xec, 0x21, 0x2c, 0x39, - 0x71, 0x16, 0xf3, 0x4c, 0x9c, 0xda, 0x47, 0x2a, 0x2c, 0x8c, 0x34, 0x70, 0x74, 0x2b, 0x51, 0xf3, - 0xae, 0xa6, 0x6a, 0xde, 0xe2, 0x88, 0xc0, 0x89, 0x95, 0xbc, 0xec, 0x4a, 0xa4, 0x9e, 0x62, 0x25, - 0x2a, 0xe6, 0xad, 0x44, 0xa5, 0xe3, 0x2b, 0x51, 0x2a, 0x3a, 0xe5, 0x5c, 0xd1, 0xe9, 0xc3, 0x7c, - 0x6a, 0x22, 0x41, 0x37, 0xa0, 0x62, 0xd9, 0x1e, 0x35, 0x06, 0x2e, 0x15, 0x6f, 0xad, 0xd1, 0xc8, - 0xda, 0x12, 0xeb, 0x38, 0xe2, 0x40, 0x75, 0x98, 0xf6, 0x8c, 0x3d, 0xda, 0x19, 0x74, 0x69, 0x87, - 0x07, 0xa4, 0x12, 0x7f, 0x31, 0xdd, 0x0e, 0x09, 0x38, 0xe6, 0xd1, 0xfe, 0x55, 0x84, 0x59, 0x79, - 0xa0, 0xc8, 0xf1, 0x89, 0xf8, 0x3d, 0x98, 0x21, 0xb6, 0xed, 0xf8, 0x24, 0x18, 0x1c, 0x0b, 0xb9, - 0x1f, 0xbf, 0x64, 0x3d, 0xfa, 0x9d, 0x18, 0x22, 0x78, 0xfc, 0x8a, 0x52, 0x59, 0xa2, 0x60, 0x59, - 0x13, 0xba, 0x23, 0xa6, 0x45, 0x35, 0xff, 0xb4, 0x58, 0x49, 0x4d, 0x8a, 0x75, 0x98, 0x8e, 0x26, - 0x22, 0xf1, 0xcf, 0x09, 0x91, 0x7f, 0xe2, 0x9c, 0x8c, 0x79, 0x90, 0x9e, 0x88, 0x62, 0x89, 0x47, - 0x71, 0xee, 0x98, 0x1b, 0x5d, 0x7a, 0x14, 0x2d, 0x9f, 0xe8, 0x28, 0x9a, 0x31, 0x47, 0x4e, 0x9d, - 0xca, 0x1c, 0x59, 0xfb, 0x1a, 0x9c, 0x4b, 0x47, 0x70, 0xa2, 0xcf, 0x6f, 0x5b, 0x80, 0x46, 0xf5, - 0x3f, 0x6f, 0xf6, 0x1a, 0x95, 0x88, 0x0b, 0x51, 0xe3, 0xfa, 0xd3, 0x67, 0x4b, 0x67, 0x3e, 0x79, - 0xb6, 0x74, 0xe6, 0xb3, 0x67, 0x4b, 0x67, 0x7e, 0x34, 0x5c, 0x52, 0x9e, 0x0e, 0x97, 0x94, 0x4f, - 0x86, 0x4b, 0xca, 0x67, 0xc3, 0x25, 0xe5, 0x6f, 0xc3, 0x25, 0xe5, 0xa7, 0x7f, 0x5f, 0x3a, 0xf3, - 0xa8, 0x70, 0xb0, 0xfa, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa9, 0x03, 0x52, 0xf9, 0x3c, 0x2a, - 0x00, 0x00, -} diff --git a/vendor/github.com/openshift/api/image/v1/legacy.go b/vendor/github.com/openshift/api/image/v1/legacy.go deleted file mode 100644 index 02bbaa2906..0000000000 --- a/vendor/github.com/openshift/api/image/v1/legacy.go +++ /dev/null @@ -1,33 +0,0 @@ -package v1 - -import ( - "github.com/openshift/api/image/docker10" - "github.com/openshift/api/image/dockerpre012" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -var ( - legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} - legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, docker10.AddToSchemeInCoreGroup, dockerpre012.AddToSchemeInCoreGroup, corev1.AddToScheme) - DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme -) - -// Adds the list of known types to api.Scheme. -func addLegacyKnownTypes(scheme *runtime.Scheme) error { - types := []runtime.Object{ - &Image{}, - &ImageList{}, - &ImageSignature{}, - &ImageStream{}, - &ImageStreamList{}, - &ImageStreamMapping{}, - &ImageStreamTag{}, - &ImageStreamTagList{}, - &ImageStreamImage{}, - &ImageStreamImport{}, - } - scheme.AddKnownTypes(legacyGroupVersion, types...) 
- return nil -} diff --git a/vendor/github.com/openshift/api/image/v1/register.go b/vendor/github.com/openshift/api/image/v1/register.go deleted file mode 100644 index 46f785c471..0000000000 --- a/vendor/github.com/openshift/api/image/v1/register.go +++ /dev/null @@ -1,52 +0,0 @@ -package v1 - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - - "github.com/openshift/api/image/docker10" - "github.com/openshift/api/image/dockerpre012" -) - -var ( - GroupName = "image.openshift.io" - GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} - schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, docker10.AddToScheme, dockerpre012.AddToScheme, corev1.AddToScheme) - // Install is a function which adds this version to a scheme - Install = schemeBuilder.AddToScheme - - // SchemeGroupVersion generated code relies on this name - // Deprecated - SchemeGroupVersion = GroupVersion - // AddToScheme exists solely to keep the old generators creating valid code - // DEPRECATED - AddToScheme = schemeBuilder.AddToScheme -) - -// Resource generated code relies on this being here, but it logically belongs to the group -// DEPRECATED -func Resource(resource string) schema.GroupResource { - return schema.GroupResource{Group: GroupName, Resource: resource} -} - -// Adds the list of known types to api.Scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(GroupVersion, - &Image{}, - &ImageList{}, - &ImageSignature{}, - &ImageStream{}, - &ImageStreamList{}, - &ImageStreamMapping{}, - &ImageStreamTag{}, - &ImageStreamTagList{}, - &ImageStreamImage{}, - &ImageStreamLayers{}, - &ImageStreamImport{}, - &corev1.SecretList{}, - ) - metav1.AddToGroupVersion(scheme, GroupVersion) - return nil -} diff --git a/vendor/github.com/openshift/api/image/v1/types.go b/vendor/github.com/openshift/api/image/v1/types.go deleted file mode 100644 index 90b70f4670..0000000000 --- a/vendor/github.com/openshift/api/image/v1/types.go +++ /dev/null @@ -1,577 +0,0 @@ -package v1 - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" -) - -const ( - // ResourceImageStreams represents a number of image streams in a project. - ResourceImageStreams corev1.ResourceName = "openshift.io/imagestreams" - - // ResourceImageStreamImages represents a number of unique references to images in all image stream - // statuses of a project. - ResourceImageStreamImages corev1.ResourceName = "openshift.io/images" - - // ResourceImageStreamTags represents a number of unique references to images in all image stream specs - // of a project. - ResourceImageStreamTags corev1.ResourceName = "openshift.io/image-tags" - - // Limit that applies to images. Used with a max["storage"] LimitRangeItem to set - // the maximum size of an image. - LimitTypeImage corev1.LimitType = "openshift.io/Image" - - // Limit that applies to image streams. Used with a max[resource] LimitRangeItem to set the maximum number - // of resource. Where the resource is one of "openshift.io/images" and "openshift.io/image-tags". - LimitTypeImageStream corev1.LimitType = "openshift.io/ImageStream" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ImageList is a list of Image objects. -type ImageList struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. 
- metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is a list of images - Items []Image `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Image is an immutable representation of a container image and metadata at a point in time. -type Image struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // DockerImageReference is the string that can be used to pull this image. - DockerImageReference string `json:"dockerImageReference,omitempty" protobuf:"bytes,2,opt,name=dockerImageReference"` - // DockerImageMetadata contains metadata about this image - // +patchStrategy=replace - DockerImageMetadata runtime.RawExtension `json:"dockerImageMetadata,omitempty" patchStrategy:"replace" protobuf:"bytes,3,opt,name=dockerImageMetadata"` - // DockerImageMetadataVersion conveys the version of the object, which if empty defaults to "1.0" - DockerImageMetadataVersion string `json:"dockerImageMetadataVersion,omitempty" protobuf:"bytes,4,opt,name=dockerImageMetadataVersion"` - // DockerImageManifest is the raw JSON of the manifest - DockerImageManifest string `json:"dockerImageManifest,omitempty" protobuf:"bytes,5,opt,name=dockerImageManifest"` - // DockerImageLayers represents the layers in the image. May not be set if the image does not define that data. - DockerImageLayers []ImageLayer `json:"dockerImageLayers" protobuf:"bytes,6,rep,name=dockerImageLayers"` - // Signatures holds all signatures of the image. - // +patchMergeKey=name - // +patchStrategy=merge - Signatures []ImageSignature `json:"signatures,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=signatures"` - // DockerImageSignatures provides the signatures as opaque blobs. This is a part of manifest schema v1. - DockerImageSignatures [][]byte `json:"dockerImageSignatures,omitempty" protobuf:"bytes,8,rep,name=dockerImageSignatures"` - // DockerImageManifestMediaType specifies the mediaType of manifest. This is a part of manifest schema v2. - DockerImageManifestMediaType string `json:"dockerImageManifestMediaType,omitempty" protobuf:"bytes,9,opt,name=dockerImageManifestMediaType"` - // DockerImageConfig is a JSON blob that the runtime uses to set up the container. This is a part of manifest schema v2. - DockerImageConfig string `json:"dockerImageConfig,omitempty" protobuf:"bytes,10,opt,name=dockerImageConfig"` -} - -// ImageLayer represents a single layer of the image. Some images may have multiple layers. Some may have none. -type ImageLayer struct { - // Name of the layer as defined by the underlying store. - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Size of the layer in bytes as defined by the underlying store. - LayerSize int64 `json:"size" protobuf:"varint,2,opt,name=size"` - // MediaType of the referenced object. - MediaType string `json:"mediaType" protobuf:"bytes,3,opt,name=mediaType"` -} - -// +genclient -// +genclient:onlyVerbs=create,delete -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ImageSignature holds a signature of an image. It allows to verify image identity and possibly other claims -// as long as the signature is trusted. Based on this information it is possible to restrict runnable images -// to those matching cluster-wide policy. 
-// Mandatory fields should be parsed by clients doing image verification. The others are parsed from -// signature's content by the server. They serve just an informative purpose. -type ImageSignature struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Required: Describes a type of stored blob. - Type string `json:"type" protobuf:"bytes,2,opt,name=type"` - // Required: An opaque binary string which is an image's signature. - Content []byte `json:"content" protobuf:"bytes,3,opt,name=content"` - // Conditions represent the latest available observations of a signature's current state. - // +patchMergeKey=type - // +patchStrategy=merge - Conditions []SignatureCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"` - - // Following metadata fields will be set by server if the signature content is successfully parsed and - // the information available. - - // A human readable string representing image's identity. It could be a product name and version, or an - // image pull spec (e.g. "registry.access.redhat.com/rhel7/rhel:7.2"). - ImageIdentity string `json:"imageIdentity,omitempty" protobuf:"bytes,5,opt,name=imageIdentity"` - // Contains claims from the signature. - SignedClaims map[string]string `json:"signedClaims,omitempty" protobuf:"bytes,6,rep,name=signedClaims"` - // If specified, it is the time of signature's creation. - Created *metav1.Time `json:"created,omitempty" protobuf:"bytes,7,opt,name=created"` - // If specified, it holds information about an issuer of signing certificate or key (a person or entity - // who signed the signing certificate or key). - IssuedBy *SignatureIssuer `json:"issuedBy,omitempty" protobuf:"bytes,8,opt,name=issuedBy"` - // If specified, it holds information about a subject of signing certificate or key (a person or entity - // who signed the image). - IssuedTo *SignatureSubject `json:"issuedTo,omitempty" protobuf:"bytes,9,opt,name=issuedTo"` -} - -/// SignatureConditionType is a type of image signature condition. -type SignatureConditionType string - -// SignatureCondition describes an image signature condition of particular kind at particular probe time. -type SignatureCondition struct { - // Type of signature condition, Complete or Failed. - Type SignatureConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=SignatureConditionType"` - // Status of the condition, one of True, False, Unknown. - Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` - // Last time the condition was checked. - LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` - // Last time the condition transit from one status to another. - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` - // (brief) reason for the condition's last transition. - Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` - // Human readable message indicating details about last transition. - Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` -} - -// SignatureGenericEntity holds a generic information about a person or entity who is an issuer or a subject -// of signing certificate or key. -type SignatureGenericEntity struct { - // Organization name. 
- Organization string `json:"organization,omitempty" protobuf:"bytes,1,opt,name=organization"` - // Common name (e.g. openshift-signing-service). - CommonName string `json:"commonName,omitempty" protobuf:"bytes,2,opt,name=commonName"` -} - -// SignatureIssuer holds information about an issuer of signing certificate or key. -type SignatureIssuer struct { - SignatureGenericEntity `json:",inline" protobuf:"bytes,1,opt,name=signatureGenericEntity"` -} - -// SignatureSubject holds information about a person or entity who created the signature. -type SignatureSubject struct { - SignatureGenericEntity `json:",inline" protobuf:"bytes,1,opt,name=signatureGenericEntity"` - // If present, it is a human readable key id of public key belonging to the subject used to verify image - // signature. It should contain at least 64 lowest bits of public key's fingerprint (e.g. - // 0x685ebe62bf278440). - PublicKeyID string `json:"publicKeyID" protobuf:"bytes,2,opt,name=publicKeyID"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ImageStreamList is a list of ImageStream objects. -type ImageStreamList struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is a list of imageStreams - Items []ImageStream `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient -// +genclient:method=Secrets,verb=get,subresource=secrets,result=k8s.io/api/core/v1.SecretList -// +genclient:method=Layers,verb=get,subresource=layers,result=github.com/openshift/api/image/v1.ImageStreamLayers -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ImageStream stores a mapping of tags to images, metadata overrides that are applied -// when images are tagged in a stream, and an optional reference to a container image -// repository on a registry. -type ImageStream struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec describes the desired state of this stream - Spec ImageStreamSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - // Status describes the current state of this stream - Status ImageStreamStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// ImageStreamSpec represents options for ImageStreams. -type ImageStreamSpec struct { - // lookupPolicy controls how other resources reference images within this namespace. - LookupPolicy ImageLookupPolicy `json:"lookupPolicy,omitempty" protobuf:"bytes,3,opt,name=lookupPolicy"` - // dockerImageRepository is optional, if specified this stream is backed by a container repository on this server - // Deprecated: This field is deprecated as of v3.7 and will be removed in a future release. - // Specify the source for the tags to be imported in each tag via the spec.tags.from reference instead. - DockerImageRepository string `json:"dockerImageRepository,omitempty" protobuf:"bytes,1,opt,name=dockerImageRepository"` - // tags map arbitrary string values to specific image locators - // +patchMergeKey=name - // +patchStrategy=merge - Tags []TagReference `json:"tags,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=tags"` -} - -// ImageLookupPolicy describes how an image stream can be used to override the image references -// used by pods, builds, and other resources in a namespace. 
-type ImageLookupPolicy struct { - // local will change the docker short image references (like "mysql" or - // "php:latest") on objects in this namespace to the image ID whenever they match - // this image stream, instead of reaching out to a remote registry. The name will - // be fully qualified to an image ID if found. The tag's referencePolicy is taken - // into account on the replaced value. Only works within the current namespace. - Local bool `json:"local" protobuf:"varint,3,opt,name=local"` -} - -// TagReference specifies optional annotations for images using this tag and an optional reference to an ImageStreamTag, ImageStreamImage, or DockerImage this tag should track. -type TagReference struct { - // Name of the tag - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Optional; if specified, annotations that are applied to images retrieved via ImageStreamTags. - // +optional - Annotations map[string]string `json:"annotations" protobuf:"bytes,2,rep,name=annotations"` - // Optional; if specified, a reference to another image that this tag should point to. Valid values - // are ImageStreamTag, ImageStreamImage, and DockerImage. ImageStreamTag references - // can only reference a tag within this same ImageStream. - From *corev1.ObjectReference `json:"from,omitempty" protobuf:"bytes,3,opt,name=from"` - // Reference states if the tag will be imported. Default value is false, which means the tag will - // be imported. - Reference bool `json:"reference,omitempty" protobuf:"varint,4,opt,name=reference"` - // Generation is a counter that tracks mutations to the spec tag (user intent). When a tag reference - // is changed the generation is set to match the current stream generation (which is incremented every - // time spec is changed). Other processes in the system like the image importer observe that the - // generation of spec tag is newer than the generation recorded in the status and use that as a trigger - // to import the newest remote tag. To trigger a new import, clients may set this value to zero which - // will reset the generation to the latest stream generation. Legacy clients will send this value as - // nil which will be merged with the current tag generation. - // +optional - Generation *int64 `json:"generation" protobuf:"varint,5,opt,name=generation"` - // ImportPolicy is information that controls how images may be imported by the server. - ImportPolicy TagImportPolicy `json:"importPolicy,omitempty" protobuf:"bytes,6,opt,name=importPolicy"` - // ReferencePolicy defines how other components should consume the image. - ReferencePolicy TagReferencePolicy `json:"referencePolicy,omitempty" protobuf:"bytes,7,opt,name=referencePolicy"` -} - -// TagImportPolicy controls how images related to this tag will be imported. -type TagImportPolicy struct { - // Insecure is true if the server may bypass certificate verification or connect directly over HTTP during image import. - Insecure bool `json:"insecure,omitempty" protobuf:"varint,1,opt,name=insecure"` - // Scheduled indicates to the server that this tag should be periodically checked to ensure it is up to date, and imported - Scheduled bool `json:"scheduled,omitempty" protobuf:"varint,2,opt,name=scheduled"` -} - -// TagReferencePolicyType describes how pull-specs for images in an image stream tag are generated when -// image change triggers are fired. 
-type TagReferencePolicyType string - -const ( - // SourceTagReferencePolicy indicates the image's original location should be used when the image stream tag - // is resolved into other resources (builds and deployment configurations). - SourceTagReferencePolicy TagReferencePolicyType = "Source" - // LocalTagReferencePolicy indicates the image should prefer to pull via the local integrated registry, - // falling back to the remote location if the integrated registry has not been configured. The reference will - // use the internal DNS name or registry service IP. - LocalTagReferencePolicy TagReferencePolicyType = "Local" -) - -// TagReferencePolicy describes how pull-specs for images in this image stream tag are generated when -// image change triggers in deployment configs or builds are resolved. This allows the image stream -// author to control how images are accessed. -type TagReferencePolicy struct { - // Type determines how the image pull spec should be transformed when the image stream tag is used in - // deployment config triggers or new builds. The default value is `Source`, indicating the original - // location of the image should be used (if imported). The user may also specify `Local`, indicating - // that the pull spec should point to the integrated container image registry and leverage the registry's - // ability to proxy the pull to an upstream registry. `Local` allows the credentials used to pull this - // image to be managed from the image stream's namespace, so others on the platform can access a remote - // image but have no access to the remote secret. It also allows the image layers to be mirrored into - // the local registry which the images can still be pulled even if the upstream registry is unavailable. - Type TagReferencePolicyType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=TagReferencePolicyType"` -} - -// ImageStreamStatus contains information about the state of this image stream. -type ImageStreamStatus struct { - // DockerImageRepository represents the effective location this stream may be accessed at. - // May be empty until the server determines where the repository is located - DockerImageRepository string `json:"dockerImageRepository" protobuf:"bytes,1,opt,name=dockerImageRepository"` - // PublicDockerImageRepository represents the public location from where the image can - // be pulled outside the cluster. This field may be empty if the administrator - // has not exposed the integrated registry externally. - PublicDockerImageRepository string `json:"publicDockerImageRepository,omitempty" protobuf:"bytes,3,opt,name=publicDockerImageRepository"` - // Tags are a historical record of images associated with each tag. The first entry in the - // TagEvent array is the currently tagged image. - // +patchMergeKey=tag - // +patchStrategy=merge - Tags []NamedTagEventList `json:"tags,omitempty" patchStrategy:"merge" patchMergeKey:"tag" protobuf:"bytes,2,rep,name=tags"` -} - -// NamedTagEventList relates a tag to its image history. -type NamedTagEventList struct { - // Tag is the tag for which the history is recorded - Tag string `json:"tag" protobuf:"bytes,1,opt,name=tag"` - // Standard object's metadata. - Items []TagEvent `json:"items" protobuf:"bytes,2,rep,name=items"` - // Conditions is an array of conditions that apply to the tag event list. - Conditions []TagEventCondition `json:"conditions,omitempty" protobuf:"bytes,3,rep,name=conditions"` -} - -// TagEvent is used by ImageStreamStatus to keep a historical record of images associated with a tag. 
-type TagEvent struct { - // Created holds the time the TagEvent was created - Created metav1.Time `json:"created" protobuf:"bytes,1,opt,name=created"` - // DockerImageReference is the string that can be used to pull this image - DockerImageReference string `json:"dockerImageReference" protobuf:"bytes,2,opt,name=dockerImageReference"` - // Image is the image - Image string `json:"image" protobuf:"bytes,3,opt,name=image"` - // Generation is the spec tag generation that resulted in this tag being updated - Generation int64 `json:"generation" protobuf:"varint,4,opt,name=generation"` -} - -type TagEventConditionType string - -// These are valid conditions of TagEvents. -const ( - // ImportSuccess with status False means the import of the specific tag failed - ImportSuccess TagEventConditionType = "ImportSuccess" -) - -// TagEventCondition contains condition information for a tag event. -type TagEventCondition struct { - // Type of tag event condition, currently only ImportSuccess - Type TagEventConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=TagEventConditionType"` - // Status of the condition, one of True, False, Unknown. - Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` - // LastTransitionTIme is the time the condition transitioned from one status to another. - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` - // Reason is a brief machine readable explanation for the condition's last transition. - Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` - // Message is a human readable description of the details about last transition, complementing reason. - Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` - // Generation is the spec tag generation that this status corresponds to - Generation int64 `json:"generation" protobuf:"varint,6,opt,name=generation"` -} - -// +genclient -// +genclient:skipVerbs=get,list,create,update,patch,delete,deleteCollection,watch -// +genclient:method=Create,verb=create,result=k8s.io/apimachinery/pkg/apis/meta/v1.Status -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ImageStreamMapping represents a mapping from a single tag to a container image as -// well as the reference to the container image stream the image came from. -type ImageStreamMapping struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Image is a container image. - Image Image `json:"image" protobuf:"bytes,2,opt,name=image"` - // Tag is a string value this image can be located with inside the stream. - Tag string `json:"tag" protobuf:"bytes,3,opt,name=tag"` -} - -// +genclient -// +genclient:onlyVerbs=get,list,create,update,delete -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ImageStreamTag represents an Image that is retrieved by tag name from an ImageStream. -type ImageStreamTag struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // tag is the spec tag associated with this image stream tag, and it may be null - // if only pushes have occurred to this image stream. 
- Tag *TagReference `json:"tag" protobuf:"bytes,2,opt,name=tag"` - - // generation is the current generation of the tagged image - if tag is provided - // and this value is not equal to the tag generation, a user has requested an - // import that has not completed, or conditions will be filled out indicating any - // error. - Generation int64 `json:"generation" protobuf:"varint,3,opt,name=generation"` - - // lookupPolicy indicates whether this tag will handle image references in this - // namespace. - LookupPolicy ImageLookupPolicy `json:"lookupPolicy" protobuf:"varint,6,opt,name=lookupPolicy"` - - // conditions is an array of conditions that apply to the image stream tag. - Conditions []TagEventCondition `json:"conditions,omitempty" protobuf:"bytes,4,rep,name=conditions"` - - // image associated with the ImageStream and tag. - Image Image `json:"image" protobuf:"bytes,5,opt,name=image"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ImageStreamTagList is a list of ImageStreamTag objects. -type ImageStreamTagList struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of image stream tags - Items []ImageStreamTag `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient -// +genclient:onlyVerbs=get -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ImageStreamImage represents an Image that is retrieved by image name from an ImageStream. -type ImageStreamImage struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Image associated with the ImageStream and image name. - Image Image `json:"image" protobuf:"bytes,2,opt,name=image"` -} - -// DockerImageReference points to a container image. -type DockerImageReference struct { - // Registry is the registry that contains the container image - Registry string `protobuf:"bytes,1,opt,name=registry"` - // Namespace is the namespace that contains the container image - Namespace string `protobuf:"bytes,2,opt,name=namespace"` - // Name is the name of the container image - Name string `protobuf:"bytes,3,opt,name=name"` - // Tag is which tag of the container image is being referenced - Tag string `protobuf:"bytes,4,opt,name=tag"` - // ID is the identifier for the container image - ID string `protobuf:"bytes,5,opt,name=iD"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ImageStreamLayers describes information about the layers referenced by images in this -// image stream. -type ImageStreamLayers struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // blobs is a map of blob name to metadata about the blob. - Blobs map[string]ImageLayerData `json:"blobs" protobuf:"bytes,2,rep,name=blobs"` - // images is a map between an image name and the names of the blobs and config that - // comprise the image. - Images map[string]ImageBlobReferences `json:"images" protobuf:"bytes,3,rep,name=images"` -} - -// ImageBlobReferences describes the blob references within an image. -type ImageBlobReferences struct { - // imageMissing is true if the image is referenced by the image stream but the image - // object has been deleted from the API by an administrator. 
When this field is set, - // layers and config fields may be empty and callers that depend on the image metadata - // should consider the image to be unavailable for download or viewing. - // +optional - ImageMissing bool `json:"imageMissing" protobuf:"varint,3,opt,name=imageMissing"` - // layers is the list of blobs that compose this image, from base layer to top layer. - // All layers referenced by this array will be defined in the blobs map. Some images - // may have zero layers. - // +optional - Layers []string `json:"layers" protobuf:"bytes,1,rep,name=layers"` - // config, if set, is the blob that contains the image config. Some images do - // not have separate config blobs and this field will be set to nil if so. - // +optional - Config *string `json:"config" protobuf:"bytes,2,opt,name=config"` -} - -// ImageLayerData contains metadata about an image layer. -type ImageLayerData struct { - // Size of the layer in bytes as defined by the underlying store. This field is - // optional if the necessary information about size is not available. - LayerSize *int64 `json:"size" protobuf:"varint,1,opt,name=size"` - // MediaType of the referenced object. - MediaType string `json:"mediaType" protobuf:"bytes,2,opt,name=mediaType"` -} - -// +genclient -// +genclient:onlyVerbs=create -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// The image stream import resource provides an easy way for a user to find and import container images -// from other container image registries into the server. Individual images or an entire image repository may -// be imported, and users may choose to see the results of the import prior to tagging the resulting -// images into the specified image stream. -// -// This API is intended for end-user tools that need to see the metadata of the image prior to import -// (for instance, to generate an application from it). Clients that know the desired image can continue -// to create spec.tags directly into their image streams. -type ImageStreamImport struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec is a description of the images that the user wishes to import - Spec ImageStreamImportSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - // Status is the the result of importing the image - Status ImageStreamImportStatus `json:"status" protobuf:"bytes,3,opt,name=status"` -} - -// ImageStreamImportSpec defines what images should be imported. -type ImageStreamImportSpec struct { - // Import indicates whether to perform an import - if so, the specified tags are set on the spec - // and status of the image stream defined by the type meta. - Import bool `json:"import" protobuf:"varint,1,opt,name=import"` - // Repository is an optional import of an entire container image repository. A maximum limit on the - // number of tags imported this way is imposed by the server. - Repository *RepositoryImportSpec `json:"repository,omitempty" protobuf:"bytes,2,opt,name=repository"` - // Images are a list of individual images to import. - Images []ImageImportSpec `json:"images,omitempty" protobuf:"bytes,3,rep,name=images"` -} - -// ImageStreamImportStatus contains information about the status of an image stream import. -type ImageStreamImportStatus struct { - // Import is the image stream that was successfully updated or created when 'to' was set. 
- Import *ImageStream `json:"import,omitempty" protobuf:"bytes,1,opt,name=import"` - // Repository is set if spec.repository was set to the outcome of the import - Repository *RepositoryImportStatus `json:"repository,omitempty" protobuf:"bytes,2,opt,name=repository"` - // Images is set with the result of importing spec.images - Images []ImageImportStatus `json:"images,omitempty" protobuf:"bytes,3,rep,name=images"` -} - -// RepositoryImportSpec describes a request to import images from a container image repository. -type RepositoryImportSpec struct { - // From is the source for the image repository to import; only kind DockerImage and a name of a container image repository is allowed - From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"` - - // ImportPolicy is the policy controlling how the image is imported - ImportPolicy TagImportPolicy `json:"importPolicy,omitempty" protobuf:"bytes,2,opt,name=importPolicy"` - // ReferencePolicy defines how other components should consume the image - ReferencePolicy TagReferencePolicy `json:"referencePolicy,omitempty" protobuf:"bytes,4,opt,name=referencePolicy"` - // IncludeManifest determines if the manifest for each image is returned in the response - IncludeManifest bool `json:"includeManifest,omitempty" protobuf:"varint,3,opt,name=includeManifest"` -} - -// RepositoryImportStatus describes the result of an image repository import -type RepositoryImportStatus struct { - // Status reflects whether any failure occurred during import - Status metav1.Status `json:"status,omitempty" protobuf:"bytes,1,opt,name=status"` - // Images is a list of images successfully retrieved by the import of the repository. - Images []ImageImportStatus `json:"images,omitempty" protobuf:"bytes,2,rep,name=images"` - // AdditionalTags are tags that exist in the repository but were not imported because - // a maximum limit of automatic imports was applied. - AdditionalTags []string `json:"additionalTags,omitempty" protobuf:"bytes,3,rep,name=additionalTags"` -} - -// ImageImportSpec describes a request to import a specific image. -type ImageImportSpec struct { - // From is the source of an image to import; only kind DockerImage is allowed - From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"` - // To is a tag in the current image stream to assign the imported image to, if name is not specified the default tag from from.name will be used - To *corev1.LocalObjectReference `json:"to,omitempty" protobuf:"bytes,2,opt,name=to"` - - // ImportPolicy is the policy controlling how the image is imported - ImportPolicy TagImportPolicy `json:"importPolicy,omitempty" protobuf:"bytes,3,opt,name=importPolicy"` - // ReferencePolicy defines how other components should consume the image - ReferencePolicy TagReferencePolicy `json:"referencePolicy,omitempty" protobuf:"bytes,5,opt,name=referencePolicy"` - // IncludeManifest determines if the manifest for each image is returned in the response - IncludeManifest bool `json:"includeManifest,omitempty" protobuf:"varint,4,opt,name=includeManifest"` -} - -// ImageImportStatus describes the result of an image import. 
-type ImageImportStatus struct { - // Status is the status of the image import, including errors encountered while retrieving the image - Status metav1.Status `json:"status" protobuf:"bytes,1,opt,name=status"` - // Image is the metadata of that image, if the image was located - Image *Image `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"` - // Tag is the tag this image was located under, if any - Tag string `json:"tag,omitempty" protobuf:"bytes,3,opt,name=tag"` -} diff --git a/vendor/github.com/openshift/api/image/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/image/v1/zz_generated.deepcopy.go deleted file mode 100644 index cbde157c64..0000000000 --- a/vendor/github.com/openshift/api/image/v1/zz_generated.deepcopy.go +++ /dev/null @@ -1,904 +0,0 @@ -// +build !ignore_autogenerated - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1 - -import ( - corev1 "k8s.io/api/core/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DockerImageReference) DeepCopyInto(out *DockerImageReference) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerImageReference. -func (in *DockerImageReference) DeepCopy() *DockerImageReference { - if in == nil { - return nil - } - out := new(DockerImageReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Image) DeepCopyInto(out *Image) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.DockerImageMetadata.DeepCopyInto(&out.DockerImageMetadata) - if in.DockerImageLayers != nil { - in, out := &in.DockerImageLayers, &out.DockerImageLayers - *out = make([]ImageLayer, len(*in)) - copy(*out, *in) - } - if in.Signatures != nil { - in, out := &in.Signatures, &out.Signatures - *out = make([]ImageSignature, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.DockerImageSignatures != nil { - in, out := &in.DockerImageSignatures, &out.DockerImageSignatures - *out = make([][]byte, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = make([]byte, len(*in)) - copy(*out, *in) - } - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image. -func (in *Image) DeepCopy() *Image { - if in == nil { - return nil - } - out := new(Image) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Image) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageBlobReferences) DeepCopyInto(out *ImageBlobReferences) { - *out = *in - if in.Layers != nil { - in, out := &in.Layers, &out.Layers - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Config != nil { - in, out := &in.Config, &out.Config - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageBlobReferences. 
-func (in *ImageBlobReferences) DeepCopy() *ImageBlobReferences { - if in == nil { - return nil - } - out := new(ImageBlobReferences) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageImportSpec) DeepCopyInto(out *ImageImportSpec) { - *out = *in - out.From = in.From - if in.To != nil { - in, out := &in.To, &out.To - *out = new(corev1.LocalObjectReference) - **out = **in - } - out.ImportPolicy = in.ImportPolicy - out.ReferencePolicy = in.ReferencePolicy - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageImportSpec. -func (in *ImageImportSpec) DeepCopy() *ImageImportSpec { - if in == nil { - return nil - } - out := new(ImageImportSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageImportStatus) DeepCopyInto(out *ImageImportStatus) { - *out = *in - in.Status.DeepCopyInto(&out.Status) - if in.Image != nil { - in, out := &in.Image, &out.Image - *out = new(Image) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageImportStatus. -func (in *ImageImportStatus) DeepCopy() *ImageImportStatus { - if in == nil { - return nil - } - out := new(ImageImportStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageLayer) DeepCopyInto(out *ImageLayer) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageLayer. -func (in *ImageLayer) DeepCopy() *ImageLayer { - if in == nil { - return nil - } - out := new(ImageLayer) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageLayerData) DeepCopyInto(out *ImageLayerData) { - *out = *in - if in.LayerSize != nil { - in, out := &in.LayerSize, &out.LayerSize - *out = new(int64) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageLayerData. -func (in *ImageLayerData) DeepCopy() *ImageLayerData { - if in == nil { - return nil - } - out := new(ImageLayerData) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageList) DeepCopyInto(out *ImageList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Image, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageList. -func (in *ImageList) DeepCopy() *ImageList { - if in == nil { - return nil - } - out := new(ImageList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ImageList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ImageLookupPolicy) DeepCopyInto(out *ImageLookupPolicy) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageLookupPolicy. -func (in *ImageLookupPolicy) DeepCopy() *ImageLookupPolicy { - if in == nil { - return nil - } - out := new(ImageLookupPolicy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageSignature) DeepCopyInto(out *ImageSignature) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Content != nil { - in, out := &in.Content, &out.Content - *out = make([]byte, len(*in)) - copy(*out, *in) - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]SignatureCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.SignedClaims != nil { - in, out := &in.SignedClaims, &out.SignedClaims - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Created != nil { - in, out := &in.Created, &out.Created - *out = (*in).DeepCopy() - } - if in.IssuedBy != nil { - in, out := &in.IssuedBy, &out.IssuedBy - *out = new(SignatureIssuer) - **out = **in - } - if in.IssuedTo != nil { - in, out := &in.IssuedTo, &out.IssuedTo - *out = new(SignatureSubject) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSignature. -func (in *ImageSignature) DeepCopy() *ImageSignature { - if in == nil { - return nil - } - out := new(ImageSignature) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ImageSignature) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageStream) DeepCopyInto(out *ImageStream) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStream. -func (in *ImageStream) DeepCopy() *ImageStream { - if in == nil { - return nil - } - out := new(ImageStream) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ImageStream) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageStreamImage) DeepCopyInto(out *ImageStreamImage) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Image.DeepCopyInto(&out.Image) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamImage. -func (in *ImageStreamImage) DeepCopy() *ImageStreamImage { - if in == nil { - return nil - } - out := new(ImageStreamImage) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *ImageStreamImage) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageStreamImport) DeepCopyInto(out *ImageStreamImport) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamImport. -func (in *ImageStreamImport) DeepCopy() *ImageStreamImport { - if in == nil { - return nil - } - out := new(ImageStreamImport) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ImageStreamImport) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageStreamImportSpec) DeepCopyInto(out *ImageStreamImportSpec) { - *out = *in - if in.Repository != nil { - in, out := &in.Repository, &out.Repository - *out = new(RepositoryImportSpec) - **out = **in - } - if in.Images != nil { - in, out := &in.Images, &out.Images - *out = make([]ImageImportSpec, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamImportSpec. -func (in *ImageStreamImportSpec) DeepCopy() *ImageStreamImportSpec { - if in == nil { - return nil - } - out := new(ImageStreamImportSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageStreamImportStatus) DeepCopyInto(out *ImageStreamImportStatus) { - *out = *in - if in.Import != nil { - in, out := &in.Import, &out.Import - *out = new(ImageStream) - (*in).DeepCopyInto(*out) - } - if in.Repository != nil { - in, out := &in.Repository, &out.Repository - *out = new(RepositoryImportStatus) - (*in).DeepCopyInto(*out) - } - if in.Images != nil { - in, out := &in.Images, &out.Images - *out = make([]ImageImportStatus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamImportStatus. -func (in *ImageStreamImportStatus) DeepCopy() *ImageStreamImportStatus { - if in == nil { - return nil - } - out := new(ImageStreamImportStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageStreamLayers) DeepCopyInto(out *ImageStreamLayers) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Blobs != nil { - in, out := &in.Blobs, &out.Blobs - *out = make(map[string]ImageLayerData, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } - if in.Images != nil { - in, out := &in.Images, &out.Images - *out = make(map[string]ImageBlobReferences, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamLayers. 
-func (in *ImageStreamLayers) DeepCopy() *ImageStreamLayers { - if in == nil { - return nil - } - out := new(ImageStreamLayers) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ImageStreamLayers) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageStreamList) DeepCopyInto(out *ImageStreamList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ImageStream, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamList. -func (in *ImageStreamList) DeepCopy() *ImageStreamList { - if in == nil { - return nil - } - out := new(ImageStreamList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ImageStreamList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageStreamMapping) DeepCopyInto(out *ImageStreamMapping) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Image.DeepCopyInto(&out.Image) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamMapping. -func (in *ImageStreamMapping) DeepCopy() *ImageStreamMapping { - if in == nil { - return nil - } - out := new(ImageStreamMapping) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ImageStreamMapping) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageStreamSpec) DeepCopyInto(out *ImageStreamSpec) { - *out = *in - out.LookupPolicy = in.LookupPolicy - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make([]TagReference, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamSpec. -func (in *ImageStreamSpec) DeepCopy() *ImageStreamSpec { - if in == nil { - return nil - } - out := new(ImageStreamSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageStreamStatus) DeepCopyInto(out *ImageStreamStatus) { - *out = *in - if in.Tags != nil { - in, out := &in.Tags, &out.Tags - *out = make([]NamedTagEventList, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamStatus. 
-func (in *ImageStreamStatus) DeepCopy() *ImageStreamStatus { - if in == nil { - return nil - } - out := new(ImageStreamStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageStreamTag) DeepCopyInto(out *ImageStreamTag) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Tag != nil { - in, out := &in.Tag, &out.Tag - *out = new(TagReference) - (*in).DeepCopyInto(*out) - } - out.LookupPolicy = in.LookupPolicy - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]TagEventCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - in.Image.DeepCopyInto(&out.Image) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamTag. -func (in *ImageStreamTag) DeepCopy() *ImageStreamTag { - if in == nil { - return nil - } - out := new(ImageStreamTag) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ImageStreamTag) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ImageStreamTagList) DeepCopyInto(out *ImageStreamTagList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ImageStreamTag, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamTagList. -func (in *ImageStreamTagList) DeepCopy() *ImageStreamTagList { - if in == nil { - return nil - } - out := new(ImageStreamTagList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ImageStreamTagList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NamedTagEventList) DeepCopyInto(out *NamedTagEventList) { - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]TagEvent, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]TagEventCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedTagEventList. -func (in *NamedTagEventList) DeepCopy() *NamedTagEventList { - if in == nil { - return nil - } - out := new(NamedTagEventList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RepositoryImportSpec) DeepCopyInto(out *RepositoryImportSpec) { - *out = *in - out.From = in.From - out.ImportPolicy = in.ImportPolicy - out.ReferencePolicy = in.ReferencePolicy - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryImportSpec. 
-func (in *RepositoryImportSpec) DeepCopy() *RepositoryImportSpec { - if in == nil { - return nil - } - out := new(RepositoryImportSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RepositoryImportStatus) DeepCopyInto(out *RepositoryImportStatus) { - *out = *in - in.Status.DeepCopyInto(&out.Status) - if in.Images != nil { - in, out := &in.Images, &out.Images - *out = make([]ImageImportStatus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.AdditionalTags != nil { - in, out := &in.AdditionalTags, &out.AdditionalTags - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryImportStatus. -func (in *RepositoryImportStatus) DeepCopy() *RepositoryImportStatus { - if in == nil { - return nil - } - out := new(RepositoryImportStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SignatureCondition) DeepCopyInto(out *SignatureCondition) { - *out = *in - in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignatureCondition. -func (in *SignatureCondition) DeepCopy() *SignatureCondition { - if in == nil { - return nil - } - out := new(SignatureCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SignatureGenericEntity) DeepCopyInto(out *SignatureGenericEntity) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignatureGenericEntity. -func (in *SignatureGenericEntity) DeepCopy() *SignatureGenericEntity { - if in == nil { - return nil - } - out := new(SignatureGenericEntity) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SignatureIssuer) DeepCopyInto(out *SignatureIssuer) { - *out = *in - out.SignatureGenericEntity = in.SignatureGenericEntity - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignatureIssuer. -func (in *SignatureIssuer) DeepCopy() *SignatureIssuer { - if in == nil { - return nil - } - out := new(SignatureIssuer) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SignatureSubject) DeepCopyInto(out *SignatureSubject) { - *out = *in - out.SignatureGenericEntity = in.SignatureGenericEntity - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignatureSubject. -func (in *SignatureSubject) DeepCopy() *SignatureSubject { - if in == nil { - return nil - } - out := new(SignatureSubject) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *TagEvent) DeepCopyInto(out *TagEvent) { - *out = *in - in.Created.DeepCopyInto(&out.Created) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagEvent. -func (in *TagEvent) DeepCopy() *TagEvent { - if in == nil { - return nil - } - out := new(TagEvent) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TagEventCondition) DeepCopyInto(out *TagEventCondition) { - *out = *in - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagEventCondition. -func (in *TagEventCondition) DeepCopy() *TagEventCondition { - if in == nil { - return nil - } - out := new(TagEventCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TagImportPolicy) DeepCopyInto(out *TagImportPolicy) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagImportPolicy. -func (in *TagImportPolicy) DeepCopy() *TagImportPolicy { - if in == nil { - return nil - } - out := new(TagImportPolicy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TagReference) DeepCopyInto(out *TagReference) { - *out = *in - if in.Annotations != nil { - in, out := &in.Annotations, &out.Annotations - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.From != nil { - in, out := &in.From, &out.From - *out = new(corev1.ObjectReference) - **out = **in - } - if in.Generation != nil { - in, out := &in.Generation, &out.Generation - *out = new(int64) - **out = **in - } - out.ImportPolicy = in.ImportPolicy - out.ReferencePolicy = in.ReferencePolicy - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagReference. -func (in *TagReference) DeepCopy() *TagReference { - if in == nil { - return nil - } - out := new(TagReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TagReferencePolicy) DeepCopyInto(out *TagReferencePolicy) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagReferencePolicy. -func (in *TagReferencePolicy) DeepCopy() *TagReferencePolicy { - if in == nil { - return nil - } - out := new(TagReferencePolicy) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/github.com/openshift/api/image/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/image/v1/zz_generated.swagger_doc_generated.go deleted file mode 100644 index 7815909872..0000000000 --- a/vendor/github.com/openshift/api/image/v1/zz_generated.swagger_doc_generated.go +++ /dev/null @@ -1,404 +0,0 @@ -package v1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! 
For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE -var map_DockerImageReference = map[string]string{ - "": "DockerImageReference points to a container image.", - "Registry": "Registry is the registry that contains the container image", - "Namespace": "Namespace is the namespace that contains the container image", - "Name": "Name is the name of the container image", - "Tag": "Tag is which tag of the container image is being referenced", - "ID": "ID is the identifier for the container image", -} - -func (DockerImageReference) SwaggerDoc() map[string]string { - return map_DockerImageReference -} - -var map_Image = map[string]string{ - "": "Image is an immutable representation of a container image and metadata at a point in time.", - "metadata": "Standard object's metadata.", - "dockerImageReference": "DockerImageReference is the string that can be used to pull this image.", - "dockerImageMetadata": "DockerImageMetadata contains metadata about this image", - "dockerImageMetadataVersion": "DockerImageMetadataVersion conveys the version of the object, which if empty defaults to \"1.0\"", - "dockerImageManifest": "DockerImageManifest is the raw JSON of the manifest", - "dockerImageLayers": "DockerImageLayers represents the layers in the image. May not be set if the image does not define that data.", - "signatures": "Signatures holds all signatures of the image.", - "dockerImageSignatures": "DockerImageSignatures provides the signatures as opaque blobs. This is a part of manifest schema v1.", - "dockerImageManifestMediaType": "DockerImageManifestMediaType specifies the mediaType of manifest. This is a part of manifest schema v2.", - "dockerImageConfig": "DockerImageConfig is a JSON blob that the runtime uses to set up the container. This is a part of manifest schema v2.", -} - -func (Image) SwaggerDoc() map[string]string { - return map_Image -} - -var map_ImageBlobReferences = map[string]string{ - "": "ImageBlobReferences describes the blob references within an image.", - "imageMissing": "imageMissing is true if the image is referenced by the image stream but the image object has been deleted from the API by an administrator. When this field is set, layers and config fields may be empty and callers that depend on the image metadata should consider the image to be unavailable for download or viewing.", - "layers": "layers is the list of blobs that compose this image, from base layer to top layer. All layers referenced by this array will be defined in the blobs map. Some images may have zero layers.", - "config": "config, if set, is the blob that contains the image config. 
Some images do not have separate config blobs and this field will be set to nil if so.", -} - -func (ImageBlobReferences) SwaggerDoc() map[string]string { - return map_ImageBlobReferences -} - -var map_ImageImportSpec = map[string]string{ - "": "ImageImportSpec describes a request to import a specific image.", - "from": "From is the source of an image to import; only kind DockerImage is allowed", - "to": "To is a tag in the current image stream to assign the imported image to, if name is not specified the default tag from from.name will be used", - "importPolicy": "ImportPolicy is the policy controlling how the image is imported", - "referencePolicy": "ReferencePolicy defines how other components should consume the image", - "includeManifest": "IncludeManifest determines if the manifest for each image is returned in the response", -} - -func (ImageImportSpec) SwaggerDoc() map[string]string { - return map_ImageImportSpec -} - -var map_ImageImportStatus = map[string]string{ - "": "ImageImportStatus describes the result of an image import.", - "status": "Status is the status of the image import, including errors encountered while retrieving the image", - "image": "Image is the metadata of that image, if the image was located", - "tag": "Tag is the tag this image was located under, if any", -} - -func (ImageImportStatus) SwaggerDoc() map[string]string { - return map_ImageImportStatus -} - -var map_ImageLayer = map[string]string{ - "": "ImageLayer represents a single layer of the image. Some images may have multiple layers. Some may have none.", - "name": "Name of the layer as defined by the underlying store.", - "size": "Size of the layer in bytes as defined by the underlying store.", - "mediaType": "MediaType of the referenced object.", -} - -func (ImageLayer) SwaggerDoc() map[string]string { - return map_ImageLayer -} - -var map_ImageLayerData = map[string]string{ - "": "ImageLayerData contains metadata about an image layer.", - "size": "Size of the layer in bytes as defined by the underlying store. This field is optional if the necessary information about size is not available.", - "mediaType": "MediaType of the referenced object.", -} - -func (ImageLayerData) SwaggerDoc() map[string]string { - return map_ImageLayerData -} - -var map_ImageList = map[string]string{ - "": "ImageList is a list of Image objects.", - "metadata": "Standard object's metadata.", - "items": "Items is a list of images", -} - -func (ImageList) SwaggerDoc() map[string]string { - return map_ImageList -} - -var map_ImageLookupPolicy = map[string]string{ - "": "ImageLookupPolicy describes how an image stream can be used to override the image references used by pods, builds, and other resources in a namespace.", - "local": "local will change the docker short image references (like \"mysql\" or \"php:latest\") on objects in this namespace to the image ID whenever they match this image stream, instead of reaching out to a remote registry. The name will be fully qualified to an image ID if found. The tag's referencePolicy is taken into account on the replaced value. Only works within the current namespace.", -} - -func (ImageLookupPolicy) SwaggerDoc() map[string]string { - return map_ImageLookupPolicy -} - -var map_ImageSignature = map[string]string{ - "": "ImageSignature holds a signature of an image. It allows to verify image identity and possibly other claims as long as the signature is trusted. Based on this information it is possible to restrict runnable images to those matching cluster-wide policy. 
Mandatory fields should be parsed by clients doing image verification. The others are parsed from signature's content by the server. They serve just an informative purpose.", - "metadata": "Standard object's metadata.", - "type": "Required: Describes a type of stored blob.", - "content": "Required: An opaque binary string which is an image's signature.", - "conditions": "Conditions represent the latest available observations of a signature's current state.", - "imageIdentity": "A human readable string representing image's identity. It could be a product name and version, or an image pull spec (e.g. \"registry.access.redhat.com/rhel7/rhel:7.2\").", - "signedClaims": "Contains claims from the signature.", - "created": "If specified, it is the time of signature's creation.", - "issuedBy": "If specified, it holds information about an issuer of signing certificate or key (a person or entity who signed the signing certificate or key).", - "issuedTo": "If specified, it holds information about a subject of signing certificate or key (a person or entity who signed the image).", -} - -func (ImageSignature) SwaggerDoc() map[string]string { - return map_ImageSignature -} - -var map_ImageStream = map[string]string{ - "": "ImageStream stores a mapping of tags to images, metadata overrides that are applied when images are tagged in a stream, and an optional reference to a container image repository on a registry.", - "metadata": "Standard object's metadata.", - "spec": "Spec describes the desired state of this stream", - "status": "Status describes the current state of this stream", -} - -func (ImageStream) SwaggerDoc() map[string]string { - return map_ImageStream -} - -var map_ImageStreamImage = map[string]string{ - "": "ImageStreamImage represents an Image that is retrieved by image name from an ImageStream.", - "metadata": "Standard object's metadata.", - "image": "Image associated with the ImageStream and image name.", -} - -func (ImageStreamImage) SwaggerDoc() map[string]string { - return map_ImageStreamImage -} - -var map_ImageStreamImport = map[string]string{ - "": "The image stream import resource provides an easy way for a user to find and import container images from other container image registries into the server. Individual images or an entire image repository may be imported, and users may choose to see the results of the import prior to tagging the resulting images into the specified image stream.\n\nThis API is intended for end-user tools that need to see the metadata of the image prior to import (for instance, to generate an application from it). Clients that know the desired image can continue to create spec.tags directly into their image streams.", - "metadata": "Standard object's metadata.", - "spec": "Spec is a description of the images that the user wishes to import", - "status": "Status is the the result of importing the image", -} - -func (ImageStreamImport) SwaggerDoc() map[string]string { - return map_ImageStreamImport -} - -var map_ImageStreamImportSpec = map[string]string{ - "": "ImageStreamImportSpec defines what images should be imported.", - "import": "Import indicates whether to perform an import - if so, the specified tags are set on the spec and status of the image stream defined by the type meta.", - "repository": "Repository is an optional import of an entire container image repository. 
A maximum limit on the number of tags imported this way is imposed by the server.", - "images": "Images are a list of individual images to import.", -} - -func (ImageStreamImportSpec) SwaggerDoc() map[string]string { - return map_ImageStreamImportSpec -} - -var map_ImageStreamImportStatus = map[string]string{ - "": "ImageStreamImportStatus contains information about the status of an image stream import.", - "import": "Import is the image stream that was successfully updated or created when 'to' was set.", - "repository": "Repository is set if spec.repository was set to the outcome of the import", - "images": "Images is set with the result of importing spec.images", -} - -func (ImageStreamImportStatus) SwaggerDoc() map[string]string { - return map_ImageStreamImportStatus -} - -var map_ImageStreamLayers = map[string]string{ - "": "ImageStreamLayers describes information about the layers referenced by images in this image stream.", - "metadata": "Standard object's metadata.", - "blobs": "blobs is a map of blob name to metadata about the blob.", - "images": "images is a map between an image name and the names of the blobs and config that comprise the image.", -} - -func (ImageStreamLayers) SwaggerDoc() map[string]string { - return map_ImageStreamLayers -} - -var map_ImageStreamList = map[string]string{ - "": "ImageStreamList is a list of ImageStream objects.", - "metadata": "Standard object's metadata.", - "items": "Items is a list of imageStreams", -} - -func (ImageStreamList) SwaggerDoc() map[string]string { - return map_ImageStreamList -} - -var map_ImageStreamMapping = map[string]string{ - "": "ImageStreamMapping represents a mapping from a single tag to a container image as well as the reference to the container image stream the image came from.", - "metadata": "Standard object's metadata.", - "image": "Image is a container image.", - "tag": "Tag is a string value this image can be located with inside the stream.", -} - -func (ImageStreamMapping) SwaggerDoc() map[string]string { - return map_ImageStreamMapping -} - -var map_ImageStreamSpec = map[string]string{ - "": "ImageStreamSpec represents options for ImageStreams.", - "lookupPolicy": "lookupPolicy controls how other resources reference images within this namespace.", - "dockerImageRepository": "dockerImageRepository is optional, if specified this stream is backed by a container repository on this server Deprecated: This field is deprecated as of v3.7 and will be removed in a future release. Specify the source for the tags to be imported in each tag via the spec.tags.from reference instead.", - "tags": "tags map arbitrary string values to specific image locators", -} - -func (ImageStreamSpec) SwaggerDoc() map[string]string { - return map_ImageStreamSpec -} - -var map_ImageStreamStatus = map[string]string{ - "": "ImageStreamStatus contains information about the state of this image stream.", - "dockerImageRepository": "DockerImageRepository represents the effective location this stream may be accessed at. May be empty until the server determines where the repository is located", - "publicDockerImageRepository": "PublicDockerImageRepository represents the public location from where the image can be pulled outside the cluster. This field may be empty if the administrator has not exposed the integrated registry externally.", - "tags": "Tags are a historical record of images associated with each tag. 
The first entry in the TagEvent array is the currently tagged image.", -} - -func (ImageStreamStatus) SwaggerDoc() map[string]string { - return map_ImageStreamStatus -} - -var map_ImageStreamTag = map[string]string{ - "": "ImageStreamTag represents an Image that is retrieved by tag name from an ImageStream.", - "metadata": "Standard object's metadata.", - "tag": "tag is the spec tag associated with this image stream tag, and it may be null if only pushes have occurred to this image stream.", - "generation": "generation is the current generation of the tagged image - if tag is provided and this value is not equal to the tag generation, a user has requested an import that has not completed, or conditions will be filled out indicating any error.", - "lookupPolicy": "lookupPolicy indicates whether this tag will handle image references in this namespace.", - "conditions": "conditions is an array of conditions that apply to the image stream tag.", - "image": "image associated with the ImageStream and tag.", -} - -func (ImageStreamTag) SwaggerDoc() map[string]string { - return map_ImageStreamTag -} - -var map_ImageStreamTagList = map[string]string{ - "": "ImageStreamTagList is a list of ImageStreamTag objects.", - "metadata": "Standard object's metadata.", - "items": "Items is the list of image stream tags", -} - -func (ImageStreamTagList) SwaggerDoc() map[string]string { - return map_ImageStreamTagList -} - -var map_NamedTagEventList = map[string]string{ - "": "NamedTagEventList relates a tag to its image history.", - "tag": "Tag is the tag for which the history is recorded", - "items": "Standard object's metadata.", - "conditions": "Conditions is an array of conditions that apply to the tag event list.", -} - -func (NamedTagEventList) SwaggerDoc() map[string]string { - return map_NamedTagEventList -} - -var map_RepositoryImportSpec = map[string]string{ - "": "RepositoryImportSpec describes a request to import images from a container image repository.", - "from": "From is the source for the image repository to import; only kind DockerImage and a name of a container image repository is allowed", - "importPolicy": "ImportPolicy is the policy controlling how the image is imported", - "referencePolicy": "ReferencePolicy defines how other components should consume the image", - "includeManifest": "IncludeManifest determines if the manifest for each image is returned in the response", -} - -func (RepositoryImportSpec) SwaggerDoc() map[string]string { - return map_RepositoryImportSpec -} - -var map_RepositoryImportStatus = map[string]string{ - "": "RepositoryImportStatus describes the result of an image repository import", - "status": "Status reflects whether any failure occurred during import", - "images": "Images is a list of images successfully retrieved by the import of the repository.", - "additionalTags": "AdditionalTags are tags that exist in the repository but were not imported because a maximum limit of automatic imports was applied.", -} - -func (RepositoryImportStatus) SwaggerDoc() map[string]string { - return map_RepositoryImportStatus -} - -var map_SignatureCondition = map[string]string{ - "": "SignatureCondition describes an image signature condition of particular kind at particular probe time.", - "type": "Type of signature condition, Complete or Failed.", - "status": "Status of the condition, one of True, False, Unknown.", - "lastProbeTime": "Last time the condition was checked.", - "lastTransitionTime": "Last time the condition transit from one status to another.", - "reason": "(brief) 
reason for the condition's last transition.", - "message": "Human readable message indicating details about last transition.", -} - -func (SignatureCondition) SwaggerDoc() map[string]string { - return map_SignatureCondition -} - -var map_SignatureGenericEntity = map[string]string{ - "": "SignatureGenericEntity holds a generic information about a person or entity who is an issuer or a subject of signing certificate or key.", - "organization": "Organization name.", - "commonName": "Common name (e.g. openshift-signing-service).", -} - -func (SignatureGenericEntity) SwaggerDoc() map[string]string { - return map_SignatureGenericEntity -} - -var map_SignatureIssuer = map[string]string{ - "": "SignatureIssuer holds information about an issuer of signing certificate or key.", -} - -func (SignatureIssuer) SwaggerDoc() map[string]string { - return map_SignatureIssuer -} - -var map_SignatureSubject = map[string]string{ - "": "SignatureSubject holds information about a person or entity who created the signature.", - "publicKeyID": "If present, it is a human readable key id of public key belonging to the subject used to verify image signature. It should contain at least 64 lowest bits of public key's fingerprint (e.g. 0x685ebe62bf278440).", -} - -func (SignatureSubject) SwaggerDoc() map[string]string { - return map_SignatureSubject -} - -var map_TagEvent = map[string]string{ - "": "TagEvent is used by ImageStreamStatus to keep a historical record of images associated with a tag.", - "created": "Created holds the time the TagEvent was created", - "dockerImageReference": "DockerImageReference is the string that can be used to pull this image", - "image": "Image is the image", - "generation": "Generation is the spec tag generation that resulted in this tag being updated", -} - -func (TagEvent) SwaggerDoc() map[string]string { - return map_TagEvent -} - -var map_TagEventCondition = map[string]string{ - "": "TagEventCondition contains condition information for a tag event.", - "type": "Type of tag event condition, currently only ImportSuccess", - "status": "Status of the condition, one of True, False, Unknown.", - "lastTransitionTime": "LastTransitionTIme is the time the condition transitioned from one status to another.", - "reason": "Reason is a brief machine readable explanation for the condition's last transition.", - "message": "Message is a human readable description of the details about last transition, complementing reason.", - "generation": "Generation is the spec tag generation that this status corresponds to", -} - -func (TagEventCondition) SwaggerDoc() map[string]string { - return map_TagEventCondition -} - -var map_TagImportPolicy = map[string]string{ - "": "TagImportPolicy controls how images related to this tag will be imported.", - "insecure": "Insecure is true if the server may bypass certificate verification or connect directly over HTTP during image import.", - "scheduled": "Scheduled indicates to the server that this tag should be periodically checked to ensure it is up to date, and imported", -} - -func (TagImportPolicy) SwaggerDoc() map[string]string { - return map_TagImportPolicy -} - -var map_TagReference = map[string]string{ - "": "TagReference specifies optional annotations for images using this tag and an optional reference to an ImageStreamTag, ImageStreamImage, or DockerImage this tag should track.", - "name": "Name of the tag", - "annotations": "Optional; if specified, annotations that are applied to images retrieved via ImageStreamTags.", - "from": "Optional; if specified, a 
reference to another image that this tag should point to. Valid values are ImageStreamTag, ImageStreamImage, and DockerImage. ImageStreamTag references can only reference a tag within this same ImageStream.", - "reference": "Reference states if the tag will be imported. Default value is false, which means the tag will be imported.", - "generation": "Generation is a counter that tracks mutations to the spec tag (user intent). When a tag reference is changed the generation is set to match the current stream generation (which is incremented every time spec is changed). Other processes in the system like the image importer observe that the generation of spec tag is newer than the generation recorded in the status and use that as a trigger to import the newest remote tag. To trigger a new import, clients may set this value to zero which will reset the generation to the latest stream generation. Legacy clients will send this value as nil which will be merged with the current tag generation.", - "importPolicy": "ImportPolicy is information that controls how images may be imported by the server.", - "referencePolicy": "ReferencePolicy defines how other components should consume the image.", -} - -func (TagReference) SwaggerDoc() map[string]string { - return map_TagReference -} - -var map_TagReferencePolicy = map[string]string{ - "": "TagReferencePolicy describes how pull-specs for images in this image stream tag are generated when image change triggers in deployment configs or builds are resolved. This allows the image stream author to control how images are accessed.", - "type": "Type determines how the image pull spec should be transformed when the image stream tag is used in deployment config triggers or new builds. The default value is `Source`, indicating the original location of the image should be used (if imported). The user may also specify `Local`, indicating that the pull spec should point to the integrated container image registry and leverage the registry's ability to proxy the pull to an upstream registry. `Local` allows the credentials used to pull this image to be managed from the image stream's namespace, so others on the platform can access a remote image but have no access to the remote secret. It also allows the image layers to be mirrored into the local registry which the images can still be pulled even if the upstream registry is unavailable.", -} - -func (TagReferencePolicy) SwaggerDoc() map[string]string { - return map_TagReferencePolicy -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/interface.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/interface.go new file mode 100644 index 0000000000..544faaaead --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/interface.go @@ -0,0 +1,30 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package config + +import ( + v1 "github.com/openshift/client-go/config/informers/externalversions/config/v1" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1 provides access to shared informers for resources in V1. 
+ V1() v1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/apiserver.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/apiserver.go new file mode 100644 index 0000000000..cc1bb98522 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/apiserver.go @@ -0,0 +1,72 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + configv1 "github.com/openshift/api/config/v1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/client-go/config/listers/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// APIServerInformer provides access to a shared informer and lister for +// APIServers. +type APIServerInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.APIServerLister +} + +type aPIServerInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewAPIServerInformer constructs a new informer for APIServer type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewAPIServerInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredAPIServerInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredAPIServerInformer constructs a new informer for APIServer type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredAPIServerInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().APIServers().List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().APIServers().Watch(options) + }, + }, + &configv1.APIServer{}, + resyncPeriod, + indexers, + ) +} + +func (f *aPIServerInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredAPIServerInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *aPIServerInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&configv1.APIServer{}, f.defaultInformer) +} + +func (f *aPIServerInformer) Lister() v1.APIServerLister { + return v1.NewAPIServerLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/authentication.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/authentication.go new file mode 100644 index 0000000000..3afef220e4 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/authentication.go @@ -0,0 +1,72 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + configv1 "github.com/openshift/api/config/v1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/client-go/config/listers/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// AuthenticationInformer provides access to a shared informer and lister for +// Authentications. +type AuthenticationInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.AuthenticationLister +} + +type authenticationInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewAuthenticationInformer constructs a new informer for Authentication type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewAuthenticationInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredAuthenticationInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredAuthenticationInformer constructs a new informer for Authentication type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredAuthenticationInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Authentications().List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Authentications().Watch(options) + }, + }, + &configv1.Authentication{}, + resyncPeriod, + indexers, + ) +} + +func (f *authenticationInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredAuthenticationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *authenticationInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&configv1.Authentication{}, f.defaultInformer) +} + +func (f *authenticationInformer) Lister() v1.AuthenticationLister { + return v1.NewAuthenticationLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/build.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/build.go new file mode 100644 index 0000000000..49814de848 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/build.go @@ -0,0 +1,72 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + configv1 "github.com/openshift/api/config/v1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/client-go/config/listers/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// BuildInformer provides access to a shared informer and lister for +// Builds. +type BuildInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.BuildLister +} + +type buildInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewBuildInformer constructs a new informer for Build type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewBuildInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredBuildInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredBuildInformer constructs a new informer for Build type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredBuildInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Builds().List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Builds().Watch(options) + }, + }, + &configv1.Build{}, + resyncPeriod, + indexers, + ) +} + +func (f *buildInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredBuildInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *buildInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&configv1.Build{}, f.defaultInformer) +} + +func (f *buildInformer) Lister() v1.BuildLister { + return v1.NewBuildLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/clusteroperator.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/clusteroperator.go new file mode 100644 index 0000000000..f23c05bb9a --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/clusteroperator.go @@ -0,0 +1,72 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + configv1 "github.com/openshift/api/config/v1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/client-go/config/listers/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ClusterOperatorInformer provides access to a shared informer and lister for +// ClusterOperators. +type ClusterOperatorInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ClusterOperatorLister +} + +type clusterOperatorInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewClusterOperatorInformer constructs a new informer for ClusterOperator type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewClusterOperatorInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterOperatorInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterOperatorInformer constructs a new informer for ClusterOperator type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredClusterOperatorInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().ClusterOperators().List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().ClusterOperators().Watch(options) + }, + }, + &configv1.ClusterOperator{}, + resyncPeriod, + indexers, + ) +} + +func (f *clusterOperatorInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterOperatorInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *clusterOperatorInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&configv1.ClusterOperator{}, f.defaultInformer) +} + +func (f *clusterOperatorInformer) Lister() v1.ClusterOperatorLister { + return v1.NewClusterOperatorLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/clusterversion.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/clusterversion.go new file mode 100644 index 0000000000..906b72cd68 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/clusterversion.go @@ -0,0 +1,72 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + configv1 "github.com/openshift/api/config/v1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/client-go/config/listers/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ClusterVersionInformer provides access to a shared informer and lister for +// ClusterVersions. +type ClusterVersionInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ClusterVersionLister +} + +type clusterVersionInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewClusterVersionInformer constructs a new informer for ClusterVersion type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewClusterVersionInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterVersionInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterVersionInformer constructs a new informer for ClusterVersion type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredClusterVersionInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().ClusterVersions().List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().ClusterVersions().Watch(options) + }, + }, + &configv1.ClusterVersion{}, + resyncPeriod, + indexers, + ) +} + +func (f *clusterVersionInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterVersionInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *clusterVersionInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&configv1.ClusterVersion{}, f.defaultInformer) +} + +func (f *clusterVersionInformer) Lister() v1.ClusterVersionLister { + return v1.NewClusterVersionLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/console.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/console.go new file mode 100644 index 0000000000..d91d50a280 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/console.go @@ -0,0 +1,72 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + configv1 "github.com/openshift/api/config/v1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/client-go/config/listers/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ConsoleInformer provides access to a shared informer and lister for +// Consoles. +type ConsoleInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ConsoleLister +} + +type consoleInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewConsoleInformer constructs a new informer for Console type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewConsoleInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredConsoleInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredConsoleInformer constructs a new informer for Console type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredConsoleInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Consoles().List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Consoles().Watch(options) + }, + }, + &configv1.Console{}, + resyncPeriod, + indexers, + ) +} + +func (f *consoleInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredConsoleInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *consoleInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&configv1.Console{}, f.defaultInformer) +} + +func (f *consoleInformer) Lister() v1.ConsoleLister { + return v1.NewConsoleLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/dns.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/dns.go new file mode 100644 index 0000000000..68bf545229 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/dns.go @@ -0,0 +1,72 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + configv1 "github.com/openshift/api/config/v1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/client-go/config/listers/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// DNSInformer provides access to a shared informer and lister for +// DNSes. +type DNSInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.DNSLister +} + +type dNSInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewDNSInformer constructs a new informer for DNS type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewDNSInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredDNSInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredDNSInformer constructs a new informer for DNS type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredDNSInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().DNSes().List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().DNSes().Watch(options) + }, + }, + &configv1.DNS{}, + resyncPeriod, + indexers, + ) +} + +func (f *dNSInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredDNSInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *dNSInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&configv1.DNS{}, f.defaultInformer) +} + +func (f *dNSInformer) Lister() v1.DNSLister { + return v1.NewDNSLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/featuregate.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/featuregate.go new file mode 100644 index 0000000000..1adb3762c6 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/featuregate.go @@ -0,0 +1,72 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + configv1 "github.com/openshift/api/config/v1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/client-go/config/listers/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// FeatureGateInformer provides access to a shared informer and lister for +// FeatureGates. +type FeatureGateInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.FeatureGateLister +} + +type featureGateInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewFeatureGateInformer constructs a new informer for FeatureGate type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFeatureGateInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredFeatureGateInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredFeatureGateInformer constructs a new informer for FeatureGate type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredFeatureGateInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().FeatureGates().List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().FeatureGates().Watch(options) + }, + }, + &configv1.FeatureGate{}, + resyncPeriod, + indexers, + ) +} + +func (f *featureGateInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredFeatureGateInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *featureGateInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&configv1.FeatureGate{}, f.defaultInformer) +} + +func (f *featureGateInformer) Lister() v1.FeatureGateLister { + return v1.NewFeatureGateLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/image.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/image.go new file mode 100644 index 0000000000..a0869fd6fd --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/image.go @@ -0,0 +1,72 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + configv1 "github.com/openshift/api/config/v1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/client-go/config/listers/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ImageInformer provides access to a shared informer and lister for +// Images. +type ImageInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ImageLister +} + +type imageInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewImageInformer constructs a new informer for Image type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewImageInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredImageInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredImageInformer constructs a new informer for Image type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
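As an aside (not part of this diff), the standalone `NewFeatureGateInformer` constructor shown above can be used directly when a shared factory is not wired up; the generated comments recommend the factory path, which appears later in this vendor drop. A minimal sketch, assuming a pre-built versioned clientset and an illustrative handler:

```go
// Minimal sketch only; not part of this diff. `client` is assumed to be a
// pre-built clientset from github.com/openshift/client-go/config/clientset/versioned.
package example

import (
	"fmt"
	"time"

	configv1 "github.com/openshift/api/config/v1"
	versioned "github.com/openshift/client-go/config/clientset/versioned"
	configinformersv1 "github.com/openshift/client-go/config/informers/externalversions/config/v1"
	"k8s.io/client-go/tools/cache"
)

func watchFeatureGates(client versioned.Interface, stopCh <-chan struct{}) {
	// Standalone FeatureGate informer built with the generated constructor
	// added in this vendored file.
	informer := configinformersv1.NewFeatureGateInformer(client, 10*time.Minute, cache.Indexers{})

	informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			fg := obj.(*configv1.FeatureGate)
			fmt.Printf("FeatureGate added: %s\n", fg.Name)
		},
		UpdateFunc: func(oldObj, newObj interface{}) {
			fg := newObj.(*configv1.FeatureGate)
			fmt.Printf("FeatureGate updated: %s\n", fg.Name)
		},
	})

	// Blocks until stopCh is closed.
	informer.Run(stopCh)
}
```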
+func NewFilteredImageInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Images().List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Images().Watch(options) + }, + }, + &configv1.Image{}, + resyncPeriod, + indexers, + ) +} + +func (f *imageInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredImageInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *imageInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&configv1.Image{}, f.defaultInformer) +} + +func (f *imageInformer) Lister() v1.ImageLister { + return v1.NewImageLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/infrastructure.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/infrastructure.go new file mode 100644 index 0000000000..8e55560d30 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/infrastructure.go @@ -0,0 +1,72 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + configv1 "github.com/openshift/api/config/v1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/client-go/config/listers/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// InfrastructureInformer provides access to a shared informer and lister for +// Infrastructures. +type InfrastructureInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.InfrastructureLister +} + +type infrastructureInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewInfrastructureInformer constructs a new informer for Infrastructure type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewInfrastructureInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredInfrastructureInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredInfrastructureInformer constructs a new informer for Infrastructure type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredInfrastructureInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Infrastructures().List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Infrastructures().Watch(options) + }, + }, + &configv1.Infrastructure{}, + resyncPeriod, + indexers, + ) +} + +func (f *infrastructureInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredInfrastructureInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *infrastructureInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&configv1.Infrastructure{}, f.defaultInformer) +} + +func (f *infrastructureInformer) Lister() v1.InfrastructureLister { + return v1.NewInfrastructureLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/ingress.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/ingress.go new file mode 100644 index 0000000000..771eaf230b --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/ingress.go @@ -0,0 +1,72 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + configv1 "github.com/openshift/api/config/v1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/client-go/config/listers/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// IngressInformer provides access to a shared informer and lister for +// Ingresses. +type IngressInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.IngressLister +} + +type ingressInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewIngressInformer constructs a new informer for Ingress type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewIngressInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredIngressInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredIngressInformer constructs a new informer for Ingress type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredIngressInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Ingresses().List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Ingresses().Watch(options) + }, + }, + &configv1.Ingress{}, + resyncPeriod, + indexers, + ) +} + +func (f *ingressInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredIngressInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *ingressInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&configv1.Ingress{}, f.defaultInformer) +} + +func (f *ingressInformer) Lister() v1.IngressLister { + return v1.NewIngressLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/interface.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/interface.go new file mode 100644 index 0000000000..3b6b8a38a1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/interface.go @@ -0,0 +1,134 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // APIServers returns a APIServerInformer. + APIServers() APIServerInformer + // Authentications returns a AuthenticationInformer. + Authentications() AuthenticationInformer + // Builds returns a BuildInformer. + Builds() BuildInformer + // ClusterOperators returns a ClusterOperatorInformer. + ClusterOperators() ClusterOperatorInformer + // ClusterVersions returns a ClusterVersionInformer. + ClusterVersions() ClusterVersionInformer + // Consoles returns a ConsoleInformer. + Consoles() ConsoleInformer + // DNSes returns a DNSInformer. + DNSes() DNSInformer + // FeatureGates returns a FeatureGateInformer. + FeatureGates() FeatureGateInformer + // Images returns a ImageInformer. + Images() ImageInformer + // Infrastructures returns a InfrastructureInformer. + Infrastructures() InfrastructureInformer + // Ingresses returns a IngressInformer. + Ingresses() IngressInformer + // Networks returns a NetworkInformer. + Networks() NetworkInformer + // OAuths returns a OAuthInformer. + OAuths() OAuthInformer + // Projects returns a ProjectInformer. + Projects() ProjectInformer + // Proxies returns a ProxyInformer. + Proxies() ProxyInformer + // Schedulers returns a SchedulerInformer. + Schedulers() SchedulerInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. 
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// APIServers returns a APIServerInformer. +func (v *version) APIServers() APIServerInformer { + return &aPIServerInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// Authentications returns a AuthenticationInformer. +func (v *version) Authentications() AuthenticationInformer { + return &authenticationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// Builds returns a BuildInformer. +func (v *version) Builds() BuildInformer { + return &buildInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// ClusterOperators returns a ClusterOperatorInformer. +func (v *version) ClusterOperators() ClusterOperatorInformer { + return &clusterOperatorInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// ClusterVersions returns a ClusterVersionInformer. +func (v *version) ClusterVersions() ClusterVersionInformer { + return &clusterVersionInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// Consoles returns a ConsoleInformer. +func (v *version) Consoles() ConsoleInformer { + return &consoleInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// DNSes returns a DNSInformer. +func (v *version) DNSes() DNSInformer { + return &dNSInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// FeatureGates returns a FeatureGateInformer. +func (v *version) FeatureGates() FeatureGateInformer { + return &featureGateInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// Images returns a ImageInformer. +func (v *version) Images() ImageInformer { + return &imageInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// Infrastructures returns a InfrastructureInformer. +func (v *version) Infrastructures() InfrastructureInformer { + return &infrastructureInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// Ingresses returns a IngressInformer. +func (v *version) Ingresses() IngressInformer { + return &ingressInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// Networks returns a NetworkInformer. +func (v *version) Networks() NetworkInformer { + return &networkInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// OAuths returns a OAuthInformer. +func (v *version) OAuths() OAuthInformer { + return &oAuthInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// Projects returns a ProjectInformer. +func (v *version) Projects() ProjectInformer { + return &projectInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// Proxies returns a ProxyInformer. +func (v *version) Proxies() ProxyInformer { + return &proxyInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// Schedulers returns a SchedulerInformer. 
+func (v *version) Schedulers() SchedulerInformer { + return &schedulerInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/network.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/network.go new file mode 100644 index 0000000000..bc4d0d39d4 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/network.go @@ -0,0 +1,72 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + configv1 "github.com/openshift/api/config/v1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/client-go/config/listers/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// NetworkInformer provides access to a shared informer and lister for +// Networks. +type NetworkInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.NetworkLister +} + +type networkInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewNetworkInformer constructs a new informer for Network type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewNetworkInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredNetworkInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredNetworkInformer constructs a new informer for Network type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredNetworkInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Networks().List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Networks().Watch(options) + }, + }, + &configv1.Network{}, + resyncPeriod, + indexers, + ) +} + +func (f *networkInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredNetworkInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *networkInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&configv1.Network{}, f.defaultInformer) +} + +func (f *networkInformer) Lister() v1.NetworkLister { + return v1.NewNetworkLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/oauth.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/oauth.go new file mode 100644 index 0000000000..78c4a0ce0d --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/oauth.go @@ -0,0 +1,72 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + configv1 "github.com/openshift/api/config/v1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/client-go/config/listers/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// OAuthInformer provides access to a shared informer and lister for +// OAuths. +type OAuthInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.OAuthLister +} + +type oAuthInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewOAuthInformer constructs a new informer for OAuth type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewOAuthInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredOAuthInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredOAuthInformer constructs a new informer for OAuth type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredOAuthInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().OAuths().List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().OAuths().Watch(options) + }, + }, + &configv1.OAuth{}, + resyncPeriod, + indexers, + ) +} + +func (f *oAuthInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredOAuthInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *oAuthInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&configv1.OAuth{}, f.defaultInformer) +} + +func (f *oAuthInformer) Lister() v1.OAuthLister { + return v1.NewOAuthLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/project.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/project.go new file mode 100644 index 0000000000..da097e064c --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/project.go @@ -0,0 +1,72 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + configv1 "github.com/openshift/api/config/v1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/client-go/config/listers/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ProjectInformer provides access to a shared informer and lister for +// Projects. +type ProjectInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ProjectLister +} + +type projectInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewProjectInformer constructs a new informer for Project type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewProjectInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredProjectInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredProjectInformer constructs a new informer for Project type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredProjectInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Projects().List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Projects().Watch(options) + }, + }, + &configv1.Project{}, + resyncPeriod, + indexers, + ) +} + +func (f *projectInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredProjectInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *projectInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&configv1.Project{}, f.defaultInformer) +} + +func (f *projectInformer) Lister() v1.ProjectLister { + return v1.NewProjectLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/proxy.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/proxy.go new file mode 100644 index 0000000000..22360d2a05 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/proxy.go @@ -0,0 +1,72 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + configv1 "github.com/openshift/api/config/v1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/client-go/config/listers/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ProxyInformer provides access to a shared informer and lister for +// Proxies. +type ProxyInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ProxyLister +} + +type proxyInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewProxyInformer constructs a new informer for Proxy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewProxyInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredProxyInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredProxyInformer constructs a new informer for Proxy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredProxyInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Proxies().List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Proxies().Watch(options) + }, + }, + &configv1.Proxy{}, + resyncPeriod, + indexers, + ) +} + +func (f *proxyInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredProxyInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *proxyInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&configv1.Proxy{}, f.defaultInformer) +} + +func (f *proxyInformer) Lister() v1.ProxyLister { + return v1.NewProxyLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/scheduler.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/scheduler.go new file mode 100644 index 0000000000..87a5780f03 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/scheduler.go @@ -0,0 +1,72 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + configv1 "github.com/openshift/api/config/v1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/client-go/config/listers/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// SchedulerInformer provides access to a shared informer and lister for +// Schedulers. +type SchedulerInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.SchedulerLister +} + +type schedulerInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewSchedulerInformer constructs a new informer for Scheduler type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewSchedulerInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredSchedulerInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredSchedulerInformer constructs a new informer for Scheduler type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredSchedulerInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Schedulers().List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1().Schedulers().Watch(options) + }, + }, + &configv1.Scheduler{}, + resyncPeriod, + indexers, + ) +} + +func (f *schedulerInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredSchedulerInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *schedulerInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&configv1.Scheduler{}, f.defaultInformer) +} + +func (f *schedulerInformer) Lister() v1.SchedulerLister { + return v1.NewSchedulerLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/factory.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/factory.go new file mode 100644 index 0000000000..ff9a302a22 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/factory.go @@ -0,0 +1,164 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + reflect "reflect" + sync "sync" + time "time" + + versioned "github.com/openshift/client-go/config/clientset/versioned" + config "github.com/openshift/client-go/config/informers/externalversions/config" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// SharedInformerOption defines the functional option type for SharedInformerFactory. +type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory + +type sharedInformerFactory struct { + client versioned.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration + customResync map[reflect.Type]time.Duration + + informers map[reflect.Type]cache.SharedIndexInformer + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. + startedInformers map[reflect.Type]bool +} + +// WithCustomResyncConfig sets a custom resync period for the specified informer types. +func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + for k, v := range resyncConfig { + factory.customResync[reflect.TypeOf(k)] = v + } + return factory + } +} + +// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory. 
+func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.tweakListOptions = tweakListOptions + return factory + } +} + +// WithNamespace limits the SharedInformerFactory to the specified namespace. +func WithNamespace(namespace string) SharedInformerOption { + return func(factory *sharedInformerFactory) *sharedInformerFactory { + factory.namespace = namespace + return factory + } +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces. +func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. +// Deprecated: Please use NewSharedInformerFactoryWithOptions instead +func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { + return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions)) +} + +// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options. +func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory { + factory := &sharedInformerFactory{ + client: client, + namespace: v1.NamespaceAll, + defaultResync: defaultResync, + informers: make(map[reflect.Type]cache.SharedIndexInformer), + startedInformers: make(map[reflect.Type]bool), + customResync: make(map[reflect.Type]time.Duration), + } + + // Apply all options + for _, opt := range options { + factory = opt(factory) + } + + return factory +} + +// Start initializes all requested informers. +func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + go informer.Run(stopCh) + f.startedInformers[informerType] = true + } + } +} + +// WaitForCacheSync waits for all started informers' cache were synced. +func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { + informers := func() map[reflect.Type]cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informers := map[reflect.Type]cache.SharedIndexInformer{} + for informerType, informer := range f.informers { + if f.startedInformers[informerType] { + informers[informerType] = informer + } + } + return informers + }() + + res := map[reflect.Type]bool{} + for informType, informer := range informers { + res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) + } + return res +} + +// InternalInformerFor returns the SharedIndexInformer for obj using an internal +// client. 
+func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informerType := reflect.TypeOf(obj) + informer, exists := f.informers[informerType] + if exists { + return informer + } + + resyncPeriod, exists := f.customResync[informerType] + if !exists { + resyncPeriod = f.defaultResync + } + + informer = newFunc(f.client, resyncPeriod) + f.informers[informerType] = informer + + return informer +} + +// SharedInformerFactory provides shared informers for resources in all known +// API group versions. +type SharedInformerFactory interface { + internalinterfaces.SharedInformerFactory + ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + + Config() config.Interface +} + +func (f *sharedInformerFactory) Config() config.Interface { + return config.New(f, f.namespace, f.tweakListOptions) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/generic.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/generic.go new file mode 100644 index 0000000000..248b391019 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/generic.go @@ -0,0 +1,76 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package externalversions + +import ( + "fmt" + + v1 "github.com/openshift/api/config/v1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// GenericInformer is type of SharedIndexInformer which will locate and delegate to other +// sharedInformers based on type +type GenericInformer interface { + Informer() cache.SharedIndexInformer + Lister() cache.GenericLister +} + +type genericInformer struct { + informer cache.SharedIndexInformer + resource schema.GroupResource +} + +// Informer returns the SharedIndexInformer. +func (f *genericInformer) Informer() cache.SharedIndexInformer { + return f.informer +} + +// Lister returns the GenericLister. 
+func (f *genericInformer) Lister() cache.GenericLister { + return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) +} + +// ForResource gives generic access to a shared informer of the matching type +// TODO extend this to unknown resources with a client pool +func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { + switch resource { + // Group=config.openshift.io, Version=v1 + case v1.SchemeGroupVersion.WithResource("apiservers"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().APIServers().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("authentications"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Authentications().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("builds"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Builds().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("clusteroperators"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().ClusterOperators().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("clusterversions"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().ClusterVersions().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("consoles"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Consoles().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("dnses"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().DNSes().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("featuregates"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().FeatureGates().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("images"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Images().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("infrastructures"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Infrastructures().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("ingresses"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Ingresses().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("networks"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Networks().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("oauths"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().OAuths().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("projects"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Projects().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("proxies"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Proxies().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("schedulers"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Schedulers().Informer()}, nil + + } + + return nil, fmt.Errorf("no informer found for %v", resource) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/internalinterfaces/factory_interfaces.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/internalinterfaces/factory_interfaces.go 
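Taken together, the factory, generic, and per-type files above follow the standard informer-gen layout. As a rough usage sketch (not part of this change; it assumes an already-built *rest.Config and that the generated FeatureGateLister follows the usual lister-gen List(selector) shape), a consumer would wire them up roughly like this:

```go
// Rough usage sketch only; not part of this diff.
package example

import (
	"fmt"
	"time"

	configv1 "github.com/openshift/api/config/v1"
	versioned "github.com/openshift/client-go/config/clientset/versioned"
	configinformers "github.com/openshift/client-go/config/informers/externalversions"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/rest"
)

func runConfigInformers(restConfig *rest.Config, stopCh <-chan struct{}) error {
	client, err := versioned.NewForConfig(restConfig)
	if err != nil {
		return err
	}

	// config.openshift.io/v1 resources are cluster scoped, so the namespace
	// option is effectively metav1.NamespaceNone ("").
	factory := configinformers.NewSharedInformerFactoryWithOptions(
		client, 10*time.Minute, configinformers.WithNamespace(metav1.NamespaceNone))

	// Request the shared informer before Start() so the factory tracks it.
	featureGateLister := factory.Config().V1().FeatureGates().Lister()

	// The generic path resolves the same informer by GroupVersionResource.
	if _, err := factory.ForResource(configv1.SchemeGroupVersion.WithResource("featuregates")); err != nil {
		return err
	}

	factory.Start(stopCh)
	for typ, synced := range factory.WaitForCacheSync(stopCh) {
		if !synced {
			return fmt.Errorf("cache failed to sync for %v", typ)
		}
	}

	gates, err := featureGateLister.List(labels.Everything())
	if err != nil {
		return err
	}
	fmt.Printf("observed %d FeatureGate objects\n", len(gates))
	return nil
}
```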
new file mode 100644
index 0000000000..720235c485
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/internalinterfaces/factory_interfaces.go
@@ -0,0 +1,24 @@
+// Code generated by informer-gen. DO NOT EDIT.
+
+package internalinterfaces
+
+import (
+	time "time"
+
+	versioned "github.com/openshift/client-go/config/clientset/versioned"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	cache "k8s.io/client-go/tools/cache"
+)
+
+// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer.
+type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer
+
+// SharedInformerFactory a small interface to allow for adding an informer without an import cycle
+type SharedInformerFactory interface {
+	Start(stopCh <-chan struct{})
+	InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer
+}
+
+// TweakListOptionsFunc is a function that transforms a v1.ListOptions.
+type TweakListOptionsFunc func(*v1.ListOptions)
diff --git a/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/machine_types.go b/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/machine_types.go
index fa144a1f62..e7fbb4b7fe 100644
--- a/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/machine_types.go
+++ b/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/machine_types.go
@@ -39,12 +39,6 @@ const (
 // Machine is the Schema for the machines API
 // +k8s:openapi-gen=true
 // +kubebuilder:subresource:status
-// +kubebuilder:printcolumn:name="Instance",type="string",JSONPath=".status.providerStatus.instanceId",description="Instance ID of machine created in AWS"
-// +kubebuilder:printcolumn:name="State",type="string",JSONPath=".status.providerStatus.instanceState",description="State of the AWS instance"
-// +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".spec.providerSpec.value.instanceType",description="Type of instance"
-// +kubebuilder:printcolumn:name="Region",type="string",JSONPath=".spec.providerSpec.value.placement.region",description="Region associated with machine"
-// +kubebuilder:printcolumn:name="Zone",type="string",JSONPath=".spec.providerSpec.value.placement.availabilityZone",description="Zone associated with machine"
-// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Machine age"
 type Machine struct {
 	metav1.TypeMeta   `json:",inline"`
 	metav1.ObjectMeta `json:"metadata,omitempty"`
diff --git a/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/machineset_types.go b/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/machineset_types.go
index 11fbe43c7c..ee5ed9855b 100644
--- a/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/machineset_types.go
+++ b/vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/machineset_types.go
@@ -35,11 +35,6 @@ import (
 // +k8s:openapi-gen=true
 // +kubebuilder:subresource:status
 // +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.labelSelector
-// +kubebuilder:printcolumn:name="Desired",type="integer",JSONPath=".spec.replicas",description="Desired Replicas"
-// +kubebuilder:printcolumn:name="Current",type="integer",JSONPath=".status.replicas",description="Current Replicas"
-// +kubebuilder:printcolumn:name="Ready",type="integer",JSONPath=".status.readyReplicas",description="Ready Replicas"
-// +kubebuilder:printcolumn:name="Available",type="string",JSONPath=".status.availableReplicas",description="Observed number of available replicas"
-// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Machineset age"
 type MachineSet struct {
 	metav1.TypeMeta   `json:",inline"`
 	metav1.ObjectMeta `json:"metadata,omitempty"`
diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/apiext.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/apiext.go
deleted file mode 100644
index ed6fbeb169..0000000000
--- a/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/apiext.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package resourceread
-
-import (
-	apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/runtime/serializer"
-)
-
-var (
-	apiExtensionsScheme = runtime.NewScheme()
-	apiExtensionsCodecs = serializer.NewCodecFactory(apiExtensionsScheme)
-)
-
-func init() {
-	if err := apiextv1beta1.AddToScheme(apiExtensionsScheme); err != nil {
-		panic(err)
-	}
-}
-
-// ReadCustomResourceDefinitionV1Beta1OrDie reads crd object from bytes. Panics on error.
-func ReadCustomResourceDefinitionV1Beta1OrDie(objBytes []byte) *apiextv1beta1.CustomResourceDefinition {
-	requiredObj, err := runtime.Decode(apiExtensionsCodecs.UniversalDecoder(apiextv1beta1.SchemeGroupVersion), objBytes)
-	if err != nil {
-		panic(err)
-	}
-	return requiredObj.(*apiextv1beta1.CustomResourceDefinition)
-}
diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/apireg.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/apireg.go
deleted file mode 100644
index f8915b50ef..0000000000
--- a/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/apireg.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package resourceread
-
-import (
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/runtime/serializer"
-	apiregv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
-	apiregv1beta1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1"
-)
-
-var (
-	apiRegScheme = runtime.NewScheme()
-	apiRegCodecs = serializer.NewCodecFactory(apiRegScheme)
-)
-
-func init() {
-	if err := apiregv1beta1.AddToScheme(apiRegScheme); err != nil {
-		panic(err)
-	}
-	if err := apiregv1.AddToScheme(apiRegScheme); err != nil {
-		panic(err)
-	}
-}
-
-// ReadAPIServiceV1OrDie reads aiservice object from bytes. Panics on error.
-func ReadAPIServiceV1OrDie(objBytes []byte) *apiregv1.APIService {
-	requiredObj, err := runtime.Decode(apiRegCodecs.UniversalDecoder(apiregv1.SchemeGroupVersion), objBytes)
-	if err != nil {
-		panic(err)
-	}
-	return requiredObj.(*apiregv1.APIService)
-}
diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/apps.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/apps.go
deleted file mode 100644
index e62c12e317..0000000000
--- a/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/apps.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package resourceread
-
-import (
-	appsv1 "k8s.io/api/apps/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/runtime/serializer"
-)
-
-var (
-	appsScheme = runtime.NewScheme()
-	appsCodecs = serializer.NewCodecFactory(appsScheme)
-)
-
-func init() {
-	if err := appsv1.AddToScheme(appsScheme); err != nil {
-		panic(err)
-	}
-}
-
-// ReadDeploymentV1OrDie reads deployment object from bytes. Panics on error.
-func ReadDeploymentV1OrDie(objBytes []byte) *appsv1.Deployment {
-	requiredObj, err := runtime.Decode(appsCodecs.UniversalDecoder(appsv1.SchemeGroupVersion), objBytes)
-	if err != nil {
-		panic(err)
-	}
-	return requiredObj.(*appsv1.Deployment)
-}
-
-// ReadDaemonSetV1OrDie reads daemonset object from bytes. Panics on error.
-func ReadDaemonSetV1OrDie(objBytes []byte) *appsv1.DaemonSet {
-	requiredObj, err := runtime.Decode(appsCodecs.UniversalDecoder(appsv1.SchemeGroupVersion), objBytes)
-	if err != nil {
-		panic(err)
-	}
-	return requiredObj.(*appsv1.DaemonSet)
-}
diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/batch.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/batch.go
deleted file mode 100644
index e5f5a4a581..0000000000
--- a/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/batch.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package resourceread
-
-import (
-	batchv1 "k8s.io/api/batch/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/runtime/serializer"
-)
-
-var (
-	batchScheme = runtime.NewScheme()
-	batchCodecs = serializer.NewCodecFactory(batchScheme)
-)
-
-func init() {
-	if err := batchv1.AddToScheme(batchScheme); err != nil {
-		panic(err)
-	}
-}
-
-// ReadJobV1OrDie reads Job object from bytes. Panics on error.
-func ReadJobV1OrDie(objBytes []byte) *batchv1.Job {
-	requiredObj, err := runtime.Decode(batchCodecs.UniversalDecoder(batchv1.SchemeGroupVersion), objBytes)
-	if err != nil {
-		panic(err)
-	}
-	return requiredObj.(*batchv1.Job)
-}
diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/core.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/core.go
deleted file mode 100644
index 644b5c3512..0000000000
--- a/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/core.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package resourceread
-
-import (
-	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/runtime/serializer"
-)
-
-var (
-	coreScheme = runtime.NewScheme()
-	coreCodecs = serializer.NewCodecFactory(coreScheme)
-)
-
-func init() {
-	if err := corev1.AddToScheme(coreScheme); err != nil {
-		panic(err)
-	}
-}
-
-// ReadConfigMapV1OrDie reads configmap object from bytes. Panics on error.
-func ReadConfigMapV1OrDie(objBytes []byte) *corev1.ConfigMap {
-	requiredObj, err := runtime.Decode(coreCodecs.UniversalDecoder(corev1.SchemeGroupVersion), objBytes)
-	if err != nil {
-		panic(err)
-	}
-	return requiredObj.(*corev1.ConfigMap)
-}
-
-// ReadServiceAccountV1OrDie reads serviceaccount object from bytes. Panics on error.
-func ReadServiceAccountV1OrDie(objBytes []byte) *corev1.ServiceAccount {
-	requiredObj, err := runtime.Decode(coreCodecs.UniversalDecoder(corev1.SchemeGroupVersion), objBytes)
-	if err != nil {
-		panic(err)
-	}
-	return requiredObj.(*corev1.ServiceAccount)
-}
-
-func ReadNamespaceV1OrDie(objBytes []byte) *corev1.Namespace {
-	requiredObj, err := runtime.Decode(coreCodecs.UniversalDecoder(corev1.SchemeGroupVersion), objBytes)
-	if err != nil {
-		panic(err)
-	}
-	return requiredObj.(*corev1.Namespace)
-}
-
-func ReadServiceV1OrDie(objBytes []byte) *corev1.Service {
-	requiredObj, err := runtime.Decode(coreCodecs.UniversalDecoder(corev1.SchemeGroupVersion), objBytes)
-	if err != nil {
-		panic(err)
-	}
-	return requiredObj.(*corev1.Service)
-}
diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/image.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/image.go
deleted file mode 100644
index d53089a050..0000000000
--- a/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/image.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package resourceread
-
-import (
-	imagev1 "github.com/openshift/api/image/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/runtime/serializer"
-)
-
-var (
-	imageScheme = runtime.NewScheme()
-	imageCodecs = serializer.NewCodecFactory(imageScheme)
-)
-
-func init() {
-	if err := imagev1.AddToScheme(imageScheme); err != nil {
-		panic(err)
-	}
-}
-
-// ReadImageStreamV1 reads imagestream object from bytes or reports an error.
-func ReadImageStreamV1(objBytes []byte) (*imagev1.ImageStream, error) {
-	requiredObj, err := runtime.Decode(imageCodecs.UniversalDecoder(imagev1.SchemeGroupVersion), objBytes)
-	if err != nil {
-		return nil, err
-	}
-	return requiredObj.(*imagev1.ImageStream), nil
-}
diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/rbac.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/rbac.go
deleted file mode 100644
index 6eb4e4a3a0..0000000000
--- a/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/rbac.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package resourceread
-
-import (
-	rbacv1 "k8s.io/api/rbac/v1"
-	rbacv1beta1 "k8s.io/api/rbac/v1beta1"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/runtime/serializer"
-)
-
-var (
-	rbacScheme = runtime.NewScheme()
-	rbacCodecs = serializer.NewCodecFactory(rbacScheme)
-)
-
-func init() {
-	if err := rbacv1.AddToScheme(rbacScheme); err != nil {
-		panic(err)
-	}
-	if err := rbacv1beta1.AddToScheme(rbacScheme); err != nil {
-		panic(err)
-	}
-}
-
-// ReadClusterRoleBindingV1OrDie reads clusterrolebinding object from bytes. Panics on error.
-func ReadClusterRoleBindingV1OrDie(objBytes []byte) *rbacv1.ClusterRoleBinding {
-	requiredObj, err := runtime.Decode(rbacCodecs.UniversalDecoder(rbacv1.SchemeGroupVersion), objBytes)
-	if err != nil {
-		panic(err)
-	}
-	return requiredObj.(*rbacv1.ClusterRoleBinding)
-}
-
-// ReadClusterRoleV1OrDie reads clusterole object from bytes. Panics on error.
-func ReadClusterRoleV1OrDie(objBytes []byte) *rbacv1.ClusterRole {
-	requiredObj, err := runtime.Decode(rbacCodecs.UniversalDecoder(rbacv1.SchemeGroupVersion), objBytes)
-	if err != nil {
-		panic(err)
-	}
-	return requiredObj.(*rbacv1.ClusterRole)
-}
-
-// ReadRoleBindingV1OrDie reads clusterrolebinding object from bytes. Panics on error.
-func ReadRoleBindingV1OrDie(objBytes []byte) *rbacv1.RoleBinding {
-	requiredObj, err := runtime.Decode(rbacCodecs.UniversalDecoder(rbacv1.SchemeGroupVersion), objBytes)
-	if err != nil {
-		panic(err)
-	}
-	return requiredObj.(*rbacv1.RoleBinding)
-}
-
-// ReadRoleV1OrDie reads clusterole object from bytes. Panics on error.
-func ReadRoleV1OrDie(objBytes []byte) *rbacv1.Role {
-	requiredObj, err := runtime.Decode(rbacCodecs.UniversalDecoder(rbacv1.SchemeGroupVersion), objBytes)
-	if err != nil {
-		panic(err)
-	}
-	return requiredObj.(*rbacv1.Role)
-}
diff --git a/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/security.go b/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/security.go
deleted file mode 100644
index d2e9dbd121..0000000000
--- a/vendor/github.com/openshift/cluster-version-operator/lib/resourceread/security.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package resourceread
-
-import (
-	securityv1 "github.com/openshift/api/security/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/runtime/serializer"
-)
-
-var (
-	securityScheme = runtime.NewScheme()
-	securityCodecs = serializer.NewCodecFactory(securityScheme)
-)
-
-func init() {
-	if err := securityv1.AddToScheme(securityScheme); err != nil {
-		panic(err)
-	}
-}
-
-// ReadSecurityContextConstraintsV1OrDie reads clusterrolebinding object from bytes. Panics on error.
-func ReadSecurityContextConstraintsV1OrDie(objBytes []byte) *securityv1.SecurityContextConstraints {
-	requiredObj, err := runtime.Decode(securityCodecs.UniversalDecoder(securityv1.SchemeGroupVersion), objBytes)
-	if err != nil {
-		panic(err)
-	}
-	return requiredObj.(*securityv1.SecurityContextConstraints)
-}