diff --git a/glide.lock b/glide.lock index efbb9ced4cba..30172f914138 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ -hash: 178d5bb0aa72040b6c081cd9d8d6f6f0a392b7a485cdfe76a9ae4c9af4058b2f -updated: 2019-03-08T16:40:10.642597021-05:00 +hash: 94079fe6736c7a94a3207f72ec8ef3cd4f1795106879643a573d45a96bf62bf5 +updated: 2019-03-19T13:24:40.958437953-04:00 imports: - name: bitbucket.org/ww/goautoneg version: 75cd24fc2f2c2a2088577d12123ddee5f54e0675 @@ -650,11 +650,11 @@ imports: - pagination - testhelper - name: github.com/gorilla/context - version: 51ce91d2eaddeca0ef29a71d766bb3634dadf729 + version: 08b5f424b9271eedf6f9f0ce86cb9396ed337a42 - name: github.com/gorilla/mux version: 08e7f807d38d6a870193019bb439056118661505 - name: github.com/gorilla/securecookie - version: e65cf8c5df817c89aeb47ecb46064e802e2de943 + version: 51f47194a536d357035c4ad9304e2fa42dde262a - name: github.com/gorilla/sessions version: a3acf13e802c358d65f249324d14ed24aac11370 - name: github.com/gorilla/websocket @@ -884,7 +884,7 @@ imports: - go-selinux - go-selinux/label - name: github.com/openshift/api - version: fe0c0ad070ce15d30b0a6dd20adb0ef883efe11a + version: 19e76f656f2b86e0624d6199e8b3fe49b89269ad subpackages: - apps - apps/v1 @@ -930,7 +930,7 @@ imports: - webconsole - webconsole/v1 - name: github.com/openshift/client-go - version: 2d89985ca24a8307e4b5db2574a62d3e6c2444b4 + version: 8ae2a9c33ba2a3d2ed5dc13d536ca935fc9625b9 subpackages: - apps/clientset/versioned - apps/clientset/versioned/fake @@ -1054,7 +1054,7 @@ imports: - user/informers/externalversions/user/v1 - user/listers/user/v1 - name: github.com/openshift/library-go - version: 10585fdc9322d30d622279729036a07f1ff7bbe1 + version: 69ec53fc354e648d1b1bd08556e702639f494688 subpackages: - pkg/config/configdefaults - pkg/config/helpers @@ -1070,7 +1070,7 @@ imports: - pkg/operator/resource/resourcemerge - pkg/serviceability - name: github.com/openshift/source-to-image - version: ff97a646334bca626f7950dce881acd7c27f2702 + version: 2ba8a349386aff03c26729096b0225a691355fe9 subpackages: - pkg/api - pkg/api/constants @@ -1087,13 +1087,13 @@ imports: - name: github.com/pborman/uuid version: ca53cad383cad2479bbba7f7a1a05797ec1386e4 - name: github.com/pelletier/go-toml - version: c01d1270ff3e442a8a57cddc1c92dc1138598194 + version: 16398bac157da96aa88f98a2df640c7f32af1da2 - name: github.com/peterbourgon/diskv version: 5f041e8faa004a95c88a202771f4cc3e991971e6 - name: github.com/pkg/errors version: 645ef00459ed84a119197bfb8d8205042c6df63d - name: github.com/pkg/profile - version: 22592cc4bae3572d59be8852c6980023e4c9dec5 + version: 057bc52a47ec3c79498dda63f4a6f8298725e976 - name: github.com/pkg/sftp version: 4d0e916071f68db74f8a73926335f809396d6b42 - name: github.com/pmezard/go-difflib @@ -1445,7 +1445,7 @@ imports: - tap - transport - name: gopkg.in/asn1-ber.v1 - version: f715ec2f112d1e4195b827ad68cf44017a3ef2b1 + version: 379148ca0225df7a432012b8df0355c2a2063ac0 - name: gopkg.in/gcfg.v1 version: 27e4946190b4a327b539185f2b5b1f7c84730728 subpackages: @@ -1506,7 +1506,7 @@ imports: - storage/v1alpha1 - storage/v1beta1 - name: k8s.io/apiextensions-apiserver - version: 63bd5e1c39483f0ef7fd275794d19a7cba19b764 + version: cff3be58cff453c44c49af4595bc58f4a4217ff7 repo: https://github.com/openshift/kubernetes-apiextensions-apiserver.git subpackages: - pkg/apis/apiextensions @@ -1616,7 +1616,7 @@ imports: - third_party/forked/golang/netutil - third_party/forked/golang/reflect - name: k8s.io/apiserver - version: 377a9fd4c500fdc04567f7feeb44e28bc952dc95 + version: 
4583122210dd7975e16fbec2d953e282bc6998e0 repo: https://github.com/openshift/kubernetes-apiserver.git subpackages: - pkg/admission @@ -1745,7 +1745,7 @@ imports: - pkg/genericclioptions/printers - pkg/genericclioptions/resource - name: k8s.io/client-go - version: 287ba817609cfd4079cdbb7fb36c4659c2c74341 + version: a6658b4c2ca255291938f67d3f7f17019d30a79c repo: https://github.com/openshift/kubernetes-client-go.git subpackages: - discovery @@ -1947,7 +1947,7 @@ imports: - util/testing - util/workqueue - name: k8s.io/code-generator - version: 1bdf8e8a8fde675de375d9d0a8fa77c4034be0a0 + version: 13d81ee386d3d039027dd42e7b3d1d30d8d788f2 repo: https://github.com/openshift/kubernetes-code-generator.git - name: k8s.io/csi-api version: 3690dd22ed3c34a731f5c46fb295ddd368c9c1a4 @@ -2030,7 +2030,7 @@ imports: subpackages: - config/v1beta1 - name: k8s.io/kubernetes - version: de991a22fa8b3fb4d68abc87a064377cc793135a + version: c398de65fed4a7fbdc0b8a0c0682623d9dad41e1 repo: https://github.com/openshift/kubernetes.git subpackages: - cmd/cloud-controller-manager/app/apis/config diff --git a/hack/openapi-violation.list b/hack/openapi-violation.list index 3a0013cc0351..29cc2ace8c0f 100644 --- a/hack/openapi-violation.list +++ b/hack/openapi-violation.list @@ -30,8 +30,8 @@ API rule violation: names_match,github.com/openshift/api/config/v1,AdmissionConf API rule violation: names_match,github.com/openshift/api/config/v1,AdmissionConfig,EnabledAdmissionPlugins API rule violation: names_match,github.com/openshift/api/config/v1,AuthenticationSpec,OAuthMetadata API rule violation: names_match,github.com/openshift/api/config/v1,EtcdConnectionInfo,URLs -API rule violation: names_match,github.com/openshift/api/config/v1,FeatureEnabledDisabled,Disabled -API rule violation: names_match,github.com/openshift/api/config/v1,FeatureEnabledDisabled,Enabled +API rule violation: names_match,github.com/openshift/api/config/v1,FeatureGateEnabledDisabled,Disabled +API rule violation: names_match,github.com/openshift/api/config/v1,FeatureGateEnabledDisabled,Enabled API rule violation: names_match,github.com/openshift/api/config/v1,GenericAPIServerConfig,AdmissionConfig API rule violation: names_match,github.com/openshift/api/config/v1,IdentityProvider,UseAsChallenger API rule violation: names_match,github.com/openshift/api/config/v1,IdentityProvider,UseAsLogin @@ -78,6 +78,7 @@ API rule violation: names_match,github.com/openshift/api/network/v1,ClusterNetwo API rule violation: names_match,github.com/openshift/api/network/v1,NetNamespace,NetID API rule violation: names_match,github.com/openshift/api/network/v1,NetNamespace,NetName API rule violation: names_match,github.com/openshift/api/oauth/v1,ScopeRestriction,ExactValues +API rule violation: names_match,github.com/openshift/api/operator/v1,DefaultNetworkDefinition,OpenShiftSDNConfig API rule violation: names_match,github.com/openshift/api/operator/v1alpha1,OperatorStatus,CurrentAvailability API rule violation: names_match,github.com/openshift/api/operator/v1alpha1,OperatorStatus,TargetAvailability API rule violation: names_match,github.com/openshift/api/osin/v1,IdentityProvider,UseAsChallenger @@ -261,3 +262,4 @@ API rule violation: names_match,k8s.io/kubernetes/cmd/cloud-controller-manager/a API rule violation: names_match,k8s.io/kubernetes/cmd/cloud-controller-manager/app/apis/config/v1alpha1,CloudControllerManagerConfiguration,ServiceController API rule violation: names_match,k8s.io/metrics/pkg/apis/custom_metrics/v1beta1,MetricValue,WindowSeconds API rule violation: 
names_match,k8s.io/metrics/pkg/apis/external_metrics/v1beta1,ExternalMetricValue,WindowSeconds +API rule violation: omitempty_match_case,github.com/openshift/api/operator/v1,StaticPodOperatorStatus,LatestAvailableRevision diff --git a/pkg/admission/customresourcevalidation/features/validate_features.go b/pkg/admission/customresourcevalidation/features/validate_features.go index f145781db463..c7d7291824b5 100644 --- a/pkg/admission/customresourcevalidation/features/validate_features.go +++ b/pkg/admission/customresourcevalidation/features/validate_features.go @@ -31,17 +31,17 @@ func Register(plugins *admission.Plugins) { }) } -func toFeatureV1(uncastObj runtime.Object) (*configv1.Features, field.ErrorList) { +func toFeatureGateV1(uncastObj runtime.Object) (*configv1.FeatureGate, field.ErrorList) { if uncastObj == nil { return nil, nil } allErrs := field.ErrorList{} - obj, ok := uncastObj.(*configv1.Features) + obj, ok := uncastObj.(*configv1.FeatureGate) if !ok { return nil, append(allErrs, - field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"Features"}), + field.NotSupported(field.NewPath("kind"), fmt.Sprintf("%T", uncastObj), []string{"FeatureGate"}), field.NotSupported(field.NewPath("apiVersion"), fmt.Sprintf("%T", uncastObj), []string{"config.openshift.io/v1"})) } @@ -53,7 +53,7 @@ type featuresV1 struct { var knownFeatureSets = sets.NewString("", string(configv1.TechPreviewNoUpgrade)) -func validateFeatureSpecCreate(spec configv1.FeaturesSpec) field.ErrorList { +func validateFeatureGateSpecCreate(spec configv1.FeatureGateSpec) field.ErrorList { allErrs := field.ErrorList{} // on create, we only allow values that we are aware of @@ -64,7 +64,7 @@ func validateFeatureSpecCreate(spec configv1.FeaturesSpec) field.ErrorList { return allErrs } -func validateFeatureSpecUpdate(spec, oldSpec configv1.FeaturesSpec) field.ErrorList { +func validateFeatureGateSpecUpdate(spec, oldSpec configv1.FeatureGateSpec) field.ErrorList { allErrs := field.ErrorList{} // on update, we don't fail validation on a field we don't recognize as long as it is not changing @@ -81,39 +81,39 @@ func validateFeatureSpecUpdate(spec, oldSpec configv1.FeaturesSpec) field.ErrorL } func (featuresV1) ValidateCreate(uncastObj runtime.Object) field.ErrorList { - obj, allErrs := toFeatureV1(uncastObj) + obj, allErrs := toFeatureGateV1(uncastObj) if len(allErrs) > 0 { return allErrs } allErrs = append(allErrs, validation.ValidateObjectMeta(&obj.ObjectMeta, false, customresourcevalidation.RequireNameCluster, field.NewPath("metadata"))...) - allErrs = append(allErrs, validateFeatureSpecCreate(obj.Spec)...) + allErrs = append(allErrs, validateFeatureGateSpecCreate(obj.Spec)...) return allErrs } func (featuresV1) ValidateUpdate(uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { - obj, allErrs := toFeatureV1(uncastObj) + obj, allErrs := toFeatureGateV1(uncastObj) if len(allErrs) > 0 { return allErrs } - oldObj, allErrs := toFeatureV1(uncastOldObj) + oldObj, allErrs := toFeatureGateV1(uncastOldObj) if len(allErrs) > 0 { return allErrs } allErrs = append(allErrs, validation.ValidateObjectMetaUpdate(&obj.ObjectMeta, &oldObj.ObjectMeta, field.NewPath("metadata"))...) - allErrs = append(allErrs, validateFeatureSpecUpdate(obj.Spec, oldObj.Spec)...) + allErrs = append(allErrs, validateFeatureGateSpecUpdate(obj.Spec, oldObj.Spec)...) 
return allErrs } func (featuresV1) ValidateStatusUpdate(uncastObj runtime.Object, uncastOldObj runtime.Object) field.ErrorList { - obj, errs := toFeatureV1(uncastObj) + obj, errs := toFeatureGateV1(uncastObj) if len(errs) > 0 { return errs } - oldObj, errs := toFeatureV1(uncastOldObj) + oldObj, errs := toFeatureGateV1(uncastOldObj) if len(errs) > 0 { return errs } diff --git a/pkg/admission/customresourcevalidation/features/validate_features_test.go b/pkg/admission/customresourcevalidation/features/validate_features_test.go index ad589e5314d5..3b71635d5dc5 100644 --- a/pkg/admission/customresourcevalidation/features/validate_features_test.go +++ b/pkg/admission/customresourcevalidation/features/validate_features_test.go @@ -32,7 +32,7 @@ func TestValidateCreateSpec(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - actual := validateFeatureSpecCreate(configv1.FeaturesSpec{FeatureSet: configv1.FeatureSet(tc.featureSet)}) + actual := validateFeatureGateSpecCreate(configv1.FeatureGateSpec{FeatureSet: configv1.FeatureSet(tc.featureSet)}) switch { case len(actual) == 0 && len(tc.expectedErr) == 0: case len(actual) == 0 && len(tc.expectedErr) != 0: @@ -95,9 +95,9 @@ func TestValidateUpdateSpec(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - actual := validateFeatureSpecUpdate( - configv1.FeaturesSpec{FeatureSet: configv1.FeatureSet(tc.featureSet)}, - configv1.FeaturesSpec{FeatureSet: configv1.FeatureSet(tc.oldFeatureSet)}, + actual := validateFeatureGateSpecUpdate( + configv1.FeatureGateSpec{FeatureSet: configv1.FeatureSet(tc.featureSet)}, + configv1.FeatureGateSpec{FeatureSet: configv1.FeatureSet(tc.oldFeatureSet)}, ) switch { case len(actual) == 0 && len(tc.expectedErr) == 0: diff --git a/pkg/openapi/zz_generated.openapi.go b/pkg/openapi/zz_generated.openapi.go index 03851f20d11b..7db1bef8caed 100644 --- a/pkg/openapi/zz_generated.openapi.go +++ b/pkg/openapi/zz_generated.openapi.go @@ -173,11 +173,11 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/openshift/api/config/v1.DelegatedAuthorization": schema_openshift_api_config_v1_DelegatedAuthorization(ref), "github.com/openshift/api/config/v1.EtcdConnectionInfo": schema_openshift_api_config_v1_EtcdConnectionInfo(ref), "github.com/openshift/api/config/v1.EtcdStorageConfig": schema_openshift_api_config_v1_EtcdStorageConfig(ref), - "github.com/openshift/api/config/v1.FeatureEnabledDisabled": schema_openshift_api_config_v1_FeatureEnabledDisabled(ref), - "github.com/openshift/api/config/v1.Features": schema_openshift_api_config_v1_Features(ref), - "github.com/openshift/api/config/v1.FeaturesList": schema_openshift_api_config_v1_FeaturesList(ref), - "github.com/openshift/api/config/v1.FeaturesSpec": schema_openshift_api_config_v1_FeaturesSpec(ref), - "github.com/openshift/api/config/v1.FeaturesStatus": schema_openshift_api_config_v1_FeaturesStatus(ref), + "github.com/openshift/api/config/v1.FeatureGate": schema_openshift_api_config_v1_FeatureGate(ref), + "github.com/openshift/api/config/v1.FeatureGateEnabledDisabled": schema_openshift_api_config_v1_FeatureGateEnabledDisabled(ref), + "github.com/openshift/api/config/v1.FeatureGateList": schema_openshift_api_config_v1_FeatureGateList(ref), + "github.com/openshift/api/config/v1.FeatureGateSpec": schema_openshift_api_config_v1_FeatureGateSpec(ref), + "github.com/openshift/api/config/v1.FeatureGateStatus": schema_openshift_api_config_v1_FeatureGateStatus(ref), 
"github.com/openshift/api/config/v1.GenericAPIServerConfig": schema_openshift_api_config_v1_GenericAPIServerConfig(ref), "github.com/openshift/api/config/v1.GenericControllerConfig": schema_openshift_api_config_v1_GenericControllerConfig(ref), "github.com/openshift/api/config/v1.GitHubIdentityProvider": schema_openshift_api_config_v1_GitHubIdentityProvider(ref), @@ -420,15 +420,18 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/openshift/api/openshiftcontrolplane/v1.ServiceAccountControllerConfig": schema_openshift_api_openshiftcontrolplane_v1_ServiceAccountControllerConfig(ref), "github.com/openshift/api/openshiftcontrolplane/v1.ServiceServingCert": schema_openshift_api_openshiftcontrolplane_v1_ServiceServingCert(ref), "github.com/openshift/api/openshiftcontrolplane/v1.SourceStrategyDefaultsConfig": schema_openshift_api_openshiftcontrolplane_v1_SourceStrategyDefaultsConfig(ref), + "github.com/openshift/api/operator/v1.AdditionalNetworkDefinition": schema_openshift_api_operator_v1_AdditionalNetworkDefinition(ref), "github.com/openshift/api/operator/v1.Authentication": schema_openshift_api_operator_v1_Authentication(ref), "github.com/openshift/api/operator/v1.AuthenticationList": schema_openshift_api_operator_v1_AuthenticationList(ref), "github.com/openshift/api/operator/v1.AuthenticationSpec": schema_openshift_api_operator_v1_AuthenticationSpec(ref), "github.com/openshift/api/operator/v1.AuthenticationStatus": schema_openshift_api_operator_v1_AuthenticationStatus(ref), + "github.com/openshift/api/operator/v1.ClusterNetworkEntry": schema_openshift_api_operator_v1_ClusterNetworkEntry(ref), "github.com/openshift/api/operator/v1.Console": schema_openshift_api_operator_v1_Console(ref), "github.com/openshift/api/operator/v1.ConsoleCustomization": schema_openshift_api_operator_v1_ConsoleCustomization(ref), "github.com/openshift/api/operator/v1.ConsoleList": schema_openshift_api_operator_v1_ConsoleList(ref), "github.com/openshift/api/operator/v1.ConsoleSpec": schema_openshift_api_operator_v1_ConsoleSpec(ref), "github.com/openshift/api/operator/v1.ConsoleStatus": schema_openshift_api_operator_v1_ConsoleStatus(ref), + "github.com/openshift/api/operator/v1.DefaultNetworkDefinition": schema_openshift_api_operator_v1_DefaultNetworkDefinition(ref), "github.com/openshift/api/operator/v1.EndpointPublishingStrategy": schema_openshift_api_operator_v1_EndpointPublishingStrategy(ref), "github.com/openshift/api/operator/v1.Etcd": schema_openshift_api_operator_v1_Etcd(ref), "github.com/openshift/api/operator/v1.EtcdList": schema_openshift_api_operator_v1_EtcdList(ref), @@ -454,8 +457,13 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/openshift/api/operator/v1.MyOperatorResource": schema_openshift_api_operator_v1_MyOperatorResource(ref), "github.com/openshift/api/operator/v1.MyOperatorResourceSpec": schema_openshift_api_operator_v1_MyOperatorResourceSpec(ref), "github.com/openshift/api/operator/v1.MyOperatorResourceStatus": schema_openshift_api_operator_v1_MyOperatorResourceStatus(ref), + "github.com/openshift/api/operator/v1.Network": schema_openshift_api_operator_v1_Network(ref), + "github.com/openshift/api/operator/v1.NetworkList": schema_openshift_api_operator_v1_NetworkList(ref), + "github.com/openshift/api/operator/v1.NetworkSpec": schema_openshift_api_operator_v1_NetworkSpec(ref), + "github.com/openshift/api/operator/v1.NetworkStatus": schema_openshift_api_operator_v1_NetworkStatus(ref), 
"github.com/openshift/api/operator/v1.NodePlacement": schema_openshift_api_operator_v1_NodePlacement(ref), "github.com/openshift/api/operator/v1.NodeStatus": schema_openshift_api_operator_v1_NodeStatus(ref), + "github.com/openshift/api/operator/v1.OVNKubernetesConfig": schema_openshift_api_operator_v1_OVNKubernetesConfig(ref), "github.com/openshift/api/operator/v1.OpenShiftAPIServer": schema_openshift_api_operator_v1_OpenShiftAPIServer(ref), "github.com/openshift/api/operator/v1.OpenShiftAPIServerList": schema_openshift_api_operator_v1_OpenShiftAPIServerList(ref), "github.com/openshift/api/operator/v1.OpenShiftAPIServerSpec": schema_openshift_api_operator_v1_OpenShiftAPIServerSpec(ref), @@ -464,11 +472,13 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "github.com/openshift/api/operator/v1.OpenShiftControllerManagerList": schema_openshift_api_operator_v1_OpenShiftControllerManagerList(ref), "github.com/openshift/api/operator/v1.OpenShiftControllerManagerSpec": schema_openshift_api_operator_v1_OpenShiftControllerManagerSpec(ref), "github.com/openshift/api/operator/v1.OpenShiftControllerManagerStatus": schema_openshift_api_operator_v1_OpenShiftControllerManagerStatus(ref), + "github.com/openshift/api/operator/v1.OpenShiftSDNConfig": schema_openshift_api_operator_v1_OpenShiftSDNConfig(ref), "github.com/openshift/api/operator/v1.OperandContainerSpec": schema_openshift_api_operator_v1_OperandContainerSpec(ref), "github.com/openshift/api/operator/v1.OperandSpec": schema_openshift_api_operator_v1_OperandSpec(ref), "github.com/openshift/api/operator/v1.OperatorCondition": schema_openshift_api_operator_v1_OperatorCondition(ref), "github.com/openshift/api/operator/v1.OperatorSpec": schema_openshift_api_operator_v1_OperatorSpec(ref), "github.com/openshift/api/operator/v1.OperatorStatus": schema_openshift_api_operator_v1_OperatorStatus(ref), + "github.com/openshift/api/operator/v1.ProxyConfig": schema_openshift_api_operator_v1_ProxyConfig(ref), "github.com/openshift/api/operator/v1.ResourcePatch": schema_openshift_api_operator_v1_ResourcePatch(ref), "github.com/openshift/api/operator/v1.ServiceCA": schema_openshift_api_operator_v1_ServiceCA(ref), "github.com/openshift/api/operator/v1.ServiceCAList": schema_openshift_api_operator_v1_ServiceCAList(ref), @@ -8803,50 +8813,11 @@ func schema_openshift_api_config_v1_EtcdStorageConfig(ref common.ReferenceCallba } } -func schema_openshift_api_config_v1_FeatureEnabledDisabled(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_openshift_api_config_v1_FeatureGate(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "Enabled": { - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - "Disabled": { - SchemaProps: spec.SchemaProps{ - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"string"}, - Format: "", - }, - }, - }, - }, - }, - }, - Required: []string{"Enabled", "Disabled"}, - }, - }, - } -} - -func schema_openshift_api_config_v1_Features(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "Features holds 
cluster-wide information about feature gates. The canonical name is `cluster`", + Description: "Feature holds cluster-wide information about feature gates. The canonical name is `cluster`", Type: []string{"object"}, Properties: map[string]spec.Schema{ "kind": { @@ -8872,13 +8843,13 @@ func schema_openshift_api_config_v1_Features(ref common.ReferenceCallback) commo "spec": { SchemaProps: spec.SchemaProps{ Description: "spec holds user settable values for configuration", - Ref: ref("github.com/openshift/api/config/v1.FeaturesSpec"), + Ref: ref("github.com/openshift/api/config/v1.FeatureGateSpec"), }, }, "status": { SchemaProps: spec.SchemaProps{ Description: "status holds observed values from the cluster. They may not be overridden.", - Ref: ref("github.com/openshift/api/config/v1.FeaturesStatus"), + Ref: ref("github.com/openshift/api/config/v1.FeatureGateStatus"), }, }, }, @@ -8886,11 +8857,50 @@ func schema_openshift_api_config_v1_Features(ref common.ReferenceCallback) commo }, }, Dependencies: []string{ - "github.com/openshift/api/config/v1.FeaturesSpec", "github.com/openshift/api/config/v1.FeaturesStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + "github.com/openshift/api/config/v1.FeatureGateSpec", "github.com/openshift/api/config/v1.FeatureGateStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, } } -func schema_openshift_api_config_v1_FeaturesList(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_openshift_api_config_v1_FeatureGateEnabledDisabled(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "Enabled": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "Disabled": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + Required: []string{"Enabled", "Disabled"}, + }, + }, + } +} + +func schema_openshift_api_config_v1_FeatureGateList(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ @@ -8922,7 +8932,7 @@ func schema_openshift_api_config_v1_FeaturesList(ref common.ReferenceCallback) c Items: &spec.SchemaOrArray{ Schema: &spec.Schema{ SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/openshift/api/config/v1.Features"), + Ref: ref("github.com/openshift/api/config/v1.FeatureGate"), }, }, }, @@ -8933,11 +8943,11 @@ func schema_openshift_api_config_v1_FeaturesList(ref common.ReferenceCallback) c }, }, Dependencies: []string{ - "github.com/openshift/api/config/v1.Features", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + "github.com/openshift/api/config/v1.FeatureGate", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, } } -func schema_openshift_api_config_v1_FeaturesSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_openshift_api_config_v1_FeatureGateSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ @@ -8956,7 +8966,7 @@ func schema_openshift_api_config_v1_FeaturesSpec(ref common.ReferenceCallback) c } } -func 
schema_openshift_api_config_v1_FeaturesStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { +func schema_openshift_api_config_v1_FeatureGateStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ SchemaProps: spec.SchemaProps{ @@ -11548,6 +11558,13 @@ func schema_openshift_api_config_v1_SchedulerSpec(ref common.ReferenceCallback) Ref: ref("github.com/openshift/api/config/v1.ConfigMapNameReference"), }, }, + "defaultNodeSelector": { + SchemaProps: spec.SchemaProps{ + Description: "defaultNodeSelector helps set the cluster-wide default node selector to restrict pod placement to specific nodes. This is applied to the pods created in all namespaces without a specified nodeSelector value. For example, defaultNodeSelector: \"type=user-node,region=east\" would set nodeSelector field in pod spec to \"type=user-node,region=east\" to all pods created in all namespaces. Namespaces having project-wide node selectors won't be impacted even if this field is set. This adds an annotation section to the namespace. For example, if a new namespace is created with node-selector='type=user-node,region=east', the annotation openshift.io/node-selector: type=user-node,region=east gets added to the project. When the openshift.io/node-selector annotation is set on the project the value is used in preference to the value we are setting for defaultNodeSelector field. For instance, openshift.io/node-selector: \"type=user-node,region=west\" means that the default of \"type=user-node,region=east\" set in defaultNodeSelector would not be applied.", + Type: []string{"string"}, + Format: "", + }, + }, }, }, }, @@ -21080,6 +21097,41 @@ func schema_openshift_api_openshiftcontrolplane_v1_SourceStrategyDefaultsConfig( } } +func schema_openshift_api_operator_v1_AdditionalNetworkDefinition(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "AdditionalNetworkDefinition configures an extra network that is available but not created by default. Instead, pods must request them by name. type must be specified, along with exactly one \"Config\" that matches the type.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "type": { + SchemaProps: spec.SchemaProps{ + Description: "type is the type of network The only supported value is NetworkTypeRaw", + Type: []string{"string"}, + Format: "", + }, + }, + "name": { + SchemaProps: spec.SchemaProps{ + Description: "name is the name of the network. 
This will be populated in the resulting CRD This must be unique.", + Type: []string{"string"}, + Format: "", + }, + }, + "rawCNIConfig": { + SchemaProps: spec.SchemaProps{ + Description: "rawCNIConfig is the raw CNI configuration json to create in the NetworkAttachmentDefinition CRD", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"type", "name", "rawCNIConfig"}, + }, + }, + } +} + func schema_openshift_api_operator_v1_Authentication(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -21279,7 +21331,7 @@ func schema_openshift_api_operator_v1_AuthenticationStatus(ref common.ReferenceC }, }, }, - Required: []string{"version", "readyReplicas", "generations"}, + Required: []string{"readyReplicas"}, }, }, Dependencies: []string{ @@ -21287,6 +21339,32 @@ func schema_openshift_api_operator_v1_AuthenticationStatus(ref common.ReferenceC } } +func schema_openshift_api_operator_v1_ClusterNetworkEntry(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ClusterNetworkEntry is a subnet from which to allocate PodIPs. A network of size HostPrefix (in CIDR notation) will be allocated when nodes join the cluster. Not all network providers support multiple ClusterNetworks", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "cidr": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + "hostPrefix": { + SchemaProps: spec.SchemaProps{ + Type: []string{"integer"}, + Format: "int64", + }, + }, + }, + Required: []string{"cidr", "hostPrefix"}, + }, + }, + } +} + func schema_openshift_api_operator_v1_Console(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -21516,7 +21594,7 @@ func schema_openshift_api_operator_v1_ConsoleStatus(ref common.ReferenceCallback }, }, }, - Required: []string{"version", "readyReplicas", "generations"}, + Required: []string{"readyReplicas"}, }, }, Dependencies: []string{ @@ -21524,6 +21602,41 @@ func schema_openshift_api_operator_v1_ConsoleStatus(ref common.ReferenceCallback } } +func schema_openshift_api_operator_v1_DefaultNetworkDefinition(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "DefaultNetworkDefinition represents a single network plugin's configuration. type must be specified, along with exactly one \"Config\" that matches the type.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "type": { + SchemaProps: spec.SchemaProps{ + Description: "type is the type of network All NetworkTypes are supported except for NetworkTypeRaw", + Type: []string{"string"}, + Format: "", + }, + }, + "openshiftSDNConfig": { + SchemaProps: spec.SchemaProps{ + Description: "openShiftSDNConfig configures the openshift-sdn plugin", + Ref: ref("github.com/openshift/api/operator/v1.OpenShiftSDNConfig"), + }, + }, + "ovnKubernetesConfig": { + SchemaProps: spec.SchemaProps{ + Description: "oVNKubernetesConfig configures the ovn-kubernetes plugin. 
This is currently not implemented.", + Ref: ref("github.com/openshift/api/operator/v1.OVNKubernetesConfig"), + }, + }, + }, + Required: []string{"type"}, + }, + }, + Dependencies: []string{ + "github.com/openshift/api/operator/v1.OVNKubernetesConfig", "github.com/openshift/api/operator/v1.OpenShiftSDNConfig"}, + } +} + func schema_openshift_api_operator_v1_EndpointPublishingStrategy(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -21788,7 +21901,7 @@ func schema_openshift_api_operator_v1_EtcdStatus(ref common.ReferenceCallback) c }, }, }, - Required: []string{"version", "readyReplicas", "generations", "latestAvailableRevision"}, + Required: []string{"readyReplicas"}, }, }, Dependencies: []string{ @@ -21968,7 +22081,7 @@ func schema_openshift_api_operator_v1_IngressControllerSpec(ref common.Reference }, "endpointPublishingStrategy": { SchemaProps: spec.SchemaProps{ - Description: "endpointPublishingStrategy is used to publish the ingress controller endpoints to other networks, enable load balancer integrations, etc.\n\nIf unset, the default is based on infrastructure.config.openshift.io/cluster .status.platform:\n\n AWS: LoadBalancerService\n All other platform types: Private\n\nendpointPublishingStrategy cannot be updated.", + Description: "endpointPublishingStrategy is used to publish the ingress controller endpoints to other networks, enable load balancer integrations, etc.\n\nIf unset, the default is based on infrastructure.config.openshift.io/cluster .status.platform:\n\n AWS: LoadBalancerService\n Libvirt: HostNetwork\n\nAny other platform types (including None) default to HostNetwork.\n\nendpointPublishingStrategy cannot be updated.", Ref: ref("github.com/openshift/api/operator/v1.EndpointPublishingStrategy"), }, }, @@ -22038,12 +22151,25 @@ func schema_openshift_api_operator_v1_IngressControllerStatus(ref common.Referen Ref: ref("github.com/openshift/api/operator/v1.EndpointPublishingStrategy"), }, }, + "conditions": { + SchemaProps: spec.SchemaProps{ + Description: "conditions is a list of conditions and their status.\n\nAvailable means the ingress controller deployment is available and servicing route and ingress resources (i.e, .status.availableReplicas equals .spec.replicas)\n\nThere are additional conditions which indicate the status of other ingress controller features and capabilities.\n\n * LoadBalancerManaged\n - True if the following conditions are met:\n * The endpoint publishing strategy requires a service load balancer.\n - False if any of those conditions are unsatisfied.\n\n * LoadBalancerReady\n - True if the following conditions are met:\n * A load balancer is managed.\n * The load balancer is ready.\n - False if any of those conditions are unsatisfied.\n\n * DNSManaged\n - True if the following conditions are met:\n * The endpoint publishing strategy and platform support DNS.\n * The ingress controller domain is set.\n * dns.config.openshift.io/cluster configures DNS zones.\n - False if any of those conditions are unsatisfied.\n\n * DNSReady\n - True if the following conditions are met:\n * DNS is managed.\n * DNS records have been successfully created.\n - False if any of those conditions are unsatisfied.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/openshift/api/operator/v1.OperatorCondition"), + }, + }, + }, + }, + }, }, Required: []string{"availableReplicas", "selector", "domain"}, }, }, Dependencies: 
[]string{ - "github.com/openshift/api/operator/v1.EndpointPublishingStrategy"}, + "github.com/openshift/api/operator/v1.EndpointPublishingStrategy", "github.com/openshift/api/operator/v1.OperatorCondition"}, } } @@ -22290,7 +22416,7 @@ func schema_openshift_api_operator_v1_KubeAPIServerStatus(ref common.ReferenceCa }, }, }, - Required: []string{"version", "readyReplicas", "generations", "latestAvailableRevision"}, + Required: []string{"readyReplicas"}, }, }, Dependencies: []string{ @@ -22541,7 +22667,7 @@ func schema_openshift_api_operator_v1_KubeControllerManagerStatus(ref common.Ref }, }, }, - Required: []string{"version", "readyReplicas", "generations", "latestAvailableRevision"}, + Required: []string{"readyReplicas"}, }, }, Dependencies: []string{ @@ -22792,7 +22918,7 @@ func schema_openshift_api_operator_v1_KubeSchedulerStatus(ref common.ReferenceCa }, }, }, - Required: []string{"version", "readyReplicas", "generations", "latestAvailableRevision"}, + Required: []string{"readyReplicas"}, }, }, Dependencies: []string{ @@ -22953,7 +23079,7 @@ func schema_openshift_api_operator_v1_MyOperatorResourceStatus(ref common.Refere }, }, }, - Required: []string{"version", "readyReplicas", "generations"}, + Required: []string{"readyReplicas"}, }, }, Dependencies: []string{ @@ -22961,6 +23087,190 @@ func schema_openshift_api_operator_v1_MyOperatorResourceStatus(ref common.Refere } } +func schema_openshift_api_operator_v1_Network(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Network describes the cluster's desired network configuration. It is consumed by the cluster-network-operator.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/openshift/api/operator/v1.NetworkSpec"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/openshift/api/operator/v1.NetworkStatus"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "github.com/openshift/api/operator/v1.NetworkSpec", "github.com/openshift/api/operator/v1.NetworkStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_openshift_api_operator_v1_NetworkList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "NetworkList contains a list of Network configurations", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/openshift/api/operator/v1.Network"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "github.com/openshift/api/operator/v1.Network", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + } +} + +func schema_openshift_api_operator_v1_NetworkSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "NetworkSpec is the top-level network configuration object.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "clusterNetwork": { + SchemaProps: spec.SchemaProps{ + Description: "clusterNetwork is the IP address pool to use for pod IPs. Some network providers, e.g. OpenShift SDN, support multiple ClusterNetworks. Others only support one. 
This is equivalent to the cluster-cidr.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/openshift/api/operator/v1.ClusterNetworkEntry"), + }, + }, + }, + }, + }, + "serviceNetwork": { + SchemaProps: spec.SchemaProps{ + Description: "serviceNetwork is the ip address pool to use for Service IPs Currently, all existing network providers only support a single value here, but this is an array to allow for growth.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + "defaultNetwork": { + SchemaProps: spec.SchemaProps{ + Description: "defaultNetwork is the \"default\" network that all pods will receive", + Ref: ref("github.com/openshift/api/operator/v1.DefaultNetworkDefinition"), + }, + }, + "additionalNetworks": { + SchemaProps: spec.SchemaProps{ + Description: "additionalNetworks is a list of extra networks to make available to pods when multiple networks are enabled.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("github.com/openshift/api/operator/v1.AdditionalNetworkDefinition"), + }, + }, + }, + }, + }, + "disableMultiNetwork": { + SchemaProps: spec.SchemaProps{ + Description: "disableMultiNetwork specifies whether or not multiple pod network support should be disabled. If unset, this property defaults to 'false' and multiple network support is enabled.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "deployKubeProxy": { + SchemaProps: spec.SchemaProps{ + Description: "deployKubeProxy specifies whether or not a standalone kube-proxy should be deployed by the operator. Some network providers include kube-proxy or similar functionality. If unset, the plugin will attempt to select the correct value, which is false when OpenShift SDN and ovn-kubernetes are used and true otherwise.", + Type: []string{"boolean"}, + Format: "", + }, + }, + "kubeProxyConfig": { + SchemaProps: spec.SchemaProps{ + Description: "kubeProxyConfig lets us configure desired proxy configuration. If not specified, sensible defaults will be chosen by OpenShift directly. Not consumed by all network providers - currently only openshift-sdn.", + Ref: ref("github.com/openshift/api/operator/v1.ProxyConfig"), + }, + }, + }, + Required: []string{"clusterNetwork", "serviceNetwork", "defaultNetwork"}, + }, + }, + Dependencies: []string{ + "github.com/openshift/api/operator/v1.AdditionalNetworkDefinition", "github.com/openshift/api/operator/v1.ClusterNetworkEntry", "github.com/openshift/api/operator/v1.DefaultNetworkDefinition", "github.com/openshift/api/operator/v1.ProxyConfig"}, + } +} + +func schema_openshift_api_operator_v1_NetworkStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "NetworkStatus is currently unused. 
Instead, status is reported in the Network.config.openshift.io object.", + Type: []string{"object"}, + }, + }, + } +} + func schema_openshift_api_operator_v1_NodePlacement(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -23038,6 +23348,26 @@ func schema_openshift_api_operator_v1_NodeStatus(ref common.ReferenceCallback) c } } +func schema_openshift_api_operator_v1_OVNKubernetesConfig(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ovnKubernetesConfig is the proposed configuration parameters for networks using the ovn-kubernetes network project", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "mtu": { + SchemaProps: spec.SchemaProps{ + Description: "mtu is the MTU to use for the tunnel interface. This must be 100 bytes smaller than the uplink mtu. Default is 1400", + Type: []string{"integer"}, + Format: "int64", + }, + }, + }, + }, + }, + } +} + func schema_openshift_api_operator_v1_OpenShiftAPIServer(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -23240,7 +23570,7 @@ func schema_openshift_api_operator_v1_OpenShiftAPIServerStatus(ref common.Refere }, }, }, - Required: []string{"version", "readyReplicas", "generations"}, + Required: []string{"readyReplicas"}, }, }, Dependencies: []string{ @@ -23450,7 +23780,7 @@ func schema_openshift_api_operator_v1_OpenShiftControllerManagerStatus(ref commo }, }, }, - Required: []string{"version", "readyReplicas", "generations"}, + Required: []string{"readyReplicas"}, }, }, Dependencies: []string{ @@ -23458,6 +23788,48 @@ func schema_openshift_api_operator_v1_OpenShiftControllerManagerStatus(ref commo } } +func schema_openshift_api_operator_v1_OpenShiftSDNConfig(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "OpenShiftSDNConfig configures the three openshift-sdn plugins", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "mode": { + SchemaProps: spec.SchemaProps{ + Description: "mode is one of \"Multitenant\", \"Subnet\", or \"NetworkPolicy\"", + Type: []string{"string"}, + Format: "", + }, + }, + "vxlanPort": { + SchemaProps: spec.SchemaProps{ + Description: "vxlanPort is the port to use for all vxlan packets. The default is 4789.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "mtu": { + SchemaProps: spec.SchemaProps{ + Description: "mtu is the mtu to use for the tunnel interface. Defaults to 1450 if unset. This must be 50 bytes smaller than the machine's uplink.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "useExternalOpenvswitch": { + SchemaProps: spec.SchemaProps{ + Description: "useExternalOpenvswitch tells the operator not to install openvswitch, because it will be provided separately. 
If set, you must provide it yourself.", + Type: []string{"boolean"}, + Format: "", + }, + }, + }, + Required: []string{"mode"}, + }, + }, + } +} + func schema_openshift_api_operator_v1_OperandContainerSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -23689,7 +24061,7 @@ func schema_openshift_api_operator_v1_OperatorStatus(ref common.ReferenceCallbac }, }, }, - Required: []string{"version", "readyReplicas", "generations"}, + Required: []string{"readyReplicas"}, }, }, Dependencies: []string{ @@ -23697,6 +24069,55 @@ func schema_openshift_api_operator_v1_OperatorStatus(ref common.ReferenceCallbac } } +func schema_openshift_api_operator_v1_ProxyConfig(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ProxyConfig defines the configuration knobs for kubeproxy All of these are optional and have sensible defaults", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "iptablesSyncPeriod": { + SchemaProps: spec.SchemaProps{ + Description: "The period that iptables rules are refreshed. Default: 30s", + Type: []string{"string"}, + Format: "", + }, + }, + "bindAddress": { + SchemaProps: spec.SchemaProps{ + Description: "The address to \"bind\" on Defaults to 0.0.0.0", + Type: []string{"string"}, + Format: "", + }, + }, + "proxyArguments": { + SchemaProps: spec.SchemaProps{ + Description: "Any additional arguments to pass to the kubeproxy process", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } +} + func schema_openshift_api_operator_v1_ResourcePatch(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -23927,7 +24348,7 @@ func schema_openshift_api_operator_v1_ServiceCAStatus(ref common.ReferenceCallba }, }, }, - Required: []string{"version", "readyReplicas", "generations"}, + Required: []string{"readyReplicas"}, }, }, Dependencies: []string{ @@ -24137,7 +24558,7 @@ func schema_openshift_api_operator_v1_ServiceCatalogAPIServerStatus(ref common.R }, }, }, - Required: []string{"version", "readyReplicas", "generations"}, + Required: []string{"readyReplicas"}, }, }, Dependencies: []string{ @@ -24347,7 +24768,7 @@ func schema_openshift_api_operator_v1_ServiceCatalogControllerManagerStatus(ref }, }, }, - Required: []string{"version", "readyReplicas", "generations"}, + Required: []string{"readyReplicas"}, }, }, Dependencies: []string{ @@ -24499,7 +24920,7 @@ func schema_openshift_api_operator_v1_StaticPodOperatorStatus(ref common.Referen }, }, }, - Required: []string{"version", "readyReplicas", "generations", "latestAvailableRevision"}, + Required: []string{"readyReplicas"}, }, }, Dependencies: []string{ @@ -28507,7 +28928,7 @@ func schema_openshift_api_servicecertsigner_v1alpha1_ServiceCertSignerOperatorCo }, }, }, - Required: []string{"version", "readyReplicas", "generations"}, + Required: []string{"readyReplicas"}, }, }, Dependencies: []string{ diff --git a/vendor/github.com/gorilla/context/README.md b/vendor/github.com/gorilla/context/README.md index d31f2bad49f3..08f86693bcd8 100644 --- a/vendor/github.com/gorilla/context/README.md +++ 
b/vendor/github.com/gorilla/context/README.md @@ -7,4 +7,4 @@ gorilla/context is a general purpose registry for global request variables. > Note: gorilla/context, having been born well before `context.Context` existed, does not play well > with the shallow copying of the request that [`http.Request.WithContext`](https://golang.org/pkg/net/http/#Request.WithContext) (added to net/http Go 1.7 onwards) performs. You should either use *just* gorilla/context, or moving forward, the new `http.Request.Context()`. -Read the full documentation here: https://www.gorillatoolkit.org/pkg/context +Read the full documentation here: http://www.gorillatoolkit.org/pkg/context diff --git a/vendor/github.com/gorilla/securecookie/.gitattributes b/vendor/github.com/gorilla/securecookie/.gitattributes deleted file mode 100644 index e9877b511a6c..000000000000 --- a/vendor/github.com/gorilla/securecookie/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -fuzz/corpus/*.sc linguist-detectable=false \ No newline at end of file diff --git a/vendor/github.com/gorilla/securecookie/.github/release_drafter.yml b/vendor/github.com/gorilla/securecookie/.github/release_drafter.yml deleted file mode 100644 index be3d952ebabb..000000000000 --- a/vendor/github.com/gorilla/securecookie/.github/release_drafter.yml +++ /dev/null @@ -1,4 +0,0 @@ -template: | - ## Changelog - - $CHANGES diff --git a/vendor/github.com/gorilla/securecookie/README.md b/vendor/github.com/gorilla/securecookie/README.md index a914d4ab3c46..aa7bd1a5b2ed 100644 --- a/vendor/github.com/gorilla/securecookie/README.md +++ b/vendor/github.com/gorilla/securecookie/README.md @@ -1,9 +1,10 @@ -# securecookie - +securecookie +============ [![GoDoc](https://godoc.org/github.com/gorilla/securecookie?status.svg)](https://godoc.org/github.com/gorilla/securecookie) [![Build Status](https://travis-ci.org/gorilla/securecookie.png?branch=master)](https://travis-ci.org/gorilla/securecookie) [![Sourcegraph](https://sourcegraph.com/github.com/gorilla/securecookie/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/securecookie?badge) -securecookie encodes and decodes authenticated and optionally encrypted + +securecookie encodes and decodes authenticated and optionally encrypted cookie values. Secure cookies can't be forged, because their values are validated using HMAC. @@ -32,10 +33,7 @@ to not use encryption. If set, the length must correspond to the block size of the encryption algorithm. For AES, used by default, valid lengths are 16, 24, or 32 bytes to select AES-128, AES-192, or AES-256. -Strong keys can be created using the convenience function -`GenerateRandomKey()`. Note that keys created using `GenerateRandomKey()` are not -automatically persisted. New keys will be created when the application is -restarted, and previously issued cookies will not be able to be decoded. +Strong keys can be created using the convenience function GenerateRandomKey(). Once a SecureCookie instance is set, use it to encode a cookie value: diff --git a/vendor/github.com/gorilla/securecookie/securecookie.go b/vendor/github.com/gorilla/securecookie/securecookie.go index a34f85128484..61af3900d635 100644 --- a/vendor/github.com/gorilla/securecookie/securecookie.go +++ b/vendor/github.com/gorilla/securecookie/securecookie.go @@ -506,10 +506,6 @@ func decode(value []byte) ([]byte, error) { // GenerateRandomKey creates a random key with the given length in bytes. // On failure, returns nil. // -// Note that keys created using `GenerateRandomKey()` are not automatically -// persisted. 
New keys will be created when the application is restarted, and -// previously issued cookies will not be able to be decoded. -// // Callers should explicitly check for the possibility of a nil return, treat // it as a failure of the system random number generator, and not continue. func GenerateRandomKey(length int) []byte { diff --git a/vendor/github.com/openshift/api/build/v1/consts.go b/vendor/github.com/openshift/api/build/v1/consts.go index 685cebc123c2..96917232d4ea 100644 --- a/vendor/github.com/openshift/api/build/v1/consts.go +++ b/vendor/github.com/openshift/api/build/v1/consts.go @@ -87,7 +87,11 @@ const ( // StatusReasonOutOfMemoryKilled indicates that the build pod was killed for its memory consumption StatusReasonOutOfMemoryKilled StatusReason = "OutOfMemoryKilled" - // StatusCannotRetrieveServiceAccount is the reason associated with a failure + // StatusReasonCannotRetrieveServiceAccount is the reason associated with a failure // to look up the service account associated with the BuildConfig. StatusReasonCannotRetrieveServiceAccount StatusReason = "CannotRetrieveServiceAccount" + + // StatusReasonBuildPodEvicted is the reason a build fails due to the build pod being evicted + // from its node + StatusReasonBuildPodEvicted StatusReason = "BuildPodEvicted" ) diff --git a/vendor/github.com/openshift/api/config/v1/register.go b/vendor/github.com/openshift/api/config/v1/register.go index e096d9e31340..66c342569aa2 100644 --- a/vendor/github.com/openshift/api/config/v1/register.go +++ b/vendor/github.com/openshift/api/config/v1/register.go @@ -44,8 +44,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &ConsoleList{}, &DNS{}, &DNSList{}, - &Features{}, - &FeaturesList{}, + &FeatureGate{}, + &FeatureGateList{}, &Image{}, &ImageList{}, &Infrastructure{}, diff --git a/vendor/github.com/openshift/api/config/v1/types_console.go b/vendor/github.com/openshift/api/config/v1/types_console.go index b1dea9cdf441..c8b5b482f50e 100644 --- a/vendor/github.com/openshift/api/config/v1/types_console.go +++ b/vendor/github.com/openshift/api/config/v1/types_console.go @@ -52,5 +52,6 @@ type ConsoleAuthentication struct { // provides the user the option to perform single logout (SLO) through the identity // provider to destroy their single sign-on session. // +optional + // +kubebuilder:validation:Pattern=^$|^((https):\/\/?)[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|\/?))$ LogoutRedirect string `json:"logoutRedirect,omitempty"` } diff --git a/vendor/github.com/openshift/api/config/v1/types_features.go b/vendor/github.com/openshift/api/config/v1/types_feature.go similarity index 79% rename from vendor/github.com/openshift/api/config/v1/types_features.go rename to vendor/github.com/openshift/api/config/v1/types_feature.go index 0cc54890cf27..df4e6a615800 100644 --- a/vendor/github.com/openshift/api/config/v1/types_features.go +++ b/vendor/github.com/openshift/api/config/v1/types_feature.go @@ -6,18 +6,18 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// Features holds cluster-wide information about feature gates. The canonical name is `cluster` -type Features struct { +// Feature holds cluster-wide information about feature gates. The canonical name is `cluster` +type FeatureGate struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. 
metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration // +required - Spec FeaturesSpec `json:"spec"` + Spec FeatureGateSpec `json:"spec"` // status holds observed values from the cluster. They may not be overridden. // +optional - Status FeaturesStatus `json:"status"` + Status FeatureGateStatus `json:"status"` } type FeatureSet string @@ -31,30 +31,30 @@ var ( TechPreviewNoUpgrade FeatureSet = "TechPreviewNoUpgrade" ) -type FeaturesSpec struct { +type FeatureGateSpec struct { // featureSet changes the list of features in the cluster. The default is empty. Be very careful adjusting this setting. // Turning on or off features may cause irreversible changes in your cluster which cannot be undone. FeatureSet FeatureSet `json:"featureSet,omitempty"` } -type FeaturesStatus struct { +type FeatureGateStatus struct { } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type FeaturesList struct { +type FeatureGateList struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. metav1.ListMeta `json:"metadata"` - Items []Features `json:"items"` + Items []FeatureGate `json:"items"` } -type FeatureEnabledDisabled struct { +type FeatureGateEnabledDisabled struct { Enabled []string Disabled []string } -// FeatureSets Contains a map of Feature names to Enabled/Disabled Features. +// FeatureSets Contains a map of Feature names to Enabled/Disabled Feature. // // NOTE: The caller needs to make sure to check for the existence of the value // using golang's existence field. A possible scenario is an upgrade where new @@ -62,11 +62,11 @@ type FeatureEnabledDisabled struct { // version of this file. In this upgrade scenario the map could return nil. // // example: -// if featureSet, ok := FeaturesSets["SomeNewFeature"]; ok { } +// if featureSet, ok := FeatureSets["SomeNewFeature"]; ok { } // // If you put an item in either of these lists, put your area and name on it so we can find owners. -var FeatureSets = map[FeatureSet]*FeatureEnabledDisabled{ - Default: &FeatureEnabledDisabled{ +var FeatureSets = map[FeatureSet]*FeatureGateEnabledDisabled{ + Default: &FeatureGateEnabledDisabled{ Enabled: []string{ "ExperimentalCriticalPodAnnotation", // sig-pod, sjenning "RotateKubeletServerCertificate", // sig-pod, sjenning @@ -76,7 +76,7 @@ var FeatureSets = map[FeatureSet]*FeatureEnabledDisabled{ "PersistentLocalVolumes", // sig-storage, hekumar@redhat.com }, }, - TechPreviewNoUpgrade: &FeatureEnabledDisabled{ + TechPreviewNoUpgrade: &FeatureGateEnabledDisabled{ Enabled: []string{ "ExperimentalCriticalPodAnnotation", // sig-pod, sjenning "RotateKubeletServerCertificate", // sig-pod, sjenning diff --git a/vendor/github.com/openshift/api/config/v1/types_scheduling.go b/vendor/github.com/openshift/api/config/v1/types_scheduling.go index f07e7b083e81..a65ced9592cd 100644 --- a/vendor/github.com/openshift/api/config/v1/types_scheduling.go +++ b/vendor/github.com/openshift/api/config/v1/types_scheduling.go @@ -27,6 +27,27 @@ type SchedulerSpec struct { // The namespace for this configmap is openshift-config. // +optional Policy ConfigMapNameReference `json:"policy"` + // defaultNodeSelector helps set the cluster-wide default node selector to + // restrict pod placement to specific nodes. This is applied to the pods + // created in all namespaces without a specified nodeSelector value. 
+ // For example, + // defaultNodeSelector: "type=user-node,region=east" would set nodeSelector + // field in pod spec to "type=user-node,region=east" to all pods created + // in all namespaces. Namespaces having project-wide node selectors won't be + // impacted even if this field is set. This adds an annotation section to + // the namespace. + // For example, if a new namespace is created with + // node-selector='type=user-node,region=east', + // the annotation openshift.io/node-selector: type=user-node,region=east + // gets added to the project. When the openshift.io/node-selector annotation + // is set on the project the value is used in preference to the value we are setting + // for defaultNodeSelector field. + // For instance, + // openshift.io/node-selector: "type=user-node,region=west" means + // that the default of "type=user-node,region=east" set in defaultNodeSelector + // would not be applied. + // +optional + DefaultNodeSelector string `json:"defaultNodeSelector,omitempty"` } type SchedulerStatus struct { diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go index a3be0e654fb8..54f6273efb17 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go @@ -1164,67 +1164,67 @@ func (in *EtcdStorageConfig) DeepCopy() *EtcdStorageConfig { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FeatureEnabledDisabled) DeepCopyInto(out *FeatureEnabledDisabled) { +func (in *FeatureGate) DeepCopyInto(out *FeatureGate) { *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Disabled != nil { - in, out := &in.Disabled, &out.Disabled - *out = make([]string, len(*in)) - copy(*out, *in) - } + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureEnabledDisabled. -func (in *FeatureEnabledDisabled) DeepCopy() *FeatureEnabledDisabled { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGate. +func (in *FeatureGate) DeepCopy() *FeatureGate { if in == nil { return nil } - out := new(FeatureEnabledDisabled) + out := new(FeatureGate) in.DeepCopyInto(out) return out } +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *FeatureGate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Features) DeepCopyInto(out *Features) { +func (in *FeatureGateEnabledDisabled) DeepCopyInto(out *FeatureGateEnabledDisabled) { *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - out.Status = in.Status + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = make([]string, len(*in)) + copy(*out, *in) + } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Features. 
-func (in *Features) DeepCopy() *Features { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateEnabledDisabled. +func (in *FeatureGateEnabledDisabled) DeepCopy() *FeatureGateEnabledDisabled { if in == nil { return nil } - out := new(Features) + out := new(FeatureGateEnabledDisabled) in.DeepCopyInto(out) return out } -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Features) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FeaturesList) DeepCopyInto(out *FeaturesList) { +func (in *FeatureGateList) DeepCopyInto(out *FeatureGateList) { *out = *in out.TypeMeta = in.TypeMeta out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]Features, len(*in)) + *out = make([]FeatureGate, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1232,18 +1232,18 @@ func (in *FeaturesList) DeepCopyInto(out *FeaturesList) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeaturesList. -func (in *FeaturesList) DeepCopy() *FeaturesList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateList. +func (in *FeatureGateList) DeepCopy() *FeatureGateList { if in == nil { return nil } - out := new(FeaturesList) + out := new(FeatureGateList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *FeaturesList) DeepCopyObject() runtime.Object { +func (in *FeatureGateList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -1251,33 +1251,33 @@ func (in *FeaturesList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FeaturesSpec) DeepCopyInto(out *FeaturesSpec) { +func (in *FeatureGateSpec) DeepCopyInto(out *FeatureGateSpec) { *out = *in return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeaturesSpec. -func (in *FeaturesSpec) DeepCopy() *FeaturesSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateSpec. +func (in *FeatureGateSpec) DeepCopy() *FeatureGateSpec { if in == nil { return nil } - out := new(FeaturesSpec) + out := new(FeatureGateSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FeaturesStatus) DeepCopyInto(out *FeaturesStatus) { +func (in *FeatureGateStatus) DeepCopyInto(out *FeatureGateStatus) { *out = *in return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeaturesStatus. -func (in *FeaturesStatus) DeepCopy() *FeaturesStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateStatus. 
+func (in *FeatureGateStatus) DeepCopy() *FeatureGateStatus { if in == nil { return nil } - out := new(FeaturesStatus) + out := new(FeatureGateStatus) in.DeepCopyInto(out) return out } diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go index 1545e4e2b498..b55376c149db 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go @@ -610,31 +610,31 @@ func (DNSZone) SwaggerDoc() map[string]string { return map_DNSZone } -var map_Features = map[string]string{ - "": "Features holds cluster-wide information about feature gates. The canonical name is `cluster`", +var map_FeatureGate = map[string]string{ + "": "Feature holds cluster-wide information about feature gates. The canonical name is `cluster`", "metadata": "Standard object's metadata.", "spec": "spec holds user settable values for configuration", "status": "status holds observed values from the cluster. They may not be overridden.", } -func (Features) SwaggerDoc() map[string]string { - return map_Features +func (FeatureGate) SwaggerDoc() map[string]string { + return map_FeatureGate } -var map_FeaturesList = map[string]string{ +var map_FeatureGateList = map[string]string{ "metadata": "Standard object's metadata.", } -func (FeaturesList) SwaggerDoc() map[string]string { - return map_FeaturesList +func (FeatureGateList) SwaggerDoc() map[string]string { + return map_FeatureGateList } -var map_FeaturesSpec = map[string]string{ +var map_FeatureGateSpec = map[string]string{ "featureSet": "featureSet changes the list of features in the cluster. The default is empty. Be very careful adjusting this setting. Turning on or off features may cause irreversible changes in your cluster which cannot be undone.", } -func (FeaturesSpec) SwaggerDoc() map[string]string { - return map_FeaturesSpec +func (FeatureGateSpec) SwaggerDoc() map[string]string { + return map_FeatureGateSpec } var map_Image = map[string]string{ @@ -1132,7 +1132,8 @@ func (SchedulerList) SwaggerDoc() map[string]string { } var map_SchedulerSpec = map[string]string{ - "policy": "policy is a reference to a ConfigMap containing scheduler policy which has user specified predicates and priorities. If this ConfigMap is not available scheduler will default to use DefaultAlgorithmProvider. The namespace for this configmap is openshift-config.", + "policy": "policy is a reference to a ConfigMap containing scheduler policy which has user specified predicates and priorities. If this ConfigMap is not available scheduler will default to use DefaultAlgorithmProvider. The namespace for this configmap is openshift-config.", + "defaultNodeSelector": "defaultNodeSelector helps set the cluster-wide default node selector to restrict pod placement to specific nodes. This is applied to the pods created in all namespaces without a specified nodeSelector value. For example, defaultNodeSelector: \"type=user-node,region=east\" would set nodeSelector field in pod spec to \"type=user-node,region=east\" to all pods created in all namespaces. Namespaces having project-wide node selectors won't be impacted even if this field is set. This adds an annotation section to the namespace. For example, if a new namespace is created with node-selector='type=user-node,region=east', the annotation openshift.io/node-selector: type=user-node,region=east gets added to the project. 
When the openshift.io/node-selector annotation is set on the project the value is used in preference to the value we are setting for defaultNodeSelector field. For instance, openshift.io/node-selector: \"type=user-node,region=west\" means that the default of \"type=user-node,region=east\" set in defaultNodeSelector would not be applied.", } func (SchedulerSpec) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/operator/v1/register.go b/vendor/github.com/openshift/api/operator/v1/register.go index 8a4a2805fd8e..1d4a93399de7 100644 --- a/vendor/github.com/openshift/api/operator/v1/register.go +++ b/vendor/github.com/openshift/api/operator/v1/register.go @@ -44,6 +44,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &KubeControllerManagerList{}, &KubeScheduler{}, &KubeSchedulerList{}, + &Network{}, + &NetworkList{}, &OpenShiftAPIServer{}, &OpenShiftAPIServerList{}, &OpenShiftControllerManager{}, diff --git a/vendor/github.com/openshift/api/operator/v1/types.go b/vendor/github.com/openshift/api/operator/v1/types.go index 1e7509fca720..58f869fb8437 100644 --- a/vendor/github.com/openshift/api/operator/v1/types.go +++ b/vendor/github.com/openshift/api/operator/v1/types.go @@ -129,16 +129,19 @@ type OperatorStatus struct { ObservedGeneration int64 `json:"observedGeneration,omitempty"` // conditions is a list of conditions and their status + // +optional Conditions []OperatorCondition `json:"conditions,omitempty"` // version is the level this availability applies to - Version string `json:"version"` + // +optional + Version string `json:"version,omitempty"` // readyReplicas indicates how many replicas are ready and at the desired state ReadyReplicas int32 `json:"readyReplicas"` // generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. - Generations []GenerationStatus `json:"generations"` + // +optional + Generations []GenerationStatus `json:"generations,omitempty"` } // GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. 
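The config/v1 changes above rename `Features` to `FeatureGate` and key the `FeatureSets` map by `FeatureSet`, with the NOTE warning that an entry may be missing after an upgrade and must be looked up with the comma-ok idiom. A minimal sketch of that lookup against the vendored package, assuming it is imported as `configv1`; the `enabledIn` helper is illustrative and not part of the API:

```go
package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

// enabledIn is a hypothetical helper (not part of the vendored API): it
// reports whether a named feature gate is listed as Enabled for the given
// FeatureSet, and whether that FeatureSet is known at all.
func enabledIn(set configv1.FeatureSet, feature string) (enabled bool, known bool) {
	def, ok := configv1.FeatureSets[set]
	if !ok || def == nil {
		// Missing entry, e.g. version skew during an upgrade; treat as unknown.
		return false, false
	}
	for _, name := range def.Enabled {
		if name == feature {
			return true, true
		}
	}
	return false, true
}

func main() {
	if on, known := enabledIn(configv1.TechPreviewNoUpgrade, "RotateKubeletServerCertificate"); known {
		fmt.Println("RotateKubeletServerCertificate enabled:", on)
	}
}
```

Treating a missing map entry as "unknown" rather than "disabled" follows the NOTE in the type's comment instead of assuming a default.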
@@ -206,9 +209,11 @@ type StaticPodOperatorStatus struct { OperatorStatus `json:",inline"` // latestAvailableRevision is the deploymentID of the most recent deployment - LatestAvailableRevision int32 `json:"latestAvailableRevision"` + // +optional + LatestAvailableRevision int32 `json:"latestAvailableRevision,omitempty"` // nodeStatuses track the deployment values and errors across individual nodes + // +optional + NodeStatuses []NodeStatus `json:"nodeStatuses,omitempty"` } diff --git a/vendor/github.com/openshift/api/operator/v1/types_console.go b/vendor/github.com/openshift/api/operator/v1/types_console.go index 3f3ee26a3a08..6aa8edfc192f 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_console.go +++ b/vendor/github.com/openshift/api/operator/v1/types_console.go @@ -48,13 +48,15 @@ const ( // Branding for OpenShift BrandOpenShift Brand = "openshift" // Branding for The Origin Community Distribution of Kubernetes - BrandOKD Brand = "okd" + BrandOKD Brand = "okd" // Branding for OpenShift Online - BrandOnline Brand = "online" + BrandOnline Brand = "online" // Branding for OpenShift Container Platform - BrandOCP Brand = "ocp" + BrandOCP Brand = "ocp" // Branding for OpenShift Dedicated BrandDedicated Brand = "dedicated" + // Branding for Azure Red Hat OpenShift + BrandAzure Brand = "azure" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/github.com/openshift/api/operator/v1/types_ingress.go b/vendor/github.com/openshift/api/operator/v1/types_ingress.go index 9ec6b116872d..79bcaf7e9e39 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_ingress.go +++ b/vendor/github.com/openshift/api/operator/v1/types_ingress.go @@ -67,8 +67,10 @@ type IngressControllerSpec struct { // If unset, the default is based on // infrastructure.config.openshift.io/cluster .status.platform: // - // AWS: LoadBalancerService - // All other platform types: Private + // AWS: LoadBalancerService + // Libvirt: HostNetwork + // + // Any other platform types (including None) default to HostNetwork. // // endpointPublishingStrategy cannot be updated. // @@ -195,6 +197,23 @@ type EndpointPublishingStrategy struct { Type EndpointPublishingStrategyType `json:"type"` } +var ( + // Available indicates the ingress controller deployment is available. + IngressControllerAvailableConditionType = "Available" + // LoadBalancerManaged indicates the management status of any load balancer + // service associated with an ingress controller. + LoadBalancerManagedIngressConditionType = "LoadBalancerManaged" + // LoadBalancerReady indicates the ready state of any load balancer service + // associated with an ingress controller. + LoadBalancerReadyIngressConditionType = "LoadBalancerReady" + // DNSManaged indicates the management status of any DNS records for the + // ingress controller. + DNSManagedIngressConditionType = "DNSManaged" + // DNSReady indicates the ready state of any DNS records for the ingress + // controller. + DNSReadyIngressConditionType = "DNSReady" +) + // IngressControllerStatus defines the observed status of the IngressController. type IngressControllerStatus struct { // availableReplicas is number of observed available replicas according to the @@ -211,6 +230,40 @@ type IngressControllerStatus struct { // endpointPublishingStrategy is the actual strategy in use. EndpointPublishingStrategy *EndpointPublishingStrategy `json:"endpointPublishingStrategy,omitempty"` + + // conditions is a list of conditions and their status.
+ // + // Available means the ingress controller deployment is available and + // servicing route and ingress resources (i.e, .status.availableReplicas + // equals .spec.replicas) + // + // There are additional conditions which indicate the status of other + // ingress controller features and capabilities. + // + // * LoadBalancerManaged + // - True if the following conditions are met: + // * The endpoint publishing strategy requires a service load balancer. + // - False if any of those conditions are unsatisfied. + // + // * LoadBalancerReady + // - True if the following conditions are met: + // * A load balancer is managed. + // * The load balancer is ready. + // - False if any of those conditions are unsatisfied. + // + // * DNSManaged + // - True if the following conditions are met: + // * The endpoint publishing strategy and platform support DNS. + // * The ingress controller domain is set. + // * dns.config.openshift.io/cluster configures DNS zones. + // - False if any of those conditions are unsatisfied. + // + // * DNSReady + // - True if the following conditions are met: + // * DNS is managed. + // * DNS records have been successfully created. + // - False if any of those conditions are unsatisfied. + Conditions []OperatorCondition `json:"conditions,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/github.com/openshift/api/operator/v1/types_network.go b/vendor/github.com/openshift/api/operator/v1/types_network.go new file mode 100644 index 000000000000..53052e8b2ae7 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1/types_network.go @@ -0,0 +1,189 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Network describes the cluster's desired network configuration. It is +// consumed by the cluster-network-operator. +// +k8s:openapi-gen=true +type Network struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec NetworkSpec `json:"spec,omitempty"` + Status NetworkStatus `json:"status,omitempty"` +} + +// NetworkStatus is currently unused. Instead, status +// is reported in the Network.config.openshift.io object. +type NetworkStatus struct { +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NetworkList contains a list of Network configurations +type NetworkList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Network `json:"items"` +} + +// NetworkSpec is the top-level network configuration object. +type NetworkSpec struct { + // clusterNetwork is the IP address pool to use for pod IPs. + // Some network providers, e.g. OpenShift SDN, support multiple ClusterNetworks. + // Others only support one. This is equivalent to the cluster-cidr. + ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork"` + + // serviceNetwork is the ip address pool to use for Service IPs + // Currently, all existing network providers only support a single value + // here, but this is an array to allow for growth. + ServiceNetwork []string `json:"serviceNetwork"` + + // defaultNetwork is the "default" network that all pods will receive + DefaultNetwork DefaultNetworkDefinition `json:"defaultNetwork"` + + // additionalNetworks is a list of extra networks to make available to pods + // when multiple networks are enabled. 
+ AdditionalNetworks []AdditionalNetworkDefinition `json:"additionalNetworks,omitempty"` + + // disableMultiNetwork specifies whether or not multiple pod network + // support should be disabled. If unset, this property defaults to + // 'false' and multiple network support is enabled. + DisableMultiNetwork *bool `json:"disableMultiNetwork,omitempty"` + + // deployKubeProxy specifies whether or not a standalone kube-proxy should + // be deployed by the operator. Some network providers include kube-proxy + // or similar functionality. If unset, the plugin will attempt to select + // the correct value, which is false when OpenShift SDN and ovn-kubernetes are + // used and true otherwise. + // +optional + DeployKubeProxy *bool `json:"deployKubeProxy,omitempty"` + + // kubeProxyConfig lets us configure desired proxy configuration. + // If not specified, sensible defaults will be chosen by OpenShift directly. + // Not consumed by all network providers - currently only openshift-sdn. + KubeProxyConfig *ProxyConfig `json:"kubeProxyConfig,omitempty"` +} + +// ClusterNetworkEntry is a subnet from which to allocate PodIPs. A network of size +// HostPrefix (in CIDR notation) will be allocated when nodes join the cluster. +// Not all network providers support multiple ClusterNetworks +type ClusterNetworkEntry struct { + CIDR string `json:"cidr"` + HostPrefix uint32 `json:"hostPrefix"` +} + +// DefaultNetworkDefinition represents a single network plugin's configuration. +// type must be specified, along with exactly one "Config" that matches the type. +type DefaultNetworkDefinition struct { + // type is the type of network + // All NetworkTypes are supported except for NetworkTypeRaw + Type NetworkType `json:"type"` + + // openShiftSDNConfig configures the openshift-sdn plugin + // +optional + OpenShiftSDNConfig *OpenShiftSDNConfig `json:"openshiftSDNConfig,omitempty"` + + // oVNKubernetesConfig configures the ovn-kubernetes plugin. This is currently + // not implemented. + // +optional + OVNKubernetesConfig *OVNKubernetesConfig `json:"ovnKubernetesConfig,omitempty"` +} + +// AdditionalNetworkDefinition configures an extra network that is available but not +// created by default. Instead, pods must request them by name. +// type must be specified, along with exactly one "Config" that matches the type. +type AdditionalNetworkDefinition struct { + // type is the type of network + // The only supported value is NetworkTypeRaw + Type NetworkType `json:"type"` + + // name is the name of the network. This will be populated in the resulting CRD + // This must be unique. + Name string `json:"name"` + + // rawCNIConfig is the raw CNI configuration json to create in the + // NetworkAttachmentDefinition CRD + RawCNIConfig string `json:"rawCNIConfig"` +} + +// OpenShiftSDNConfig configures the three openshift-sdn plugins +type OpenShiftSDNConfig struct { + // mode is one of "Multitenant", "Subnet", or "NetworkPolicy" + Mode SDNMode `json:"mode"` + + // vxlanPort is the port to use for all vxlan packets. The default is 4789. + // +optional + VXLANPort *uint32 `json:"vxlanPort,omitempty"` + + // mtu is the mtu to use for the tunnel interface. Defaults to 1450 if unset. + // This must be 50 bytes smaller than the machine's uplink. + // +optional + MTU *uint32 `json:"mtu,omitempty"` + + // useExternalOpenvswitch tells the operator not to install openvswitch, because + // it will be provided separately. If set, you must provide it yourself. 
+ // +optional + UseExternalOpenvswitch *bool `json:"useExternalOpenvswitch,omitempty"` +} + +// ovnKubernetesConfig is the proposed configuration parameters for networks +// using the ovn-kubernetes network project +type OVNKubernetesConfig struct { + // mtu is the MTU to use for the tunnel interface. This must be 100 + // bytes smaller than the uplink mtu. + // Default is 1400 + MTU *uint32 `json:"mtu,omitempty"` +} + +// NetworkType describes the network plugin type to configure +type NetworkType string + +// ProxyConfig defines the configuration knobs for kubeproxy +// All of these are optional and have sensible defaults +type ProxyConfig struct { + // The period that iptables rules are refreshed. + // Default: 30s + IptablesSyncPeriod string `json:"iptablesSyncPeriod,omitempty"` + + // The address to "bind" on + // Defaults to 0.0.0.0 + BindAddress string `json:"bindAddress,omitempty"` + + // Any additional arguments to pass to the kubeproxy process + ProxyArguments map[string][]string `json:"proxyArguments,omitempty"` +} + +const ( + // NetworkTypeOpenShiftSDN means the openshift-sdn plugin will be configured + NetworkTypeOpenShiftSDN NetworkType = "OpenShiftSDN" + + // NetworkTypeOVNKubernetes means the ovn-kubernetes project will be configured. + // This is currently not implemented. + NetworkTypeOVNKubernetes NetworkType = "OVNKubernetes" + + // NetworkTypeRaw + NetworkTypeRaw NetworkType = "Raw" +) + +// SDNMode is the Mode the openshift-sdn plugin is in +type SDNMode string + +const ( + // SDNModeSubnet is a simple mode that offers no isolation between pods + SDNModeSubnet SDNMode = "Subnet" + + // SDNModeMultitenant is a special "multitenant" mode that offers limited + // isolation configuration between namespaces + SDNModeMultitenant SDNMode = "Multitenant" + + // SDNModeNetworkPolicy is a full NetworkPolicy implementation that allows + // for sophisticated network isolation and segmenting. This is the default. + SDNModeNetworkPolicy SDNMode = "NetworkPolicy" +) diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go index cfdda75919c8..105519ea8d6e 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go @@ -10,6 +10,22 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdditionalNetworkDefinition) DeepCopyInto(out *AdditionalNetworkDefinition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdditionalNetworkDefinition. +func (in *AdditionalNetworkDefinition) DeepCopy() *AdditionalNetworkDefinition { + if in == nil { + return nil + } + out := new(AdditionalNetworkDefinition) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Authentication) DeepCopyInto(out *Authentication) { *out = *in @@ -105,6 +121,22 @@ func (in *AuthenticationStatus) DeepCopy() *AuthenticationStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterNetworkEntry) DeepCopyInto(out *ClusterNetworkEntry) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkEntry. +func (in *ClusterNetworkEntry) DeepCopy() *ClusterNetworkEntry { + if in == nil { + return nil + } + out := new(ClusterNetworkEntry) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Console) DeepCopyInto(out *Console) { *out = *in @@ -217,6 +249,40 @@ func (in *ConsoleStatus) DeepCopy() *ConsoleStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultNetworkDefinition) DeepCopyInto(out *DefaultNetworkDefinition) { + *out = *in + if in.OpenShiftSDNConfig != nil { + in, out := &in.OpenShiftSDNConfig, &out.OpenShiftSDNConfig + if *in == nil { + *out = nil + } else { + *out = new(OpenShiftSDNConfig) + (*in).DeepCopyInto(*out) + } + } + if in.OVNKubernetesConfig != nil { + in, out := &in.OVNKubernetesConfig, &out.OVNKubernetesConfig + if *in == nil { + *out = nil + } else { + *out = new(OVNKubernetesConfig) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultNetworkDefinition. +func (in *DefaultNetworkDefinition) DeepCopy() *DefaultNetworkDefinition { + if in == nil { + return nil + } + out := new(DefaultNetworkDefinition) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EndpointPublishingStrategy) DeepCopyInto(out *EndpointPublishingStrategy) { *out = *in @@ -487,6 +553,13 @@ func (in *IngressControllerStatus) DeepCopyInto(out *IngressControllerStatus) { **out = **in } } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]OperatorCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -839,6 +912,142 @@ func (in *MyOperatorResourceStatus) DeepCopy() *MyOperatorResourceStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Network) DeepCopyInto(out *Network) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Network. +func (in *Network) DeepCopy() *Network { + if in == nil { + return nil + } + out := new(Network) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Network) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkList) DeepCopyInto(out *NetworkList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Network, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkList. 
+func (in *NetworkList) DeepCopy() *NetworkList { + if in == nil { + return nil + } + out := new(NetworkList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NetworkList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) { + *out = *in + if in.ClusterNetwork != nil { + in, out := &in.ClusterNetwork, &out.ClusterNetwork + *out = make([]ClusterNetworkEntry, len(*in)) + copy(*out, *in) + } + if in.ServiceNetwork != nil { + in, out := &in.ServiceNetwork, &out.ServiceNetwork + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.DefaultNetwork.DeepCopyInto(&out.DefaultNetwork) + if in.AdditionalNetworks != nil { + in, out := &in.AdditionalNetworks, &out.AdditionalNetworks + *out = make([]AdditionalNetworkDefinition, len(*in)) + copy(*out, *in) + } + if in.DisableMultiNetwork != nil { + in, out := &in.DisableMultiNetwork, &out.DisableMultiNetwork + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + if in.DeployKubeProxy != nil { + in, out := &in.DeployKubeProxy, &out.DeployKubeProxy + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + if in.KubeProxyConfig != nil { + in, out := &in.KubeProxyConfig, &out.KubeProxyConfig + if *in == nil { + *out = nil + } else { + *out = new(ProxyConfig) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSpec. +func (in *NetworkSpec) DeepCopy() *NetworkSpec { + if in == nil { + return nil + } + out := new(NetworkSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkStatus) DeepCopyInto(out *NetworkStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkStatus. +func (in *NetworkStatus) DeepCopy() *NetworkStatus { + if in == nil { + return nil + } + out := new(NetworkStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NodePlacement) DeepCopyInto(out *NodePlacement) { *out = *in @@ -885,6 +1094,31 @@ func (in *NodeStatus) DeepCopy() *NodeStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OVNKubernetesConfig) DeepCopyInto(out *OVNKubernetesConfig) { + *out = *in + if in.MTU != nil { + in, out := &in.MTU, &out.MTU + if *in == nil { + *out = nil + } else { + *out = new(uint32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OVNKubernetesConfig. +func (in *OVNKubernetesConfig) DeepCopy() *OVNKubernetesConfig { + if in == nil { + return nil + } + out := new(OVNKubernetesConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *OpenShiftAPIServer) DeepCopyInto(out *OpenShiftAPIServer) { *out = *in @@ -1075,6 +1309,49 @@ func (in *OpenShiftControllerManagerStatus) DeepCopy() *OpenShiftControllerManag return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenShiftSDNConfig) DeepCopyInto(out *OpenShiftSDNConfig) { + *out = *in + if in.VXLANPort != nil { + in, out := &in.VXLANPort, &out.VXLANPort + if *in == nil { + *out = nil + } else { + *out = new(uint32) + **out = **in + } + } + if in.MTU != nil { + in, out := &in.MTU, &out.MTU + if *in == nil { + *out = nil + } else { + *out = new(uint32) + **out = **in + } + } + if in.UseExternalOpenvswitch != nil { + in, out := &in.UseExternalOpenvswitch, &out.UseExternalOpenvswitch + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftSDNConfig. +func (in *OpenShiftSDNConfig) DeepCopy() *OpenShiftSDNConfig { + if in == nil { + return nil + } + out := new(OpenShiftSDNConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *OperandContainerSpec) DeepCopyInto(out *OperandContainerSpec) { *out = *in @@ -1198,6 +1475,34 @@ func (in *OperatorStatus) DeepCopy() *OperatorStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyConfig) DeepCopyInto(out *ProxyConfig) { + *out = *in + if in.ProxyArguments != nil { + in, out := &in.ProxyArguments, &out.ProxyArguments + *out = make(map[string][]string, len(*in)) + for key, val := range *in { + if val == nil { + (*out)[key] = nil + } else { + (*out)[key] = make([]string, len(val)) + copy((*out)[key], val) + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfig. +func (in *ProxyConfig) DeepCopy() *ProxyConfig { + if in == nil { + return nil + } + out := new(ProxyConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ResourcePatch) DeepCopyInto(out *ResourcePatch) { *out = *in diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go index c81f34c2ff1f..f2358024554d 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go @@ -219,7 +219,7 @@ var map_IngressControllerSpec = map[string]string{ "": "IngressControllerSpec is the specification of the desired behavior of the IngressController.", "domain": "domain is a DNS name serviced by the ingress controller and is used to configure multiple features:\n\n* For the LoadBalancerService endpoint publishing strategy, domain is\n used to configure DNS records. See endpointPublishingStrategy.\n\n* When using a generated default certificate, the certificate will be valid\n for domain and its subdomains. 
See defaultCertificate.\n\n* The value is published to individual Route statuses so that end-users\n know where to target external DNS records.\n\ndomain must be unique among all IngressControllers, and cannot be updated.\n\nIf empty, defaults to ingress.config.openshift.io/cluster .spec.domain.", "replicas": "replicas is the desired number of ingress controller replicas. If unset, defaults to 2.", - "endpointPublishingStrategy": "endpointPublishingStrategy is used to publish the ingress controller endpoints to other networks, enable load balancer integrations, etc.\n\nIf unset, the default is based on infrastructure.config.openshift.io/cluster .status.platform:\n\n AWS: LoadBalancerService\n All other platform types: Private\n\nendpointPublishingStrategy cannot be updated.", + "endpointPublishingStrategy": "endpointPublishingStrategy is used to publish the ingress controller endpoints to other networks, enable load balancer integrations, etc.\n\nIf unset, the default is based on infrastructure.config.openshift.io/cluster .status.platform:\n\n AWS: LoadBalancerService\n Libvirt: HostNetwork\n\nAny other platform types (including None) default to HostNetwork.\n\nendpointPublishingStrategy cannot be updated.", "defaultCertificate": "defaultCertificate is a reference to a secret containing the default certificate served by the ingress controller. When Routes don't specify their own certificate, defaultCertificate is used.\n\nThe secret must contain the following keys and data:\n\n tls.crt: certificate file contents\n tls.key: key file contents\n\nIf unset, a wildcard certificate is automatically generated and used. The certificate is valid for the ingress controller domain (and subdomains) and the generated certificate's CA will be automatically integrated with the cluster's trust store.\n\nThe in-use certificate (whether generated or user-specified) will be automatically integrated with OpenShift's built-in OAuth server.", "namespaceSelector": "namespaceSelector is used to filter the set of namespaces serviced by the ingress controller. This is useful for implementing shards.\n\nIf unset, the default is no filtering.", "routeSelector": "routeSelector is used to filter the set of Routes serviced by the ingress controller. This is useful for implementing shards.\n\nIf unset, the default is no filtering.", @@ -236,6 +236,7 @@ var map_IngressControllerStatus = map[string]string{ "selector": "selector is a label selector, in string format, for ingress controller pods corresponding to the IngressController. 
The number of matching pods should equal the value of availableReplicas.", "domain": "domain is the actual domain in use.", "endpointPublishingStrategy": "endpointPublishingStrategy is the actual strategy in use.", + "conditions": "conditions is a list of conditions and their status.\n\nAvailable means the ingress controller deployment is available and servicing route and ingress resources (i.e, .status.availableReplicas equals .spec.replicas)\n\nThere are additional conditions which indicate the status of other ingress controller features and capabilities.\n\n * LoadBalancerManaged\n - True if the following conditions are met:\n * The endpoint publishing strategy requires a service load balancer.\n - False if any of those conditions are unsatisfied.\n\n * LoadBalancerReady\n - True if the following conditions are met:\n * A load balancer is managed.\n * The load balancer is ready.\n - False if any of those conditions are unsatisfied.\n\n * DNSManaged\n - True if the following conditions are met:\n * The endpoint publishing strategy and platform support DNS.\n * The ingress controller domain is set.\n * dns.config.openshift.io/cluster configures DNS zones.\n - False if any of those conditions are unsatisfied.\n\n * DNSReady\n - True if the following conditions are met:\n * DNS is managed.\n * DNS records have been successfully created.\n - False if any of those conditions are unsatisfied.", } func (IngressControllerStatus) SwaggerDoc() map[string]string { @@ -303,6 +304,107 @@ func (KubeControllerManagerSpec) SwaggerDoc() map[string]string { return map_KubeControllerManagerSpec } +var map_AdditionalNetworkDefinition = map[string]string{ + "": "AdditionalNetworkDefinition configures an extra network that is available but not created by default. Instead, pods must request them by name. type must be specified, along with exactly one \"Config\" that matches the type.", + "type": "type is the type of network The only supported value is NetworkTypeRaw", + "name": "name is the name of the network. This will be populated in the resulting CRD This must be unique.", + "rawCNIConfig": "rawCNIConfig is the raw CNI configuration json to create in the NetworkAttachmentDefinition CRD", +} + +func (AdditionalNetworkDefinition) SwaggerDoc() map[string]string { + return map_AdditionalNetworkDefinition +} + +var map_ClusterNetworkEntry = map[string]string{ + "": "ClusterNetworkEntry is a subnet from which to allocate PodIPs. A network of size HostPrefix (in CIDR notation) will be allocated when nodes join the cluster. Not all network providers support multiple ClusterNetworks", +} + +func (ClusterNetworkEntry) SwaggerDoc() map[string]string { + return map_ClusterNetworkEntry +} + +var map_DefaultNetworkDefinition = map[string]string{ + "": "DefaultNetworkDefinition represents a single network plugin's configuration. type must be specified, along with exactly one \"Config\" that matches the type.", + "type": "type is the type of network All NetworkTypes are supported except for NetworkTypeRaw", + "openshiftSDNConfig": "openShiftSDNConfig configures the openshift-sdn plugin", + "ovnKubernetesConfig": "oVNKubernetesConfig configures the ovn-kubernetes plugin. This is currently not implemented.", +} + +func (DefaultNetworkDefinition) SwaggerDoc() map[string]string { + return map_DefaultNetworkDefinition +} + +var map_Network = map[string]string{ + "": "Network describes the cluster's desired network configuration. 
It is consumed by the cluster-network-operator.", +} + +func (Network) SwaggerDoc() map[string]string { + return map_Network +} + +var map_NetworkList = map[string]string{ + "": "NetworkList contains a list of Network configurations", +} + +func (NetworkList) SwaggerDoc() map[string]string { + return map_NetworkList +} + +var map_NetworkSpec = map[string]string{ + "": "NetworkSpec is the top-level network configuration object.", + "clusterNetwork": "clusterNetwork is the IP address pool to use for pod IPs. Some network providers, e.g. OpenShift SDN, support multiple ClusterNetworks. Others only support one. This is equivalent to the cluster-cidr.", + "serviceNetwork": "serviceNetwork is the ip address pool to use for Service IPs Currently, all existing network providers only support a single value here, but this is an array to allow for growth.", + "defaultNetwork": "defaultNetwork is the \"default\" network that all pods will receive", + "additionalNetworks": "additionalNetworks is a list of extra networks to make available to pods when multiple networks are enabled.", + "disableMultiNetwork": "disableMultiNetwork specifies whether or not multiple pod network support should be disabled. If unset, this property defaults to 'false' and multiple network support is enabled.", + "deployKubeProxy": "deployKubeProxy specifies whether or not a standalone kube-proxy should be deployed by the operator. Some network providers include kube-proxy or similar functionality. If unset, the plugin will attempt to select the correct value, which is false when OpenShift SDN and ovn-kubernetes are used and true otherwise.", + "kubeProxyConfig": "kubeProxyConfig lets us configure desired proxy configuration. If not specified, sensible defaults will be chosen by OpenShift directly. Not consumed by all network providers - currently only openshift-sdn.", +} + +func (NetworkSpec) SwaggerDoc() map[string]string { + return map_NetworkSpec +} + +var map_NetworkStatus = map[string]string{ + "": "NetworkStatus is currently unused. Instead, status is reported in the Network.config.openshift.io object.", +} + +func (NetworkStatus) SwaggerDoc() map[string]string { + return map_NetworkStatus +} + +var map_OVNKubernetesConfig = map[string]string{ + "": "ovnKubernetesConfig is the proposed configuration parameters for networks using the ovn-kubernetes network project", + "mtu": "mtu is the MTU to use for the tunnel interface. This must be 100 bytes smaller than the uplink mtu. Default is 1400", +} + +func (OVNKubernetesConfig) SwaggerDoc() map[string]string { + return map_OVNKubernetesConfig +} + +var map_OpenShiftSDNConfig = map[string]string{ + "": "OpenShiftSDNConfig configures the three openshift-sdn plugins", + "mode": "mode is one of \"Multitenant\", \"Subnet\", or \"NetworkPolicy\"", + "vxlanPort": "vxlanPort is the port to use for all vxlan packets. The default is 4789.", + "mtu": "mtu is the mtu to use for the tunnel interface. Defaults to 1450 if unset. This must be 50 bytes smaller than the machine's uplink.", + "useExternalOpenvswitch": "useExternalOpenvswitch tells the operator not to install openvswitch, because it will be provided separately. If set, you must provide it yourself.", +} + +func (OpenShiftSDNConfig) SwaggerDoc() map[string]string { + return map_OpenShiftSDNConfig +} + +var map_ProxyConfig = map[string]string{ + "": "ProxyConfig defines the configuration knobs for kubeproxy All of these are optional and have sensible defaults", + "iptablesSyncPeriod": "The period that iptables rules are refreshed. 
Default: 30s", + "bindAddress": "The address to \"bind\" on Defaults to 0.0.0.0", + "proxyArguments": "Any additional arguments to pass to the kubeproxy process", +} + +func (ProxyConfig) SwaggerDoc() map[string]string { + return map_ProxyConfig +} + var map_OpenShiftAPIServer = map[string]string{ "": "OpenShiftAPIServer provides information to configure an operator to manage openshift-apiserver.", } diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/config_client.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/config_client.go index cf870c8e5544..cd28c36e2d07 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/config_client.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/config_client.go @@ -18,7 +18,7 @@ type ConfigV1Interface interface { ClusterVersionsGetter ConsolesGetter DNSesGetter - FeaturesGetter + FeatureGatesGetter ImagesGetter InfrastructuresGetter IngressesGetter @@ -62,8 +62,8 @@ func (c *ConfigV1Client) DNSes() DNSInterface { return newDNSes(c) } -func (c *ConfigV1Client) Features() FeaturesInterface { - return newFeatures(c) +func (c *ConfigV1Client) FeatureGates() FeatureGateInterface { + return newFeatureGates(c) } func (c *ConfigV1Client) Images() ImageInterface { diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_config_client.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_config_client.go index 76419cc395c4..45a6a4b91482 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_config_client.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_config_client.go @@ -40,8 +40,8 @@ func (c *FakeConfigV1) DNSes() v1.DNSInterface { return &FakeDNSes{c} } -func (c *FakeConfigV1) Features() v1.FeaturesInterface { - return &FakeFeatures{c} +func (c *FakeConfigV1) FeatureGates() v1.FeatureGateInterface { + return &FakeFeatureGates{c} } func (c *FakeConfigV1) Images() v1.ImageInterface { diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_featuregate.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_featuregate.go new file mode 100644 index 000000000000..dc0a0a0967c4 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_featuregate.go @@ -0,0 +1,115 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + config_v1 "github.com/openshift/api/config/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeFeatureGates implements FeatureGateInterface +type FakeFeatureGates struct { + Fake *FakeConfigV1 +} + +var featuregatesResource = schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1", Resource: "featuregates"} + +var featuregatesKind = schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1", Kind: "FeatureGate"} + +// Get takes name of the featureGate, and returns the corresponding featureGate object, and an error if there is any. 
+func (c *FakeFeatureGates) Get(name string, options v1.GetOptions) (result *config_v1.FeatureGate, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(featuregatesResource, name), &config_v1.FeatureGate{}) + if obj == nil { + return nil, err + } + return obj.(*config_v1.FeatureGate), err +} + +// List takes label and field selectors, and returns the list of FeatureGates that match those selectors. +func (c *FakeFeatureGates) List(opts v1.ListOptions) (result *config_v1.FeatureGateList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(featuregatesResource, featuregatesKind, opts), &config_v1.FeatureGateList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &config_v1.FeatureGateList{ListMeta: obj.(*config_v1.FeatureGateList).ListMeta} + for _, item := range obj.(*config_v1.FeatureGateList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested featureGates. +func (c *FakeFeatureGates) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(featuregatesResource, opts)) +} + +// Create takes the representation of a featureGate and creates it. Returns the server's representation of the featureGate, and an error, if there is any. +func (c *FakeFeatureGates) Create(featureGate *config_v1.FeatureGate) (result *config_v1.FeatureGate, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(featuregatesResource, featureGate), &config_v1.FeatureGate{}) + if obj == nil { + return nil, err + } + return obj.(*config_v1.FeatureGate), err +} + +// Update takes the representation of a featureGate and updates it. Returns the server's representation of the featureGate, and an error, if there is any. +func (c *FakeFeatureGates) Update(featureGate *config_v1.FeatureGate) (result *config_v1.FeatureGate, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(featuregatesResource, featureGate), &config_v1.FeatureGate{}) + if obj == nil { + return nil, err + } + return obj.(*config_v1.FeatureGate), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeFeatureGates) UpdateStatus(featureGate *config_v1.FeatureGate) (*config_v1.FeatureGate, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(featuregatesResource, "status", featureGate), &config_v1.FeatureGate{}) + if obj == nil { + return nil, err + } + return obj.(*config_v1.FeatureGate), err +} + +// Delete takes name of the featureGate and deletes it. Returns an error if one occurs. +func (c *FakeFeatureGates) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(featuregatesResource, name), &config_v1.FeatureGate{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeFeatureGates) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(featuregatesResource, listOptions) + + _, err := c.Fake.Invokes(action, &config_v1.FeatureGateList{}) + return err +} + +// Patch applies the patch and returns the patched featureGate. 
+func (c *FakeFeatureGates) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *config_v1.FeatureGate, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(featuregatesResource, name, data, subresources...), &config_v1.FeatureGate{}) + if obj == nil { + return nil, err + } + return obj.(*config_v1.FeatureGate), err +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_features.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_features.go deleted file mode 100644 index 6952af793aad..000000000000 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/fake/fake_features.go +++ /dev/null @@ -1,115 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -package fake - -import ( - config_v1 "github.com/openshift/api/config/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - labels "k8s.io/apimachinery/pkg/labels" - schema "k8s.io/apimachinery/pkg/runtime/schema" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - testing "k8s.io/client-go/testing" -) - -// FakeFeatures implements FeaturesInterface -type FakeFeatures struct { - Fake *FakeConfigV1 -} - -var featuresResource = schema.GroupVersionResource{Group: "config.openshift.io", Version: "v1", Resource: "features"} - -var featuresKind = schema.GroupVersionKind{Group: "config.openshift.io", Version: "v1", Kind: "Features"} - -// Get takes name of the features, and returns the corresponding features object, and an error if there is any. -func (c *FakeFeatures) Get(name string, options v1.GetOptions) (result *config_v1.Features, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootGetAction(featuresResource, name), &config_v1.Features{}) - if obj == nil { - return nil, err - } - return obj.(*config_v1.Features), err -} - -// List takes label and field selectors, and returns the list of Features that match those selectors. -func (c *FakeFeatures) List(opts v1.ListOptions) (result *config_v1.FeaturesList, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootListAction(featuresResource, featuresKind, opts), &config_v1.FeaturesList{}) - if obj == nil { - return nil, err - } - - label, _, _ := testing.ExtractFromListOptions(opts) - if label == nil { - label = labels.Everything() - } - list := &config_v1.FeaturesList{ListMeta: obj.(*config_v1.FeaturesList).ListMeta} - for _, item := range obj.(*config_v1.FeaturesList).Items { - if label.Matches(labels.Set(item.Labels)) { - list.Items = append(list.Items, item) - } - } - return list, err -} - -// Watch returns a watch.Interface that watches the requested features. -func (c *FakeFeatures) Watch(opts v1.ListOptions) (watch.Interface, error) { - return c.Fake. - InvokesWatch(testing.NewRootWatchAction(featuresResource, opts)) -} - -// Create takes the representation of a features and creates it. Returns the server's representation of the features, and an error, if there is any. -func (c *FakeFeatures) Create(features *config_v1.Features) (result *config_v1.Features, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootCreateAction(featuresResource, features), &config_v1.Features{}) - if obj == nil { - return nil, err - } - return obj.(*config_v1.Features), err -} - -// Update takes the representation of a features and updates it. Returns the server's representation of the features, and an error, if there is any. 
-func (c *FakeFeatures) Update(features *config_v1.Features) (result *config_v1.Features, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateAction(featuresResource, features), &config_v1.Features{}) - if obj == nil { - return nil, err - } - return obj.(*config_v1.Features), err -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). -func (c *FakeFeatures) UpdateStatus(features *config_v1.Features) (*config_v1.Features, error) { - obj, err := c.Fake. - Invokes(testing.NewRootUpdateSubresourceAction(featuresResource, "status", features), &config_v1.Features{}) - if obj == nil { - return nil, err - } - return obj.(*config_v1.Features), err -} - -// Delete takes name of the features and deletes it. Returns an error if one occurs. -func (c *FakeFeatures) Delete(name string, options *v1.DeleteOptions) error { - _, err := c.Fake. - Invokes(testing.NewRootDeleteAction(featuresResource, name), &config_v1.Features{}) - return err -} - -// DeleteCollection deletes a collection of objects. -func (c *FakeFeatures) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - action := testing.NewRootDeleteCollectionAction(featuresResource, listOptions) - - _, err := c.Fake.Invokes(action, &config_v1.FeaturesList{}) - return err -} - -// Patch applies the patch and returns the patched features. -func (c *FakeFeatures) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *config_v1.Features, err error) { - obj, err := c.Fake. - Invokes(testing.NewRootPatchSubresourceAction(featuresResource, name, data, subresources...), &config_v1.Features{}) - if obj == nil { - return nil, err - } - return obj.(*config_v1.Features), err -} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/featuregate.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/featuregate.go new file mode 100644 index 000000000000..72da006b19eb --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/featuregate.go @@ -0,0 +1,147 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/config/v1" + scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// FeatureGatesGetter has a method to return a FeatureGateInterface. +// A group's client should implement this interface. +type FeatureGatesGetter interface { + FeatureGates() FeatureGateInterface +} + +// FeatureGateInterface has methods to work with FeatureGate resources. 
+type FeatureGateInterface interface { + Create(*v1.FeatureGate) (*v1.FeatureGate, error) + Update(*v1.FeatureGate) (*v1.FeatureGate, error) + UpdateStatus(*v1.FeatureGate) (*v1.FeatureGate, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.FeatureGate, error) + List(opts meta_v1.ListOptions) (*v1.FeatureGateList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.FeatureGate, err error) + FeatureGateExpansion +} + +// featureGates implements FeatureGateInterface +type featureGates struct { + client rest.Interface +} + +// newFeatureGates returns a FeatureGates +func newFeatureGates(c *ConfigV1Client) *featureGates { + return &featureGates{ + client: c.RESTClient(), + } +} + +// Get takes name of the featureGate, and returns the corresponding featureGate object, and an error if there is any. +func (c *featureGates) Get(name string, options meta_v1.GetOptions) (result *v1.FeatureGate, err error) { + result = &v1.FeatureGate{} + err = c.client.Get(). + Resource("featuregates"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of FeatureGates that match those selectors. +func (c *featureGates) List(opts meta_v1.ListOptions) (result *v1.FeatureGateList, err error) { + result = &v1.FeatureGateList{} + err = c.client.Get(). + Resource("featuregates"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested featureGates. +func (c *featureGates) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("featuregates"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a featureGate and creates it. Returns the server's representation of the featureGate, and an error, if there is any. +func (c *featureGates) Create(featureGate *v1.FeatureGate) (result *v1.FeatureGate, err error) { + result = &v1.FeatureGate{} + err = c.client.Post(). + Resource("featuregates"). + Body(featureGate). + Do(). + Into(result) + return +} + +// Update takes the representation of a featureGate and updates it. Returns the server's representation of the featureGate, and an error, if there is any. +func (c *featureGates) Update(featureGate *v1.FeatureGate) (result *v1.FeatureGate, err error) { + result = &v1.FeatureGate{} + err = c.client.Put(). + Resource("featuregates"). + Name(featureGate.Name). + Body(featureGate). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *featureGates) UpdateStatus(featureGate *v1.FeatureGate) (result *v1.FeatureGate, err error) { + result = &v1.FeatureGate{} + err = c.client.Put(). + Resource("featuregates"). + Name(featureGate.Name). + SubResource("status"). + Body(featureGate). + Do(). + Into(result) + return +} + +// Delete takes name of the featureGate and deletes it. Returns an error if one occurs. +func (c *featureGates) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Resource("featuregates"). + Name(name). 
+ Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *featureGates) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Resource("featuregates"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched featureGate. +func (c *featureGates) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.FeatureGate, err error) { + result = &v1.FeatureGate{} + err = c.client.Patch(pt). + Resource("featuregates"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/features.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/features.go deleted file mode 100644 index a6eb5983a6a7..000000000000 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/features.go +++ /dev/null @@ -1,147 +0,0 @@ -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "github.com/openshift/api/config/v1" - scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - rest "k8s.io/client-go/rest" -) - -// FeaturesGetter has a method to return a FeaturesInterface. -// A group's client should implement this interface. -type FeaturesGetter interface { - Features() FeaturesInterface -} - -// FeaturesInterface has methods to work with Features resources. -type FeaturesInterface interface { - Create(*v1.Features) (*v1.Features, error) - Update(*v1.Features) (*v1.Features, error) - UpdateStatus(*v1.Features) (*v1.Features, error) - Delete(name string, options *meta_v1.DeleteOptions) error - DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error - Get(name string, options meta_v1.GetOptions) (*v1.Features, error) - List(opts meta_v1.ListOptions) (*v1.FeaturesList, error) - Watch(opts meta_v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Features, err error) - FeaturesExpansion -} - -// features implements FeaturesInterface -type features struct { - client rest.Interface -} - -// newFeatures returns a Features -func newFeatures(c *ConfigV1Client) *features { - return &features{ - client: c.RESTClient(), - } -} - -// Get takes name of the features, and returns the corresponding features object, and an error if there is any. -func (c *features) Get(name string, options meta_v1.GetOptions) (result *v1.Features, err error) { - result = &v1.Features{} - err = c.client.Get(). - Resource("features"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Features that match those selectors. -func (c *features) List(opts meta_v1.ListOptions) (result *v1.FeaturesList, err error) { - result = &v1.FeaturesList{} - err = c.client.Get(). - Resource("features"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested features. 
-func (c *features) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("features"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a features and creates it. Returns the server's representation of the features, and an error, if there is any. -func (c *features) Create(features *v1.Features) (result *v1.Features, err error) { - result = &v1.Features{} - err = c.client.Post(). - Resource("features"). - Body(features). - Do(). - Into(result) - return -} - -// Update takes the representation of a features and updates it. Returns the server's representation of the features, and an error, if there is any. -func (c *features) Update(features *v1.Features) (result *v1.Features, err error) { - result = &v1.Features{} - err = c.client.Put(). - Resource("features"). - Name(features.Name). - Body(features). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *features) UpdateStatus(features *v1.Features) (result *v1.Features, err error) { - result = &v1.Features{} - err = c.client.Put(). - Resource("features"). - Name(features.Name). - SubResource("status"). - Body(features). - Do(). - Into(result) - return -} - -// Delete takes name of the features and deletes it. Returns an error if one occurs. -func (c *features) Delete(name string, options *meta_v1.DeleteOptions) error { - return c.client.Delete(). - Resource("features"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *features) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { - return c.client.Delete(). - Resource("features"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched features. -func (c *features) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Features, err error) { - result = &v1.Features{} - err = c.client.Patch(pt). - Resource("features"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). 
- Into(result) - return -} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/generated_expansion.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/generated_expansion.go index 822a30c35842..ad1005fb7872 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/generated_expansion.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1/generated_expansion.go @@ -16,7 +16,7 @@ type ConsoleExpansion interface{} type DNSExpansion interface{} -type FeaturesExpansion interface{} +type FeatureGateExpansion interface{} type ImageExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/features.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/featuregate.go similarity index 50% rename from vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/features.go rename to vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/featuregate.go index e336b68a8b9b..6641138bc573 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/features.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/featuregate.go @@ -15,58 +15,58 @@ import ( cache "k8s.io/client-go/tools/cache" ) -// FeaturesInformer provides access to a shared informer and lister for -// Features. -type FeaturesInformer interface { +// FeatureGateInformer provides access to a shared informer and lister for +// FeatureGates. +type FeatureGateInformer interface { Informer() cache.SharedIndexInformer - Lister() v1.FeaturesLister + Lister() v1.FeatureGateLister } -type featuresInformer struct { +type featureGateInformer struct { factory internalinterfaces.SharedInformerFactory tweakListOptions internalinterfaces.TweakListOptionsFunc } -// NewFeaturesInformer constructs a new informer for Features type. +// NewFeatureGateInformer constructs a new informer for FeatureGate type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. -func NewFeaturesInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredFeaturesInformer(client, resyncPeriod, indexers, nil) +func NewFeatureGateInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredFeatureGateInformer(client, resyncPeriod, indexers, nil) } -// NewFilteredFeaturesInformer constructs a new informer for Features type. +// NewFilteredFeatureGateInformer constructs a new informer for FeatureGate type. // Always prefer using an informer factory to get a shared informer instead of getting an independent // one. This reduces memory footprint and number of connections to the server. 
-func NewFilteredFeaturesInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { +func NewFilteredFeatureGateInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { return cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1().Features().List(options) + return client.ConfigV1().FeatureGates().List(options) }, WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { if tweakListOptions != nil { tweakListOptions(&options) } - return client.ConfigV1().Features().Watch(options) + return client.ConfigV1().FeatureGates().Watch(options) }, }, - &config_v1.Features{}, + &config_v1.FeatureGate{}, resyncPeriod, indexers, ) } -func (f *featuresInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredFeaturesInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +func (f *featureGateInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredFeatureGateInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) } -func (f *featuresInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&config_v1.Features{}, f.defaultInformer) +func (f *featureGateInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&config_v1.FeatureGate{}, f.defaultInformer) } -func (f *featuresInformer) Lister() v1.FeaturesLister { - return v1.NewFeaturesLister(f.Informer().GetIndexer()) +func (f *featureGateInformer) Lister() v1.FeatureGateLister { + return v1.NewFeatureGateLister(f.Informer().GetIndexer()) } diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/interface.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/interface.go index 3c30aef6fe9a..3b6b8a38a1dd 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/interface.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1/interface.go @@ -22,8 +22,8 @@ type Interface interface { Consoles() ConsoleInformer // DNSes returns a DNSInformer. DNSes() DNSInformer - // Features returns a FeaturesInformer. - Features() FeaturesInformer + // FeatureGates returns a FeatureGateInformer. + FeatureGates() FeatureGateInformer // Images returns a ImageInformer. Images() ImageInformer // Infrastructures returns a InfrastructureInformer. @@ -88,9 +88,9 @@ func (v *version) DNSes() DNSInformer { return &dNSInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } -// Features returns a FeaturesInformer. -func (v *version) Features() FeaturesInformer { - return &featuresInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +// FeatureGates returns a FeatureGateInformer. +func (v *version) FeatureGates() FeatureGateInformer { + return &featureGateInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } // Images returns a ImageInformer. 
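For orientation only — the hunks above rename the config.openshift.io typed client, informer, and lister from Features to FeatureGate. Below is a minimal consumer-side sketch of the renamed API, not part of the vendored patch itself; it assumes an in-cluster rest.Config, the conventional "cluster" singleton name, and an arbitrary 10-minute resync period, with error handling collapsed to panics for brevity. The operator.openshift.io IngressController and Network clients added later in this diff follow the same generated pattern, except that IngressControllers additionally takes a namespace.

// sketch.go — illustrative use of the renamed FeatureGate client and informer.
package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/rest"

	configclient "github.com/openshift/client-go/config/clientset/versioned"
	configinformers "github.com/openshift/client-go/config/informers/externalversions"
)

func main() {
	// Assumes the process runs in-cluster; any other rest.Config source works too.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client, err := configclient.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Direct GET through the renamed typed client (formerly Features()).
	// "cluster" is the conventional singleton name and is an assumption here.
	fg, err := client.ConfigV1().FeatureGates().Get("cluster", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("feature gate:", fg.Name)

	// Shared informer factory wired exactly as the generated code above registers it.
	factory := configinformers.NewSharedInformerFactory(client, 10*time.Minute)
	lister := factory.Config().V1().FeatureGates().Lister()

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	factory.WaitForCacheSync(stop)

	// List from the lister's cache rather than hitting the API server again.
	all, err := lister.List(labels.Everything())
	if err != nil {
		panic(err)
	}
	fmt.Println("cached feature gates:", len(all))
}
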
diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/generic.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/generic.go index 50ce23d8eefb..248b39101975 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/generic.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/generic.go @@ -51,8 +51,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Consoles().Informer()}, nil case v1.SchemeGroupVersion.WithResource("dnses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().DNSes().Informer()}, nil - case v1.SchemeGroupVersion.WithResource("features"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Features().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("featuregates"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().FeatureGates().Informer()}, nil case v1.SchemeGroupVersion.WithResource("images"): return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Images().Informer()}, nil case v1.SchemeGroupVersion.WithResource("infrastructures"): diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/expansion_generated.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/expansion_generated.go index 99c2acbda11c..94d309605f99 100644 --- a/vendor/github.com/openshift/client-go/config/listers/config/v1/expansion_generated.go +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/expansion_generated.go @@ -30,9 +30,9 @@ type ConsoleListerExpansion interface{} // DNSLister. type DNSListerExpansion interface{} -// FeaturesListerExpansion allows custom methods to be added to -// FeaturesLister. -type FeaturesListerExpansion interface{} +// FeatureGateListerExpansion allows custom methods to be added to +// FeatureGateLister. +type FeatureGateListerExpansion interface{} // ImageListerExpansion allows custom methods to be added to // ImageLister. diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/featuregate.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/featuregate.go new file mode 100644 index 000000000000..cb2b6cf16324 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1/featuregate.go @@ -0,0 +1,49 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/config/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// FeatureGateLister helps list FeatureGates. +type FeatureGateLister interface { + // List lists all FeatureGates in the indexer. + List(selector labels.Selector) (ret []*v1.FeatureGate, err error) + // Get retrieves the FeatureGate from the index for a given name. + Get(name string) (*v1.FeatureGate, error) + FeatureGateListerExpansion +} + +// featureGateLister implements the FeatureGateLister interface. +type featureGateLister struct { + indexer cache.Indexer +} + +// NewFeatureGateLister returns a new FeatureGateLister. +func NewFeatureGateLister(indexer cache.Indexer) FeatureGateLister { + return &featureGateLister{indexer: indexer} +} + +// List lists all FeatureGates in the indexer. 
+func (s *featureGateLister) List(selector labels.Selector) (ret []*v1.FeatureGate, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.FeatureGate)) + }) + return ret, err +} + +// Get retrieves the FeatureGate from the index for a given name. +func (s *featureGateLister) Get(name string) (*v1.FeatureGate, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("featuregate"), name) + } + return obj.(*v1.FeatureGate), nil +} diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1/features.go b/vendor/github.com/openshift/client-go/config/listers/config/v1/features.go deleted file mode 100644 index 1b2dd33bc35a..000000000000 --- a/vendor/github.com/openshift/client-go/config/listers/config/v1/features.go +++ /dev/null @@ -1,49 +0,0 @@ -// Code generated by lister-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "github.com/openshift/api/config/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/tools/cache" -) - -// FeaturesLister helps list Features. -type FeaturesLister interface { - // List lists all Features in the indexer. - List(selector labels.Selector) (ret []*v1.Features, err error) - // Get retrieves the Features from the index for a given name. - Get(name string) (*v1.Features, error) - FeaturesListerExpansion -} - -// featuresLister implements the FeaturesLister interface. -type featuresLister struct { - indexer cache.Indexer -} - -// NewFeaturesLister returns a new FeaturesLister. -func NewFeaturesLister(indexer cache.Indexer) FeaturesLister { - return &featuresLister{indexer: indexer} -} - -// List lists all Features in the indexer. -func (s *featuresLister) List(selector labels.Selector) (ret []*v1.Features, err error) { - err = cache.ListAll(s.indexer, selector, func(m interface{}) { - ret = append(ret, m.(*v1.Features)) - }) - return ret, err -} - -// Get retrieves the Features from the index for a given name. 
-func (s *featuresLister) Get(name string) (*v1.Features, error) { - obj, exists, err := s.indexer.GetByKey(name) - if err != nil { - return nil, err - } - if !exists { - return nil, errors.NewNotFound(v1.Resource("features"), name) - } - return obj.(*v1.Features), nil -} diff --git a/vendor/github.com/openshift/client-go/glide.lock b/vendor/github.com/openshift/client-go/glide.lock index f05b9defa625..fd4570ba96b9 100644 --- a/vendor/github.com/openshift/client-go/glide.lock +++ b/vendor/github.com/openshift/client-go/glide.lock @@ -1,5 +1,5 @@ hash: 2bc7f2365f88b3af5dd5bc4bc234144b18e67928f8169661cd2fa2b48c389650 -updated: 2019-03-01T14:01:27.246783073-05:00 +updated: 2019-03-13T15:57:18.934454257-05:00 imports: - name: github.com/davecgh/go-spew version: 782f4967f2dc4564575ca782fe2d04090b5faca8 @@ -49,7 +49,7 @@ imports: - name: github.com/modern-go/reflect2 version: 05fbef0ca5da472bbf96c9322b84a53edc03c9fd - name: github.com/openshift/api - version: b75f8002d9873eb84986b824850058977e116e5b + version: 5e45fff0f89ec1edff19e0ee00a21ecbeb08d787 subpackages: - apps/v1 - authorization/v1 @@ -238,7 +238,7 @@ imports: - util/integer - util/retry - name: k8s.io/code-generator - version: 1bdf8e8a8fde675de375d9d0a8fa77c4034be0a0 + version: 13d81ee386d3d039027dd42e7b3d1d30d8d788f2 repo: https://github.com/openshift/kubernetes-code-generator.git - name: k8s.io/gengo version: 01a732e01d00cb9a81bb0ca050d3e6d2b947927b diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_ingresscontroller.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_ingresscontroller.go new file mode 100644 index 000000000000..378859738059 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_ingresscontroller.go @@ -0,0 +1,124 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + operator_v1 "github.com/openshift/api/operator/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeIngressControllers implements IngressControllerInterface +type FakeIngressControllers struct { + Fake *FakeOperatorV1 + ns string +} + +var ingresscontrollersResource = schema.GroupVersionResource{Group: "operator.openshift.io", Version: "v1", Resource: "ingresscontrollers"} + +var ingresscontrollersKind = schema.GroupVersionKind{Group: "operator.openshift.io", Version: "v1", Kind: "IngressController"} + +// Get takes name of the ingressController, and returns the corresponding ingressController object, and an error if there is any. +func (c *FakeIngressControllers) Get(name string, options v1.GetOptions) (result *operator_v1.IngressController, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(ingresscontrollersResource, c.ns, name), &operator_v1.IngressController{}) + + if obj == nil { + return nil, err + } + return obj.(*operator_v1.IngressController), err +} + +// List takes label and field selectors, and returns the list of IngressControllers that match those selectors. +func (c *FakeIngressControllers) List(opts v1.ListOptions) (result *operator_v1.IngressControllerList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(ingresscontrollersResource, ingresscontrollersKind, c.ns, opts), &operator_v1.IngressControllerList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &operator_v1.IngressControllerList{ListMeta: obj.(*operator_v1.IngressControllerList).ListMeta} + for _, item := range obj.(*operator_v1.IngressControllerList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested ingressControllers. +func (c *FakeIngressControllers) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(ingresscontrollersResource, c.ns, opts)) + +} + +// Create takes the representation of a ingressController and creates it. Returns the server's representation of the ingressController, and an error, if there is any. +func (c *FakeIngressControllers) Create(ingressController *operator_v1.IngressController) (result *operator_v1.IngressController, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(ingresscontrollersResource, c.ns, ingressController), &operator_v1.IngressController{}) + + if obj == nil { + return nil, err + } + return obj.(*operator_v1.IngressController), err +} + +// Update takes the representation of a ingressController and updates it. Returns the server's representation of the ingressController, and an error, if there is any. +func (c *FakeIngressControllers) Update(ingressController *operator_v1.IngressController) (result *operator_v1.IngressController, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(ingresscontrollersResource, c.ns, ingressController), &operator_v1.IngressController{}) + + if obj == nil { + return nil, err + } + return obj.(*operator_v1.IngressController), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeIngressControllers) UpdateStatus(ingressController *operator_v1.IngressController) (*operator_v1.IngressController, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(ingresscontrollersResource, "status", c.ns, ingressController), &operator_v1.IngressController{}) + + if obj == nil { + return nil, err + } + return obj.(*operator_v1.IngressController), err +} + +// Delete takes name of the ingressController and deletes it. Returns an error if one occurs. +func (c *FakeIngressControllers) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(ingresscontrollersResource, c.ns, name), &operator_v1.IngressController{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeIngressControllers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(ingresscontrollersResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &operator_v1.IngressControllerList{}) + return err +} + +// Patch applies the patch and returns the patched ingressController. +func (c *FakeIngressControllers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *operator_v1.IngressController, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(ingresscontrollersResource, c.ns, name, data, subresources...), &operator_v1.IngressController{}) + + if obj == nil { + return nil, err + } + return obj.(*operator_v1.IngressController), err +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_network.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_network.go new file mode 100644 index 000000000000..a0822dcb911f --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_network.go @@ -0,0 +1,115 @@ +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + operator_v1 "github.com/openshift/api/operator/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeNetworks implements NetworkInterface +type FakeNetworks struct { + Fake *FakeOperatorV1 +} + +var networksResource = schema.GroupVersionResource{Group: "operator.openshift.io", Version: "v1", Resource: "networks"} + +var networksKind = schema.GroupVersionKind{Group: "operator.openshift.io", Version: "v1", Kind: "Network"} + +// Get takes name of the network, and returns the corresponding network object, and an error if there is any. +func (c *FakeNetworks) Get(name string, options v1.GetOptions) (result *operator_v1.Network, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(networksResource, name), &operator_v1.Network{}) + if obj == nil { + return nil, err + } + return obj.(*operator_v1.Network), err +} + +// List takes label and field selectors, and returns the list of Networks that match those selectors. +func (c *FakeNetworks) List(opts v1.ListOptions) (result *operator_v1.NetworkList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(networksResource, networksKind, opts), &operator_v1.NetworkList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &operator_v1.NetworkList{ListMeta: obj.(*operator_v1.NetworkList).ListMeta} + for _, item := range obj.(*operator_v1.NetworkList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested networks. +func (c *FakeNetworks) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(networksResource, opts)) +} + +// Create takes the representation of a network and creates it. Returns the server's representation of the network, and an error, if there is any. +func (c *FakeNetworks) Create(network *operator_v1.Network) (result *operator_v1.Network, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(networksResource, network), &operator_v1.Network{}) + if obj == nil { + return nil, err + } + return obj.(*operator_v1.Network), err +} + +// Update takes the representation of a network and updates it. Returns the server's representation of the network, and an error, if there is any. +func (c *FakeNetworks) Update(network *operator_v1.Network) (result *operator_v1.Network, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewRootUpdateAction(networksResource, network), &operator_v1.Network{}) + if obj == nil { + return nil, err + } + return obj.(*operator_v1.Network), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeNetworks) UpdateStatus(network *operator_v1.Network) (*operator_v1.Network, error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateSubresourceAction(networksResource, "status", network), &operator_v1.Network{}) + if obj == nil { + return nil, err + } + return obj.(*operator_v1.Network), err +} + +// Delete takes name of the network and deletes it. Returns an error if one occurs. +func (c *FakeNetworks) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteAction(networksResource, name), &operator_v1.Network{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeNetworks) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(networksResource, listOptions) + + _, err := c.Fake.Invokes(action, &operator_v1.NetworkList{}) + return err +} + +// Patch applies the patch and returns the patched network. +func (c *FakeNetworks) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *operator_v1.Network, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(networksResource, name, data, subresources...), &operator_v1.Network{}) + if obj == nil { + return nil, err + } + return obj.(*operator_v1.Network), err +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_operator_client.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_operator_client.go index e9c56582ac0a..0c7e3a3d9656 100644 --- a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_operator_client.go +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/fake/fake_operator_client.go @@ -24,6 +24,10 @@ func (c *FakeOperatorV1) Etcds() v1.EtcdInterface { return &FakeEtcds{c} } +func (c *FakeOperatorV1) IngressControllers(namespace string) v1.IngressControllerInterface { + return &FakeIngressControllers{c, namespace} +} + func (c *FakeOperatorV1) KubeAPIServers() v1.KubeAPIServerInterface { return &FakeKubeAPIServers{c} } @@ -36,6 +40,10 @@ func (c *FakeOperatorV1) KubeSchedulers() v1.KubeSchedulerInterface { return &FakeKubeSchedulers{c} } +func (c *FakeOperatorV1) Networks() v1.NetworkInterface { + return &FakeNetworks{c} +} + func (c *FakeOperatorV1) OpenShiftAPIServers() v1.OpenShiftAPIServerInterface { return &FakeOpenShiftAPIServers{c} } diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/generated_expansion.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/generated_expansion.go index 358e3f0b89a4..b20aca4be2d9 100644 --- a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/generated_expansion.go +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/generated_expansion.go @@ -8,12 +8,16 @@ type ConsoleExpansion interface{} type EtcdExpansion interface{} +type IngressControllerExpansion interface{} + type KubeAPIServerExpansion 
interface{} type KubeControllerManagerExpansion interface{} type KubeSchedulerExpansion interface{} +type NetworkExpansion interface{} + type OpenShiftAPIServerExpansion interface{} type OpenShiftControllerManagerExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/ingresscontroller.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/ingresscontroller.go new file mode 100644 index 000000000000..d1d4922533e5 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/ingresscontroller.go @@ -0,0 +1,158 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/operator/v1" + scheme "github.com/openshift/client-go/operator/clientset/versioned/scheme" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// IngressControllersGetter has a method to return a IngressControllerInterface. +// A group's client should implement this interface. +type IngressControllersGetter interface { + IngressControllers(namespace string) IngressControllerInterface +} + +// IngressControllerInterface has methods to work with IngressController resources. +type IngressControllerInterface interface { + Create(*v1.IngressController) (*v1.IngressController, error) + Update(*v1.IngressController) (*v1.IngressController, error) + UpdateStatus(*v1.IngressController) (*v1.IngressController, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.IngressController, error) + List(opts meta_v1.ListOptions) (*v1.IngressControllerList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.IngressController, err error) + IngressControllerExpansion +} + +// ingressControllers implements IngressControllerInterface +type ingressControllers struct { + client rest.Interface + ns string +} + +// newIngressControllers returns a IngressControllers +func newIngressControllers(c *OperatorV1Client, namespace string) *ingressControllers { + return &ingressControllers{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the ingressController, and returns the corresponding ingressController object, and an error if there is any. +func (c *ingressControllers) Get(name string, options meta_v1.GetOptions) (result *v1.IngressController, err error) { + result = &v1.IngressController{} + err = c.client.Get(). + Namespace(c.ns). + Resource("ingresscontrollers"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of IngressControllers that match those selectors. +func (c *ingressControllers) List(opts meta_v1.ListOptions) (result *v1.IngressControllerList, err error) { + result = &v1.IngressControllerList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("ingresscontrollers"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested ingressControllers. 
+func (c *ingressControllers) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("ingresscontrollers"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a ingressController and creates it. Returns the server's representation of the ingressController, and an error, if there is any. +func (c *ingressControllers) Create(ingressController *v1.IngressController) (result *v1.IngressController, err error) { + result = &v1.IngressController{} + err = c.client.Post(). + Namespace(c.ns). + Resource("ingresscontrollers"). + Body(ingressController). + Do(). + Into(result) + return +} + +// Update takes the representation of a ingressController and updates it. Returns the server's representation of the ingressController, and an error, if there is any. +func (c *ingressControllers) Update(ingressController *v1.IngressController) (result *v1.IngressController, err error) { + result = &v1.IngressController{} + err = c.client.Put(). + Namespace(c.ns). + Resource("ingresscontrollers"). + Name(ingressController.Name). + Body(ingressController). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *ingressControllers) UpdateStatus(ingressController *v1.IngressController) (result *v1.IngressController, err error) { + result = &v1.IngressController{} + err = c.client.Put(). + Namespace(c.ns). + Resource("ingresscontrollers"). + Name(ingressController.Name). + SubResource("status"). + Body(ingressController). + Do(). + Into(result) + return +} + +// Delete takes name of the ingressController and deletes it. Returns an error if one occurs. +func (c *ingressControllers) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("ingresscontrollers"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *ingressControllers) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("ingresscontrollers"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched ingressController. +func (c *ingressControllers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.IngressController, err error) { + result = &v1.IngressController{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("ingresscontrollers"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/network.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/network.go new file mode 100644 index 000000000000..598efe7b8936 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/network.go @@ -0,0 +1,147 @@ +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +import ( + v1 "github.com/openshift/api/operator/v1" + scheme "github.com/openshift/client-go/operator/clientset/versioned/scheme" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// NetworksGetter has a method to return a NetworkInterface. +// A group's client should implement this interface. +type NetworksGetter interface { + Networks() NetworkInterface +} + +// NetworkInterface has methods to work with Network resources. +type NetworkInterface interface { + Create(*v1.Network) (*v1.Network, error) + Update(*v1.Network) (*v1.Network, error) + UpdateStatus(*v1.Network) (*v1.Network, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.Network, error) + List(opts meta_v1.ListOptions) (*v1.NetworkList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Network, err error) + NetworkExpansion +} + +// networks implements NetworkInterface +type networks struct { + client rest.Interface +} + +// newNetworks returns a Networks +func newNetworks(c *OperatorV1Client) *networks { + return &networks{ + client: c.RESTClient(), + } +} + +// Get takes name of the network, and returns the corresponding network object, and an error if there is any. +func (c *networks) Get(name string, options meta_v1.GetOptions) (result *v1.Network, err error) { + result = &v1.Network{} + err = c.client.Get(). + Resource("networks"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Networks that match those selectors. +func (c *networks) List(opts meta_v1.ListOptions) (result *v1.NetworkList, err error) { + result = &v1.NetworkList{} + err = c.client.Get(). + Resource("networks"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested networks. +func (c *networks) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("networks"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a network and creates it. Returns the server's representation of the network, and an error, if there is any. +func (c *networks) Create(network *v1.Network) (result *v1.Network, err error) { + result = &v1.Network{} + err = c.client.Post(). + Resource("networks"). + Body(network). + Do(). + Into(result) + return +} + +// Update takes the representation of a network and updates it. Returns the server's representation of the network, and an error, if there is any. +func (c *networks) Update(network *v1.Network) (result *v1.Network, err error) { + result = &v1.Network{} + err = c.client.Put(). + Resource("networks"). + Name(network.Name). + Body(network). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *networks) UpdateStatus(network *v1.Network) (result *v1.Network, err error) { + result = &v1.Network{} + err = c.client.Put(). + Resource("networks"). + Name(network.Name). 
+ SubResource("status"). + Body(network). + Do(). + Into(result) + return +} + +// Delete takes name of the network and deletes it. Returns an error if one occurs. +func (c *networks) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Resource("networks"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *networks) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Resource("networks"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched network. +func (c *networks) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Network, err error) { + result = &v1.Network{} + err = c.client.Patch(pt). + Resource("networks"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/operator_client.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/operator_client.go index b466d17d5377..fddbd3bc3ecb 100644 --- a/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/operator_client.go +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/typed/operator/v1/operator_client.go @@ -14,9 +14,11 @@ type OperatorV1Interface interface { AuthenticationsGetter ConsolesGetter EtcdsGetter + IngressControllersGetter KubeAPIServersGetter KubeControllerManagersGetter KubeSchedulersGetter + NetworksGetter OpenShiftAPIServersGetter OpenShiftControllerManagersGetter ServiceCAsGetter @@ -41,6 +43,10 @@ func (c *OperatorV1Client) Etcds() EtcdInterface { return newEtcds(c) } +func (c *OperatorV1Client) IngressControllers(namespace string) IngressControllerInterface { + return newIngressControllers(c, namespace) +} + func (c *OperatorV1Client) KubeAPIServers() KubeAPIServerInterface { return newKubeAPIServers(c) } @@ -53,6 +59,10 @@ func (c *OperatorV1Client) KubeSchedulers() KubeSchedulerInterface { return newKubeSchedulers(c) } +func (c *OperatorV1Client) Networks() NetworkInterface { + return newNetworks(c) +} + func (c *OperatorV1Client) OpenShiftAPIServers() OpenShiftAPIServerInterface { return newOpenShiftAPIServers(c) } diff --git a/vendor/github.com/openshift/client-go/operator/informers/externalversions/generic.go b/vendor/github.com/openshift/client-go/operator/informers/externalversions/generic.go index 41cda065f72b..0321bbd36ff3 100644 --- a/vendor/github.com/openshift/client-go/operator/informers/externalversions/generic.go +++ b/vendor/github.com/openshift/client-go/operator/informers/externalversions/generic.go @@ -43,12 +43,16 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Operator().V1().Consoles().Informer()}, nil case v1.SchemeGroupVersion.WithResource("etcds"): return &genericInformer{resource: resource.GroupResource(), informer: f.Operator().V1().Etcds().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("ingresscontrollers"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Operator().V1().IngressControllers().Informer()}, nil case v1.SchemeGroupVersion.WithResource("kubeapiservers"): return &genericInformer{resource: 
resource.GroupResource(), informer: f.Operator().V1().KubeAPIServers().Informer()}, nil case v1.SchemeGroupVersion.WithResource("kubecontrollermanagers"): return &genericInformer{resource: resource.GroupResource(), informer: f.Operator().V1().KubeControllerManagers().Informer()}, nil case v1.SchemeGroupVersion.WithResource("kubeschedulers"): return &genericInformer{resource: resource.GroupResource(), informer: f.Operator().V1().KubeSchedulers().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("networks"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Operator().V1().Networks().Informer()}, nil case v1.SchemeGroupVersion.WithResource("openshiftapiservers"): return &genericInformer{resource: resource.GroupResource(), informer: f.Operator().V1().OpenShiftAPIServers().Informer()}, nil case v1.SchemeGroupVersion.WithResource("openshiftcontrollermanagers"): diff --git a/vendor/github.com/openshift/client-go/operator/informers/externalversions/operator/v1/ingresscontroller.go b/vendor/github.com/openshift/client-go/operator/informers/externalversions/operator/v1/ingresscontroller.go new file mode 100644 index 000000000000..6b6570d29213 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/informers/externalversions/operator/v1/ingresscontroller.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + operator_v1 "github.com/openshift/api/operator/v1" + versioned "github.com/openshift/client-go/operator/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/operator/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/client-go/operator/listers/operator/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// IngressControllerInformer provides access to a shared informer and lister for +// IngressControllers. +type IngressControllerInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.IngressControllerLister +} + +type ingressControllerInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewIngressControllerInformer constructs a new informer for IngressController type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewIngressControllerInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredIngressControllerInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredIngressControllerInformer constructs a new informer for IngressController type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredIngressControllerInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.OperatorV1().IngressControllers(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.OperatorV1().IngressControllers(namespace).Watch(options) + }, + }, + &operator_v1.IngressController{}, + resyncPeriod, + indexers, + ) +} + +func (f *ingressControllerInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredIngressControllerInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *ingressControllerInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&operator_v1.IngressController{}, f.defaultInformer) +} + +func (f *ingressControllerInformer) Lister() v1.IngressControllerLister { + return v1.NewIngressControllerLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/operator/informers/externalversions/operator/v1/interface.go b/vendor/github.com/openshift/client-go/operator/informers/externalversions/operator/v1/interface.go index 4cb3b98bfb86..7c8f3daf18c5 100644 --- a/vendor/github.com/openshift/client-go/operator/informers/externalversions/operator/v1/interface.go +++ b/vendor/github.com/openshift/client-go/operator/informers/externalversions/operator/v1/interface.go @@ -14,12 +14,16 @@ type Interface interface { Consoles() ConsoleInformer // Etcds returns a EtcdInformer. Etcds() EtcdInformer + // IngressControllers returns a IngressControllerInformer. + IngressControllers() IngressControllerInformer // KubeAPIServers returns a KubeAPIServerInformer. KubeAPIServers() KubeAPIServerInformer // KubeControllerManagers returns a KubeControllerManagerInformer. KubeControllerManagers() KubeControllerManagerInformer // KubeSchedulers returns a KubeSchedulerInformer. KubeSchedulers() KubeSchedulerInformer + // Networks returns a NetworkInformer. + Networks() NetworkInformer // OpenShiftAPIServers returns a OpenShiftAPIServerInformer. OpenShiftAPIServers() OpenShiftAPIServerInformer // OpenShiftControllerManagers returns a OpenShiftControllerManagerInformer. @@ -58,6 +62,11 @@ func (v *version) Etcds() EtcdInformer { return &etcdInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } +// IngressControllers returns a IngressControllerInformer. +func (v *version) IngressControllers() IngressControllerInformer { + return &ingressControllerInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + // KubeAPIServers returns a KubeAPIServerInformer. func (v *version) KubeAPIServers() KubeAPIServerInformer { return &kubeAPIServerInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} @@ -73,6 +82,11 @@ func (v *version) KubeSchedulers() KubeSchedulerInformer { return &kubeSchedulerInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} } +// Networks returns a NetworkInformer. 
+func (v *version) Networks() NetworkInformer { + return &networkInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + // OpenShiftAPIServers returns a OpenShiftAPIServerInformer. func (v *version) OpenShiftAPIServers() OpenShiftAPIServerInformer { return &openShiftAPIServerInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} diff --git a/vendor/github.com/openshift/client-go/operator/informers/externalversions/operator/v1/network.go b/vendor/github.com/openshift/client-go/operator/informers/externalversions/operator/v1/network.go new file mode 100644 index 000000000000..0507cce1f2c5 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/informers/externalversions/operator/v1/network.go @@ -0,0 +1,72 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + operator_v1 "github.com/openshift/api/operator/v1" + versioned "github.com/openshift/client-go/operator/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/operator/informers/externalversions/internalinterfaces" + v1 "github.com/openshift/client-go/operator/listers/operator/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// NetworkInformer provides access to a shared informer and lister for +// Networks. +type NetworkInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.NetworkLister +} + +type networkInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewNetworkInformer constructs a new informer for Network type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewNetworkInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredNetworkInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredNetworkInformer constructs a new informer for Network type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredNetworkInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.OperatorV1().Networks().List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.OperatorV1().Networks().Watch(options) + }, + }, + &operator_v1.Network{}, + resyncPeriod, + indexers, + ) +} + +func (f *networkInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredNetworkInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *networkInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&operator_v1.Network{}, f.defaultInformer) +} + +func (f *networkInformer) Lister() v1.NetworkLister { + return v1.NewNetworkLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/operator/listers/operator/v1/expansion_generated.go b/vendor/github.com/openshift/client-go/operator/listers/operator/v1/expansion_generated.go index 4aef92ccdce3..c0ae4f640b70 100644 --- a/vendor/github.com/openshift/client-go/operator/listers/operator/v1/expansion_generated.go +++ b/vendor/github.com/openshift/client-go/operator/listers/operator/v1/expansion_generated.go @@ -14,6 +14,14 @@ type ConsoleListerExpansion interface{} // EtcdLister. type EtcdListerExpansion interface{} +// IngressControllerListerExpansion allows custom methods to be added to +// IngressControllerLister. +type IngressControllerListerExpansion interface{} + +// IngressControllerNamespaceListerExpansion allows custom methods to be added to +// IngressControllerNamespaceLister. +type IngressControllerNamespaceListerExpansion interface{} + // KubeAPIServerListerExpansion allows custom methods to be added to // KubeAPIServerLister. type KubeAPIServerListerExpansion interface{} @@ -26,6 +34,10 @@ type KubeControllerManagerListerExpansion interface{} // KubeSchedulerLister. type KubeSchedulerListerExpansion interface{} +// NetworkListerExpansion allows custom methods to be added to +// NetworkLister. +type NetworkListerExpansion interface{} + // OpenShiftAPIServerListerExpansion allows custom methods to be added to // OpenShiftAPIServerLister. type OpenShiftAPIServerListerExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/operator/listers/operator/v1/ingresscontroller.go b/vendor/github.com/openshift/client-go/operator/listers/operator/v1/ingresscontroller.go new file mode 100644 index 000000000000..51c79227d803 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/listers/operator/v1/ingresscontroller.go @@ -0,0 +1,78 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/operator/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// IngressControllerLister helps list IngressControllers. +type IngressControllerLister interface { + // List lists all IngressControllers in the indexer. 
+ List(selector labels.Selector) (ret []*v1.IngressController, err error) + // IngressControllers returns an object that can list and get IngressControllers. + IngressControllers(namespace string) IngressControllerNamespaceLister + IngressControllerListerExpansion +} + +// ingressControllerLister implements the IngressControllerLister interface. +type ingressControllerLister struct { + indexer cache.Indexer +} + +// NewIngressControllerLister returns a new IngressControllerLister. +func NewIngressControllerLister(indexer cache.Indexer) IngressControllerLister { + return &ingressControllerLister{indexer: indexer} +} + +// List lists all IngressControllers in the indexer. +func (s *ingressControllerLister) List(selector labels.Selector) (ret []*v1.IngressController, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.IngressController)) + }) + return ret, err +} + +// IngressControllers returns an object that can list and get IngressControllers. +func (s *ingressControllerLister) IngressControllers(namespace string) IngressControllerNamespaceLister { + return ingressControllerNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// IngressControllerNamespaceLister helps list and get IngressControllers. +type IngressControllerNamespaceLister interface { + // List lists all IngressControllers in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.IngressController, err error) + // Get retrieves the IngressController from the indexer for a given namespace and name. + Get(name string) (*v1.IngressController, error) + IngressControllerNamespaceListerExpansion +} + +// ingressControllerNamespaceLister implements the IngressControllerNamespaceLister +// interface. +type ingressControllerNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all IngressControllers in the indexer for a given namespace. +func (s ingressControllerNamespaceLister) List(selector labels.Selector) (ret []*v1.IngressController, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.IngressController)) + }) + return ret, err +} + +// Get retrieves the IngressController from the indexer for a given namespace and name. +func (s ingressControllerNamespaceLister) Get(name string) (*v1.IngressController, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("ingresscontroller"), name) + } + return obj.(*v1.IngressController), nil +} diff --git a/vendor/github.com/openshift/client-go/operator/listers/operator/v1/network.go b/vendor/github.com/openshift/client-go/operator/listers/operator/v1/network.go new file mode 100644 index 000000000000..735fef9b7fb3 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/listers/operator/v1/network.go @@ -0,0 +1,49 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/operator/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// NetworkLister helps list Networks. +type NetworkLister interface { + // List lists all Networks in the indexer. + List(selector labels.Selector) (ret []*v1.Network, err error) + // Get retrieves the Network from the index for a given name. 
+ Get(name string) (*v1.Network, error) + NetworkListerExpansion +} + +// networkLister implements the NetworkLister interface. +type networkLister struct { + indexer cache.Indexer +} + +// NewNetworkLister returns a new NetworkLister. +func NewNetworkLister(indexer cache.Indexer) NetworkLister { + return &networkLister{indexer: indexer} +} + +// List lists all Networks in the indexer. +func (s *networkLister) List(selector labels.Selector) (ret []*v1.Network, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Network)) + }) + return ret, err +} + +// Get retrieves the Network from the index for a given name. +func (s *networkLister) Get(name string) (*v1.Network, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("network"), name) + } + return obj.(*v1.Network), nil +} diff --git a/vendor/github.com/openshift/library-go/cmd/crd-schema-gen/generator/generator.go b/vendor/github.com/openshift/library-go/cmd/crd-schema-gen/generator/generator.go index 0c0efc2c38f1..cce6306c3135 100755 --- a/vendor/github.com/openshift/library-go/cmd/crd-schema-gen/generator/generator.go +++ b/vendor/github.com/openshift/library-go/cmd/crd-schema-gen/generator/generator.go @@ -67,7 +67,7 @@ func Run() error { return err } - // generate kubebuilder NamedYaml manifests into temp dir + // generate kubebuilder KindGroupYaml manifests into temp dir g := crdgenerator.Generator{ RootPath: tmpDir, Domain: "openshift.io", @@ -100,13 +100,13 @@ func Run() error { existingFileNames := map[string]string{} for fn, crd := range existing { - existingFileNames[crd.Name] = fn + existingFileNames[crd.KindGroup] = fn } // update existing manifests with validations of kubebuilder output dirty := false for fn, withValidation := range fromKubebuilder { - existingFileName, ok := existingFileNames[withValidation.Name] + existingFileName, ok := existingFileNames[withValidation.KindGroup] if !ok { continue } @@ -171,7 +171,7 @@ func Run() error { return fmt.Errorf("failed to set spec.validation in %s: %v", existingFileName, err) } if reflect.DeepEqual(updated, crd.Yaml) { - fmt.Printf("Validation of %s in %s did not change.\n", crd.Name, existingFileName) + fmt.Printf("Validation of %s in %s did not change.\n", crd.KindGroup, existingFileName) continue } @@ -185,7 +185,7 @@ func Run() error { if *verifyOnly { newFn = filepath.Join(tmpDir, filepath.Base(existingFileName)) } else { - fmt.Printf("Updating validation of %s in %s.\n", crd.Name, existingFileName) + fmt.Printf("Updating validation of %s in %s.\n", crd.KindGroup, existingFileName) } if err := ioutil.WriteFile(newFn, bs, 0644); err != nil { return err @@ -305,14 +305,14 @@ func onlyHasNoneOr(x interface{}, pth ...string) bool { } } -type NamedYaml struct { - Name string - Yaml interface{} +type KindGroupYaml struct { + KindGroup string + Yaml interface{} } // crdsFromDirectory returns CRDs by file path -func crdsFromDirectory(dir string) (map[string]NamedYaml, error) { - ret := map[string]NamedYaml{} +func crdsFromDirectory(dir string) (map[string]KindGroupYaml, error) { + ret := map[string]KindGroupYaml{} infos, err := ioutil.ReadDir(dir) if err != nil { return nil, err @@ -343,7 +343,8 @@ func crdsFromDirectory(dir string) (map[string]NamedYaml, error) { fmt.Printf("Warning: failed to unmarshal %q, skipping\n", info.Name()) continue } - ret[filepath.Join(dir, info.Name())] = NamedYaml{crd.Name, y} + key := 
crd.Spec.Names.Kind + "." + crd.Spec.Group + ret[filepath.Join(dir, info.Name())] = KindGroupYaml{key, y} } if err != nil { return nil, err diff --git a/vendor/github.com/openshift/library-go/pkg/config/leaderelection/leaderelection.go b/vendor/github.com/openshift/library-go/pkg/config/leaderelection/leaderelection.go index 7c0f5b442d41..1d05075d9d39 100644 --- a/vendor/github.com/openshift/library-go/pkg/config/leaderelection/leaderelection.go +++ b/vendor/github.com/openshift/library-go/pkg/config/leaderelection/leaderelection.go @@ -75,13 +75,13 @@ func LeaderElectionDefaulting(config configv1.LeaderElection, defaultNamespace, ret := *(&config).DeepCopy() if ret.LeaseDuration.Duration == 0 { - ret.LeaseDuration.Duration = 15 * time.Second + ret.LeaseDuration.Duration = 120 * time.Second } if ret.RenewDeadline.Duration == 0 { - ret.RenewDeadline.Duration = 10 * time.Second + ret.RenewDeadline.Duration = 90 * time.Second } if ret.RetryPeriod.Duration == 0 { - ret.RetryPeriod.Duration = 2 * time.Second + ret.RetryPeriod.Duration = 20 * time.Second } if len(ret.Namespace) == 0 { if len(defaultNamespace) > 0 { diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/certsyncpod/certsync_cmd.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/certsyncpod/certsync_cmd.go index b8b35aff0a20..d82981d2d71f 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/certsyncpod/certsync_cmd.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/certsyncpod/certsync_cmd.go @@ -1,6 +1,7 @@ package certsyncpod import ( + "io/ioutil" "os" "time" @@ -13,6 +14,7 @@ import ( "k8s.io/client-go/rest" "github.com/openshift/library-go/pkg/config/client" + "github.com/openshift/library-go/pkg/controller/fileobserver" "github.com/openshift/library-go/pkg/operator/events" "github.com/openshift/library-go/pkg/operator/staticpod/controller/revision" ) @@ -24,6 +26,8 @@ type CertSyncControllerOptions struct { configMaps []revision.RevisionResource secrets []revision.RevisionResource + + kubeClient kubernetes.Interface } func NewCertSyncControllerCommand(configmaps, secrets []revision.RevisionResource) *cobra.Command { @@ -35,11 +39,12 @@ func NewCertSyncControllerCommand(configmaps, secrets []revision.RevisionResourc cmd := &cobra.Command{ Use: "cert-syncer --kubeconfig=kubeconfigfile", Run: func(cmd *cobra.Command, args []string) { - r, err := o.Complete() - if err != nil { + if err := o.Complete(); err != nil { + glog.Fatal(err) + } + if err := o.Run(); err != nil { glog.Fatal(err) } - r.Run(1, make(chan struct{})) }, } @@ -50,23 +55,23 @@ func NewCertSyncControllerCommand(configmaps, secrets []revision.RevisionResourc return cmd } -func (o *CertSyncControllerOptions) Complete() (*CertSyncController, error) { - kubeConfig, err := client.GetKubeConfigOrInClusterConfig(o.KubeConfigFile, nil) +func (o *CertSyncControllerOptions) Run() error { + // When the kubeconfig content change, commit suicide to reload its content. 
+ observer, err := fileobserver.NewObserver(500 * time.Millisecond) if err != nil { - return nil, err + return err } - protoKubeConfig := rest.CopyConfig(kubeConfig) - protoKubeConfig.AcceptContentTypes = "application/vnd.kubernetes.protobuf,application/json" - protoKubeConfig.ContentType = "application/vnd.kubernetes.protobuf" - // This kube client use protobuf, do not use it for CR - kubeClient, err := kubernetes.NewForConfig(protoKubeConfig) - if err != nil { - return nil, err - } - kubeInformers := informers.NewSharedInformerFactoryWithOptions(kubeClient, 10*time.Minute, informers.WithNamespace(o.Namespace)) + initialContent, _ := ioutil.ReadFile(o.KubeConfigFile) + observer.AddReactor(fileobserver.ExitOnChangeReactor, map[string][]byte{o.KubeConfigFile: initialContent}, o.KubeConfigFile) + + stopCh := make(chan struct{}) + go observer.Run(stopCh) - eventRecorder := events.NewKubeRecorder(kubeClient.CoreV1().Events(o.Namespace), "cert-syncer", + kubeInformers := informers.NewSharedInformerFactoryWithOptions(o.kubeClient, 10*time.Minute, informers.WithNamespace(o.Namespace)) + go kubeInformers.Start(stopCh) + + eventRecorder := events.NewKubeRecorder(o.kubeClient.CoreV1().Events(o.Namespace), "cert-syncer", &corev1.ObjectReference{ APIVersion: "v1", Kind: "Pod", @@ -74,7 +79,7 @@ func (o *CertSyncControllerOptions) Complete() (*CertSyncController, error) { Name: os.Getenv("POD_NAME"), }) - return NewCertSyncController( + controller, err := NewCertSyncController( o.DestinationDir, o.Namespace, o.configMaps, @@ -82,4 +87,33 @@ func (o *CertSyncControllerOptions) Complete() (*CertSyncController, error) { kubeInformers, eventRecorder, ) + if err != nil { + return err + } + go controller.Run(1, stopCh) + + <-stopCh + glog.Infof("Shutting down certificate syncer") + + return nil +} + +func (o *CertSyncControllerOptions) Complete() error { + kubeConfig, err := client.GetKubeConfigOrInClusterConfig(o.KubeConfigFile, nil) + if err != nil { + return err + } + + protoKubeConfig := rest.CopyConfig(kubeConfig) + protoKubeConfig.AcceptContentTypes = "application/vnd.kubernetes.protobuf,application/json" + protoKubeConfig.ContentType = "application/vnd.kubernetes.protobuf" + + // This kube client use protobuf, do not use it for CR + kubeClient, err := kubernetes.NewForConfig(protoKubeConfig) + if err != nil { + return err + } + o.kubeClient = kubeClient + + return nil } diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/prune_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/prune_controller.go index f02871255420..ca83e9ae1228 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/prune_controller.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/prune_controller.go @@ -215,6 +215,9 @@ func protectedRevisions(revisions []int, revisionLimit int) []int { } func (c *PruneController) ensurePrunePod(nodeName string, maxEligibleRevision int, protectedRevisions []int, revision int32) error { + if revision == 0 { + return nil + } pod := resourceread.ReadPodV1OrDie(bindata.MustAsset(filepath.Join("pkg/operator/staticpod/controller/prune", "manifests/pruner-pod.yaml"))) pod.Name = getPrunerPodName(nodeName, revision) diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/prune_controller_test.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/prune_controller_test.go index 
3beea4867e4e..d8151d43ec31 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/prune_controller_test.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/prune/prune_controller_test.go @@ -194,7 +194,7 @@ func TestPruneAPIResources(t *testing.T) { NodeStatuses: []operatorv1.NodeStatus{ { NodeName: "test-node-1", - CurrentRevision: 0, + CurrentRevision: 1, TargetRevision: 0, }, }, @@ -208,7 +208,7 @@ func TestPruneAPIResources(t *testing.T) { NodeStatuses: []operatorv1.NodeStatus{ { NodeName: "test-node-1", - CurrentRevision: 0, + CurrentRevision: 1, TargetRevision: 0, }, }, @@ -410,7 +410,7 @@ func TestPruneDiskResources(t *testing.T) { NodeStatuses: []operatorv1.NodeStatus{ { NodeName: "test-node-1", - CurrentRevision: 0, + CurrentRevision: 1, TargetRevision: 0, }, }, @@ -424,7 +424,7 @@ func TestPruneDiskResources(t *testing.T) { NodeStatuses: []operatorv1.NodeStatus{ { NodeName: "test-node-1", - CurrentRevision: 0, + CurrentRevision: 1, TargetRevision: 0, }, }, diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/staticpodstate/staticpodstate_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/staticpodstate/staticpodstate_controller.go index a8b41591ef79..2b23be2ba51a 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/staticpodstate/staticpodstate_controller.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/staticpodstate/staticpodstate_controller.go @@ -107,7 +107,7 @@ func (c *StaticPodStateController) sync() error { // We will still reflect the container not ready state in error conditions, but we don't set the operator as failed. errs = append(errs, fmt.Errorf("nodes/%s pods/%s container=%q is not ready", node.NodeName, pod.Name, containerStatus.Name)) } - if containerStatus.State.Waiting != nil { + if containerStatus.State.Waiting != nil && containerStatus.State.Waiting.Message != "PodInitializing" { errs = append(errs, fmt.Errorf("nodes/%s pods/%s container=%q is waiting: %q - %q", node.NodeName, pod.Name, containerStatus.Name, containerStatus.State.Waiting.Reason, containerStatus.State.Waiting.Message)) failingErrorCount++ } diff --git a/vendor/github.com/openshift/library-go/pkg/operator/status/status_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/status/status_controller.go index 5b16617ea342..8519087f477f 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/status/status_controller.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/status/status_controller.go @@ -17,7 +17,8 @@ import ( configv1 "github.com/openshift/api/config/v1" operatorv1 "github.com/openshift/api/operator/v1" configv1client "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" - + configv1informers "github.com/openshift/client-go/config/informers/externalversions/config/v1" + configv1listers "github.com/openshift/client-go/config/listers/config/v1" configv1helpers "github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers" "github.com/openshift/library-go/pkg/operator/events" "github.com/openshift/library-go/pkg/operator/management" @@ -43,6 +44,7 @@ type StatusSyncer struct { versionGetter VersionGetter operatorClient operatorv1helpers.OperatorClient clusterOperatorClient configv1client.ClusterOperatorsGetter + clusterOperatorLister configv1listers.ClusterOperatorLister eventRecorder events.Recorder // queue only ever has one 
item, but it has nice error handling backoff/retry semantics @@ -53,7 +55,8 @@ func NewClusterOperatorStatusController( name string, relatedObjects []configv1.ObjectReference, clusterOperatorClient configv1client.ClusterOperatorsGetter, - operatorStatusProvider operatorv1helpers.OperatorClient, + clusterOperatorInformer configv1informers.ClusterOperatorInformer, + operatorClient operatorv1helpers.OperatorClient, versionGetter VersionGetter, recorder events.Recorder, ) *StatusSyncer { @@ -62,14 +65,15 @@ func NewClusterOperatorStatusController( relatedObjects: relatedObjects, versionGetter: versionGetter, clusterOperatorClient: clusterOperatorClient, - operatorClient: operatorStatusProvider, + clusterOperatorLister: clusterOperatorInformer.Lister(), + operatorClient: operatorClient, eventRecorder: recorder.WithComponentSuffix("status-controller"), queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "StatusSyncer-"+name), } - operatorStatusProvider.Informer().AddEventHandler(c.eventHandler()) - // TODO watch clusterOperator.status changes when it moves to openshift/api + operatorClient.Informer().AddEventHandler(c.eventHandler()) + clusterOperatorInformer.Informer().AddEventHandler(c.eventHandler()) return c } @@ -89,7 +93,7 @@ func (c StatusSyncer) sync() error { return err } - originalClusterOperatorObj, err := c.clusterOperatorClient.ClusterOperators().Get(c.clusterOperatorName, metav1.GetOptions{}) + originalClusterOperatorObj, err := c.clusterOperatorLister.Get(c.clusterOperatorName) if err != nil && !apierrors.IsNotFound(err) { c.eventRecorder.Warningf("StatusFailed", "Unable to get current operator status for %s: %v", c.clusterOperatorName, err) return err diff --git a/vendor/github.com/openshift/library-go/pkg/operator/status/status_controller_test.go b/vendor/github.com/openshift/library-go/pkg/operator/status/status_controller_test.go index 83a66c4df3ff..30e9a0ff9070 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/status/status_controller_test.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/status/status_controller_test.go @@ -12,7 +12,7 @@ import ( configv1 "github.com/openshift/api/config/v1" operatorv1 "github.com/openshift/api/operator/v1" "github.com/openshift/client-go/config/clientset/versioned/fake" - + configv1listers "github.com/openshift/client-go/config/listers/config/v1" "github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers" "github.com/openshift/library-go/pkg/operator/events" ) @@ -91,9 +91,13 @@ func TestFailing(t *testing.T) { } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - clusterOperatorClient := fake.NewSimpleClientset(&configv1.ClusterOperator{ + clusteroperator := &configv1.ClusterOperator{ ObjectMeta: metav1.ObjectMeta{Name: "OPERATOR_NAME", ResourceVersion: "12"}, - }) + } + clusterOperatorClient := fake.NewSimpleClientset(clusteroperator) + + indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) + indexer.Add(clusteroperator) statusClient := &statusClient{ t: t, @@ -104,6 +108,7 @@ func TestFailing(t *testing.T) { controller := &StatusSyncer{ clusterOperatorName: "OPERATOR_NAME", clusterOperatorClient: clusterOperatorClient.ConfigV1(), + clusterOperatorLister: configv1listers.NewClusterOperatorLister(indexer), operatorClient: statusClient, eventRecorder: events.NewInMemoryRecorder("status"), versionGetter: NewVersionGetter(), diff --git 
a/vendor/github.com/openshift/source-to-image/AUTHORS b/vendor/github.com/openshift/source-to-image/AUTHORS index 66c59d1a3559..2e54fb9f787f 100644 --- a/vendor/github.com/openshift/source-to-image/AUTHORS +++ b/vendor/github.com/openshift/source-to-image/AUTHORS @@ -7,3 +7,4 @@ Dan McPherson Cesar Wong Maciej Szulik Jim Minter +Adam Kaplan diff --git a/vendor/github.com/openshift/source-to-image/pkg/docker/docker.go b/vendor/github.com/openshift/source-to-image/pkg/docker/docker.go index fada549d0ab5..f42105f4df64 100644 --- a/vendor/github.com/openshift/source-to-image/pkg/docker/docker.go +++ b/vendor/github.com/openshift/source-to-image/pkg/docker/docker.go @@ -74,6 +74,8 @@ const containerNamePrefix = "s2i" // containerName creates names for Docker containers launched by S2I. It is // meant to resemble Kubernetes' pkg/kubelet/dockertools.BuildDockerName. func containerName(image string) string { + //Initialize seed + rand.Seed(time.Now().UnixNano()) uid := fmt.Sprintf("%08x", rand.Uint32()) // Replace invalid characters for container name with underscores. image = strings.Map(func(r rune) rune { diff --git a/vendor/github.com/openshift/source-to-image/pkg/docker/docker_test.go b/vendor/github.com/openshift/source-to-image/pkg/docker/docker_test.go index 758983c75a56..a81c8e813ba8 100644 --- a/vendor/github.com/openshift/source-to-image/pkg/docker/docker_test.go +++ b/vendor/github.com/openshift/source-to-image/pkg/docker/docker_test.go @@ -4,7 +4,6 @@ import ( "bytes" "fmt" "io/ioutil" - "math/rand" "os" "path/filepath" "reflect" @@ -22,11 +21,10 @@ import ( ) func TestContainerName(t *testing.T) { - rand.Seed(0) got := containerName("sub.domain.com:5000/repo:tag@sha256:ffffff") - want := "s2i_sub_domain_com_5000_repo_tag_sha256_ffffff_f1f85ff5" - if got != want { - t.Errorf("got %v, want %v", got, want) + want := "s2i_sub_domain_com_5000_repo_tag_sha256_ffffff" + if !strings.Contains(got, want) { + t.Errorf("want %v is not substring of got %v", want, got) } } diff --git a/vendor/github.com/pelletier/go-toml/.gitignore b/vendor/github.com/pelletier/go-toml/.gitignore index 99e38bbc53ff..f1b619018e71 100644 --- a/vendor/github.com/pelletier/go-toml/.gitignore +++ b/vendor/github.com/pelletier/go-toml/.gitignore @@ -1,2 +1 @@ test_program/test_program_bin -fuzz/ diff --git a/vendor/github.com/pelletier/go-toml/.travis.yml b/vendor/github.com/pelletier/go-toml/.travis.yml index c9fbf304bf36..49669116669a 100644 --- a/vendor/github.com/pelletier/go-toml/.travis.yml +++ b/vendor/github.com/pelletier/go-toml/.travis.yml @@ -1,9 +1,9 @@ sudo: false language: go go: - - 1.8.x - - 1.9.x - - 1.10.x + - 1.7.6 + - 1.8.3 + - 1.9 - tip matrix: allow_failures: diff --git a/vendor/github.com/pelletier/go-toml/README.md b/vendor/github.com/pelletier/go-toml/README.md index 0d357acf35db..2681690d5dca 100644 --- a/vendor/github.com/pelletier/go-toml/README.md +++ b/vendor/github.com/pelletier/go-toml/README.md @@ -57,9 +57,9 @@ type Config struct { } doc := []byte(` -[Postgres] -User = "pelletier" -Password = "mypassword"`) +[postgres] +user = "pelletier" +password = "mypassword"`) config := Config{} toml.Unmarshal(doc, &config) @@ -114,18 +114,6 @@ You have to make sure two kind of tests run: You can run both of them using `./test.sh`. -### Fuzzing - -The script `./fuzz.sh` is available to -run [go-fuzz](https://github.com/dvyukov/go-fuzz) on go-toml. - -## Versioning - -Go-toml follows [Semantic Versioning](http://semver.org/). 
The supported version -of [TOML](https://github.com/toml-lang/toml) is indicated at the beginning of -this document. The last two major versions of Go are supported -(see [Go Release Policy](https://golang.org/doc/devel/release.html#policy)). - ## License The MIT License (MIT). Read [LICENSE](LICENSE). diff --git a/vendor/github.com/pelletier/go-toml/cmd/tomljson/main.go b/vendor/github.com/pelletier/go-toml/cmd/tomljson/main.go index 322315b53159..b2d6fc673585 100644 --- a/vendor/github.com/pelletier/go-toml/cmd/tomljson/main.go +++ b/vendor/github.com/pelletier/go-toml/cmd/tomljson/main.go @@ -17,12 +17,13 @@ import ( func main() { flag.Usage = func() { - fmt.Fprintln(os.Stderr, "tomljson can be used in two ways:") - fmt.Fprintln(os.Stderr, "Writing to STDIN and reading from STDOUT:") - fmt.Fprintln(os.Stderr, " cat file.toml | tomljson > file.json") - fmt.Fprintln(os.Stderr, "") - fmt.Fprintln(os.Stderr, "Reading from a file name:") - fmt.Fprintln(os.Stderr, " tomljson file.toml") + fmt.Fprintln(os.Stderr, `tomljson can be used in two ways: +Writing to STDIN and reading from STDOUT: + cat file.toml | tomljson > file.json + +Reading from a file name: + tomljson file.toml +`) } flag.Parse() os.Exit(processMain(flag.Args(), os.Stdin, os.Stdout, os.Stderr)) diff --git a/vendor/github.com/pelletier/go-toml/cmd/tomll/main.go b/vendor/github.com/pelletier/go-toml/cmd/tomll/main.go index 93ab0c962829..36c7e3759c75 100644 --- a/vendor/github.com/pelletier/go-toml/cmd/tomll/main.go +++ b/vendor/github.com/pelletier/go-toml/cmd/tomll/main.go @@ -17,14 +17,15 @@ import ( func main() { flag.Usage = func() { - fmt.Fprintln(os.Stderr, "tomll can be used in two ways:") - fmt.Fprintln(os.Stderr, "Writing to STDIN and reading from STDOUT:") - fmt.Fprintln(os.Stderr, " cat file.toml | tomll > file.toml") - fmt.Fprintln(os.Stderr, "") - fmt.Fprintln(os.Stderr, "Reading and updating a list of files:") - fmt.Fprintln(os.Stderr, " tomll a.toml b.toml c.toml") - fmt.Fprintln(os.Stderr, "") - fmt.Fprintln(os.Stderr, "When given a list of files, tomll will modify all files in place without asking.") + fmt.Fprintln(os.Stderr, `tomll can be used in two ways: +Writing to STDIN and reading from STDOUT: + cat file.toml | tomll > file.toml + +Reading and updating a list of files: + tomll a.toml b.toml c.toml + +When given a list of files, tomll will modify all files in place without asking. +`) } flag.Parse() // read from stdin and print to stdout diff --git a/vendor/github.com/pelletier/go-toml/doc.go b/vendor/github.com/pelletier/go-toml/doc.go index d5fd98c0211a..3c89619e8a96 100644 --- a/vendor/github.com/pelletier/go-toml/doc.go +++ b/vendor/github.com/pelletier/go-toml/doc.go @@ -17,7 +17,7 @@ // JSONPath-like queries // // The package github.com/pelletier/go-toml/query implements a system -// similar to JSONPath to quickly retrieve elements of a TOML document using a +// similar to JSONPath to quickly retrive elements of a TOML document using a // single expression. See the package documentation for more information. 
// package toml diff --git a/vendor/github.com/pelletier/go-toml/doc_test.go b/vendor/github.com/pelletier/go-toml/doc_test.go index 3b8171b22896..a48c04b0178b 100644 --- a/vendor/github.com/pelletier/go-toml/doc_test.go +++ b/vendor/github.com/pelletier/go-toml/doc_test.go @@ -61,24 +61,19 @@ func ExampleMarshal() { type Postgres struct { User string `toml:"user"` Password string `toml:"password"` - Database string `toml:"db" commented:"true" comment:"not used anymore"` } type Config struct { - Postgres Postgres `toml:"postgres" comment:"Postgres configuration"` + Postgres Postgres `toml:"postgres"` } - config := Config{Postgres{User: "pelletier", Password: "mypassword", Database: "old_database"}} + config := Config{Postgres{User: "pelletier", Password: "mypassword"}} b, err := toml.Marshal(config) if err != nil { log.Fatal(err) } fmt.Println(string(b)) // Output: - // # Postgres configuration // [postgres] - // - // # not used anymore - // # db = "old_database" // password = "mypassword" // user = "pelletier" } diff --git a/vendor/github.com/pelletier/go-toml/fuzz.go b/vendor/github.com/pelletier/go-toml/fuzz.go deleted file mode 100644 index 14570c8d3577..000000000000 --- a/vendor/github.com/pelletier/go-toml/fuzz.go +++ /dev/null @@ -1,31 +0,0 @@ -// +build gofuzz - -package toml - -func Fuzz(data []byte) int { - tree, err := LoadBytes(data) - if err != nil { - if tree != nil { - panic("tree must be nil if there is an error") - } - return 0 - } - - str, err := tree.ToTomlString() - if err != nil { - if str != "" { - panic(`str must be "" if there is an error`) - } - panic(err) - } - - tree, err = Load(str) - if err != nil { - if tree != nil { - panic("tree must be nil if there is an error") - } - return 0 - } - - return 1 -} diff --git a/vendor/github.com/pelletier/go-toml/fuzz.sh b/vendor/github.com/pelletier/go-toml/fuzz.sh deleted file mode 100755 index 3204b4c4463a..000000000000 --- a/vendor/github.com/pelletier/go-toml/fuzz.sh +++ /dev/null @@ -1,15 +0,0 @@ -#! /bin/sh -set -eu - -go get github.com/dvyukov/go-fuzz/go-fuzz -go get github.com/dvyukov/go-fuzz/go-fuzz-build - -if [ ! -e toml-fuzz.zip ]; then - go-fuzz-build github.com/pelletier/go-toml -fi - -rm -fr fuzz -mkdir -p fuzz/corpus -cp *.toml fuzz/corpus - -go-fuzz -bin=toml-fuzz.zip -workdir=fuzz diff --git a/vendor/github.com/pelletier/go-toml/keysparsing.go b/vendor/github.com/pelletier/go-toml/keysparsing.go index 284db64678b3..d62ca5fd1d74 100644 --- a/vendor/github.com/pelletier/go-toml/keysparsing.go +++ b/vendor/github.com/pelletier/go-toml/keysparsing.go @@ -9,14 +9,12 @@ import ( "unicode" ) -// Convert the bare key group string to an array. -// The input supports double quotation to allow "." inside the key name, -// but escape sequences are not supported. Lexers must unescape them beforehand. 
func parseKey(key string) ([]string, error) { groups := []string{} var buffer bytes.Buffer inQuotes := false wasInQuotes := false + escapeNext := false ignoreSpace := true expectDot := false @@ -27,7 +25,15 @@ func parseKey(key string) ([]string, error) { } ignoreSpace = false } + if escapeNext { + buffer.WriteRune(char) + escapeNext = false + continue + } switch char { + case '\\': + escapeNext = true + continue case '"': if inQuotes { groups = append(groups, buffer.String()) @@ -71,6 +77,9 @@ func parseKey(key string) ([]string, error) { if inQuotes { return nil, errors.New("mismatched quotes") } + if escapeNext { + return nil, errors.New("unfinished escape sequence") + } if buffer.Len() > 0 { groups = append(groups, buffer.String()) } diff --git a/vendor/github.com/pelletier/go-toml/keysparsing_test.go b/vendor/github.com/pelletier/go-toml/keysparsing_test.go index 84cb82604127..1a9ecccaa9f0 100644 --- a/vendor/github.com/pelletier/go-toml/keysparsing_test.go +++ b/vendor/github.com/pelletier/go-toml/keysparsing_test.go @@ -22,10 +22,7 @@ func testResult(t *testing.T, key string, expected []string) { } func testError(t *testing.T, key string, expectedError string) { - res, err := parseKey(key) - if err == nil { - t.Fatalf("Expected error, but succesfully parsed key %s", res) - } + _, err := parseKey(key) if fmt.Sprintf("%s", err) != expectedError { t.Fatalf("Expected error \"%s\", but got \"%s\".", expectedError, err) } @@ -50,10 +47,6 @@ func TestBaseKeyPound(t *testing.T) { func TestQuotedKeys(t *testing.T) { testResult(t, `hello."foo".bar`, []string{"hello", "foo", "bar"}) testResult(t, `"hello!"`, []string{"hello!"}) - testResult(t, `foo."ba.r".baz`, []string{"foo", "ba.r", "baz"}) - - // escape sequences must not be converted - testResult(t, `"hello\tworld"`, []string{`hello\tworld`}) } func TestEmptyKey(t *testing.T) { diff --git a/vendor/github.com/pelletier/go-toml/lexer.go b/vendor/github.com/pelletier/go-toml/lexer.go index d11de428594c..1b6647d66fa1 100644 --- a/vendor/github.com/pelletier/go-toml/lexer.go +++ b/vendor/github.com/pelletier/go-toml/lexer.go @@ -204,14 +204,6 @@ func (l *tomlLexer) lexRvalue() tomlLexStateFn { return l.lexFalse } - if l.follow("inf") { - return l.lexInf - } - - if l.follow("nan") { - return l.lexNan - } - if isSpace(next) { l.skip() continue @@ -273,18 +265,6 @@ func (l *tomlLexer) lexFalse() tomlLexStateFn { return l.lexRvalue } -func (l *tomlLexer) lexInf() tomlLexStateFn { - l.fastForward(3) - l.emit(tokenInf) - return l.lexRvalue -} - -func (l *tomlLexer) lexNan() tomlLexStateFn { - l.fastForward(3) - l.emit(tokenNan) - return l.lexRvalue -} - func (l *tomlLexer) lexEqual() tomlLexStateFn { l.next() l.emit(tokenEqual) @@ -297,8 +277,6 @@ func (l *tomlLexer) lexComma() tomlLexStateFn { return l.lexRvalue } -// Parse the key and emits its value without escape sequences. -// bare keys, basic string keys and literal string keys are supported. 
func (l *tomlLexer) lexKey() tomlLexStateFn { growingString := "" @@ -309,16 +287,7 @@ func (l *tomlLexer) lexKey() tomlLexStateFn { if err != nil { return l.errorf(err.Error()) } - growingString += str - l.next() - continue - } else if r == '\'' { - l.next() - str, err := l.lexLiteralStringAsString(`'`, false) - if err != nil { - return l.errorf(err.Error()) - } - growingString += str + growingString += `"` + str + `"` l.next() continue } else if r == '\n' { @@ -558,7 +527,6 @@ func (l *tomlLexer) lexTableKey() tomlLexStateFn { return l.lexInsideTableKey } -// Parse the key till "]]", but only bare keys are supported func (l *tomlLexer) lexInsideTableArrayKey() tomlLexStateFn { for r := l.peek(); r != eof; r = l.peek() { switch r { @@ -582,7 +550,6 @@ func (l *tomlLexer) lexInsideTableArrayKey() tomlLexStateFn { return l.errorf("unclosed table array key") } -// Parse the key till "]" but only bare keys are supported func (l *tomlLexer) lexInsideTableKey() tomlLexStateFn { for r := l.peek(); r != eof; r = l.peek() { switch r { @@ -608,77 +575,11 @@ func (l *tomlLexer) lexRightBracket() tomlLexStateFn { return l.lexRvalue } -type validRuneFn func(r rune) bool - -func isValidHexRune(r rune) bool { - return r >= 'a' && r <= 'f' || - r >= 'A' && r <= 'F' || - r >= '0' && r <= '9' || - r == '_' -} - -func isValidOctalRune(r rune) bool { - return r >= '0' && r <= '7' || r == '_' -} - -func isValidBinaryRune(r rune) bool { - return r == '0' || r == '1' || r == '_' -} - func (l *tomlLexer) lexNumber() tomlLexStateFn { r := l.peek() - - if r == '0' { - follow := l.peekString(2) - if len(follow) == 2 { - var isValidRune validRuneFn - switch follow[1] { - case 'x': - isValidRune = isValidHexRune - case 'o': - isValidRune = isValidOctalRune - case 'b': - isValidRune = isValidBinaryRune - default: - if follow[1] >= 'a' && follow[1] <= 'z' || follow[1] >= 'A' && follow[1] <= 'Z' { - return l.errorf("unknown number base: %s. 
possible options are x (hex) o (octal) b (binary)", string(follow[1])) - } - } - - if isValidRune != nil { - l.next() - l.next() - digitSeen := false - for { - next := l.peek() - if !isValidRune(next) { - break - } - digitSeen = true - l.next() - } - - if !digitSeen { - return l.errorf("number needs at least one digit") - } - - l.emit(tokenInteger) - - return l.lexRvalue - } - } - } - if r == '+' || r == '-' { l.next() - if l.follow("inf") { - return l.lexInf - } - if l.follow("nan") { - return l.lexNan - } } - pointSeen := false expSeen := false digitSeen := false diff --git a/vendor/github.com/pelletier/go-toml/lexer_test.go b/vendor/github.com/pelletier/go-toml/lexer_test.go index cb4913031361..313b83c5d11a 100644 --- a/vendor/github.com/pelletier/go-toml/lexer_test.go +++ b/vendor/github.com/pelletier/go-toml/lexer_test.go @@ -690,7 +690,7 @@ func TestKeyGroupArray(t *testing.T) { func TestQuotedKey(t *testing.T) { testFlow(t, "\"a b\" = 42", []token{ - {Position{1, 1}, tokenKey, "a b"}, + {Position{1, 1}, tokenKey, "\"a b\""}, {Position{1, 7}, tokenEqual, "="}, {Position{1, 9}, tokenInteger, "42"}, {Position{1, 11}, tokenEOF, ""}, diff --git a/vendor/github.com/pelletier/go-toml/marshal.go b/vendor/github.com/pelletier/go-toml/marshal.go index 671da5564c30..1a3176f97a35 100644 --- a/vendor/github.com/pelletier/go-toml/marshal.go +++ b/vendor/github.com/pelletier/go-toml/marshal.go @@ -4,33 +4,17 @@ import ( "bytes" "errors" "fmt" - "io" "reflect" - "strconv" "strings" "time" ) -const tagKeyMultiline = "multiline" - type tomlOpts struct { name string - comment string - commented bool - multiline bool include bool omitempty bool } -type encOpts struct { - quoteMapKeys bool - arraysOneElementPerLine bool -} - -var encOptsDefaults = encOpts{ - quoteMapKeys: false, -} - var timeType = reflect.TypeOf(time.Time{}) var marshalerType = reflect.TypeOf(new(Marshaler)).Elem() @@ -110,15 +94,8 @@ encoder, except that there is no concept of a Marshaler interface or MarshalTOML function for sub-structs, and currently only definite types can be marshaled (i.e. no `interface{}`). -The following struct annotations are supported: - - toml:"Field" Overrides the field's name to output. - omitempty When set, empty values and groups are not emitted. - comment:"comment" Emits a # comment on the same line. This supports new lines. - commented:"true" Emits the value as commented. - Note that pointers are automatically assigned the "omitempty" option, as TOML -explicitly does not handle null values (saying instead the label should be +explicity does not handle null values (saying instead the label should be dropped). Tree structural types and corresponding marshal types: @@ -138,66 +115,6 @@ Tree primitive types and corresponding marshal types: time.Time time.Time{}, pointers to same */ func Marshal(v interface{}) ([]byte, error) { - return NewEncoder(nil).marshal(v) -} - -// Encoder writes TOML values to an output stream. -type Encoder struct { - w io.Writer - encOpts -} - -// NewEncoder returns a new encoder that writes to w. -func NewEncoder(w io.Writer) *Encoder { - return &Encoder{ - w: w, - encOpts: encOptsDefaults, - } -} - -// Encode writes the TOML encoding of v to the stream. -// -// See the documentation for Marshal for details. 
-func (e *Encoder) Encode(v interface{}) error { - b, err := e.marshal(v) - if err != nil { - return err - } - if _, err := e.w.Write(b); err != nil { - return err - } - return nil -} - -// QuoteMapKeys sets up the encoder to encode -// maps with string type keys with quoted TOML keys. -// -// This relieves the character limitations on map keys. -func (e *Encoder) QuoteMapKeys(v bool) *Encoder { - e.quoteMapKeys = v - return e -} - -// ArraysWithOneElementPerLine sets up the encoder to encode arrays -// with more than one element on multiple lines instead of one. -// -// For example: -// -// A = [1,2,3] -// -// Becomes -// -// A = [ -// 1, -// 2, -// 3, -// ] -func (e *Encoder) ArraysWithOneElementPerLine(v bool) *Encoder { - e.arraysOneElementPerLine = v - return e -} - -func (e *Encoder) marshal(v interface{}) ([]byte, error) { mtype := reflect.TypeOf(v) if mtype.Kind() != reflect.Struct { return []byte{}, errors.New("Only a struct can be marshaled to TOML") @@ -206,21 +123,18 @@ func (e *Encoder) marshal(v interface{}) ([]byte, error) { if isCustomMarshaler(mtype) { return callCustomMarshaler(sval) } - t, err := e.valueToTree(mtype, sval) + t, err := valueToTree(mtype, sval) if err != nil { return []byte{}, err } - - var buf bytes.Buffer - _, err = t.writeTo(&buf, "", "", 0, e.arraysOneElementPerLine) - - return buf.Bytes(), err + s, err := t.ToTomlString() + return []byte(s), err } // Convert given marshal struct or map value to toml tree -func (e *Encoder) valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, error) { +func valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, error) { if mtype.Kind() == reflect.Ptr { - return e.valueToTree(mtype.Elem(), mval.Elem()) + return valueToTree(mtype.Elem(), mval.Elem()) } tval := newTree() switch mtype.Kind() { @@ -229,44 +143,31 @@ func (e *Encoder) valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, er mtypef, mvalf := mtype.Field(i), mval.Field(i) opts := tomlOptions(mtypef) if opts.include && (!opts.omitempty || !isZero(mvalf)) { - val, err := e.valueToToml(mtypef.Type, mvalf) + val, err := valueToToml(mtypef.Type, mvalf) if err != nil { return nil, err } - - tval.SetWithOptions(opts.name, SetOptions{ - Comment: opts.comment, - Commented: opts.commented, - Multiline: opts.multiline, - }, val) + tval.Set(opts.name, val) } } case reflect.Map: for _, key := range mval.MapKeys() { mvalf := mval.MapIndex(key) - val, err := e.valueToToml(mtype.Elem(), mvalf) + val, err := valueToToml(mtype.Elem(), mvalf) if err != nil { return nil, err } - if e.quoteMapKeys { - keyStr, err := tomlValueStringRepresentation(key.String(), "", e.arraysOneElementPerLine) - if err != nil { - return nil, err - } - tval.SetPath([]string{keyStr}, val) - } else { - tval.Set(key.String(), val) - } + tval.Set(key.String(), val) } } return tval, nil } // Convert given marshal slice to slice of Toml trees -func (e *Encoder) valueToTreeSlice(mtype reflect.Type, mval reflect.Value) ([]*Tree, error) { +func valueToTreeSlice(mtype reflect.Type, mval reflect.Value) ([]*Tree, error) { tval := make([]*Tree, mval.Len(), mval.Len()) for i := 0; i < mval.Len(); i++ { - val, err := e.valueToTree(mtype.Elem(), mval.Index(i)) + val, err := valueToTree(mtype.Elem(), mval.Index(i)) if err != nil { return nil, err } @@ -276,10 +177,10 @@ func (e *Encoder) valueToTreeSlice(mtype reflect.Type, mval reflect.Value) ([]*T } // Convert given marshal slice to slice of toml values -func (e *Encoder) valueToOtherSlice(mtype reflect.Type, mval reflect.Value) (interface{}, error) 
{ +func valueToOtherSlice(mtype reflect.Type, mval reflect.Value) (interface{}, error) { tval := make([]interface{}, mval.Len(), mval.Len()) for i := 0; i < mval.Len(); i++ { - val, err := e.valueToToml(mtype.Elem(), mval.Index(i)) + val, err := valueToToml(mtype.Elem(), mval.Index(i)) if err != nil { return nil, err } @@ -289,19 +190,19 @@ func (e *Encoder) valueToOtherSlice(mtype reflect.Type, mval reflect.Value) (int } // Convert given marshal value to toml value -func (e *Encoder) valueToToml(mtype reflect.Type, mval reflect.Value) (interface{}, error) { +func valueToToml(mtype reflect.Type, mval reflect.Value) (interface{}, error) { if mtype.Kind() == reflect.Ptr { - return e.valueToToml(mtype.Elem(), mval.Elem()) + return valueToToml(mtype.Elem(), mval.Elem()) } switch { case isCustomMarshaler(mtype): return callCustomMarshaler(mval) case isTree(mtype): - return e.valueToTree(mtype, mval) + return valueToTree(mtype, mval) case isTreeSlice(mtype): - return e.valueToTreeSlice(mtype, mval) + return valueToTreeSlice(mtype, mval) case isOtherSlice(mtype): - return e.valueToOtherSlice(mtype, mval) + return valueToOtherSlice(mtype, mval) default: switch mtype.Kind() { case reflect.Bool: @@ -326,16 +227,17 @@ func (e *Encoder) valueToToml(mtype reflect.Type, mval reflect.Value) (interface // Neither Unmarshaler interfaces nor UnmarshalTOML functions are supported for // sub-structs, and only definite types can be unmarshaled. func (t *Tree) Unmarshal(v interface{}) error { - d := Decoder{tval: t} - return d.unmarshal(v) -} + mtype := reflect.TypeOf(v) + if mtype.Kind() != reflect.Ptr || mtype.Elem().Kind() != reflect.Struct { + return errors.New("Only a pointer to struct can be unmarshaled from TOML") + } -// Marshal returns the TOML encoding of Tree. -// See Marshal() documentation for types mapping table. -func (t *Tree) Marshal() ([]byte, error) { - var buf bytes.Buffer - err := NewEncoder(&buf).Encode(t) - return buf.Bytes(), err + sval, err := valueFromTree(mtype.Elem(), t) + if err != nil { + return err + } + reflect.ValueOf(v).Elem().Set(sval) + return nil } // Unmarshal parses the TOML-encoded data and stores the result in the value @@ -344,10 +246,6 @@ func (t *Tree) Marshal() ([]byte, error) { // sub-structs, and currently only definite types can be unmarshaled to (i.e. no // `interface{}`). // -// The following struct annotations are supported: -// -// toml:"Field" Overrides the field's name to map to. -// // See Marshal() documentation for types mapping table. func Unmarshal(data []byte, v interface{}) error { t, err := LoadReader(bytes.NewReader(data)) @@ -357,52 +255,10 @@ func Unmarshal(data []byte, v interface{}) error { return t.Unmarshal(v) } -// Decoder reads and decodes TOML values from an input stream. -type Decoder struct { - r io.Reader - tval *Tree - encOpts -} - -// NewDecoder returns a new decoder that reads from r. -func NewDecoder(r io.Reader) *Decoder { - return &Decoder{ - r: r, - encOpts: encOptsDefaults, - } -} - -// Decode reads a TOML-encoded value from it's input -// and unmarshals it in the value pointed at by v. -// -// See the documentation for Marshal for details. 
-func (d *Decoder) Decode(v interface{}) error { - var err error - d.tval, err = LoadReader(d.r) - if err != nil { - return err - } - return d.unmarshal(v) -} - -func (d *Decoder) unmarshal(v interface{}) error { - mtype := reflect.TypeOf(v) - if mtype.Kind() != reflect.Ptr || mtype.Elem().Kind() != reflect.Struct { - return errors.New("Only a pointer to struct can be unmarshaled from TOML") - } - - sval, err := d.valueFromTree(mtype.Elem(), d.tval) - if err != nil { - return err - } - reflect.ValueOf(v).Elem().Set(sval) - return nil -} - // Convert toml tree to marshal struct or map, using marshal type -func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree) (reflect.Value, error) { +func valueFromTree(mtype reflect.Type, tval *Tree) (reflect.Value, error) { if mtype.Kind() == reflect.Ptr { - return d.unwrapPointer(mtype, tval) + return unwrapPointer(mtype, tval) } var mval reflect.Value switch mtype.Kind() { @@ -420,7 +276,7 @@ func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree) (reflect.Value, continue } val := tval.Get(key) - mvalf, err := d.valueFromToml(mtypef.Type, val) + mvalf, err := valueFromToml(mtypef.Type, val) if err != nil { return mval, formatError(err, tval.GetPosition(key)) } @@ -432,9 +288,8 @@ func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree) (reflect.Value, case reflect.Map: mval = reflect.MakeMap(mtype) for _, key := range tval.Keys() { - // TODO: path splits key - val := tval.GetPath([]string{key}) - mvalf, err := d.valueFromToml(mtype.Elem(), val) + val := tval.Get(key) + mvalf, err := valueFromToml(mtype.Elem(), val) if err != nil { return mval, formatError(err, tval.GetPosition(key)) } @@ -445,10 +300,10 @@ func (d *Decoder) valueFromTree(mtype reflect.Type, tval *Tree) (reflect.Value, } // Convert toml value to marshal struct/map slice, using marshal type -func (d *Decoder) valueFromTreeSlice(mtype reflect.Type, tval []*Tree) (reflect.Value, error) { +func valueFromTreeSlice(mtype reflect.Type, tval []*Tree) (reflect.Value, error) { mval := reflect.MakeSlice(mtype, len(tval), len(tval)) for i := 0; i < len(tval); i++ { - val, err := d.valueFromTree(mtype.Elem(), tval[i]) + val, err := valueFromTree(mtype.Elem(), tval[i]) if err != nil { return mval, err } @@ -458,10 +313,10 @@ func (d *Decoder) valueFromTreeSlice(mtype reflect.Type, tval []*Tree) (reflect. 
} // Convert toml value to marshal primitive slice, using marshal type -func (d *Decoder) valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (reflect.Value, error) { +func valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (reflect.Value, error) { mval := reflect.MakeSlice(mtype, len(tval), len(tval)) for i := 0; i < len(tval); i++ { - val, err := d.valueFromToml(mtype.Elem(), tval[i]) + val, err := valueFromToml(mtype.Elem(), tval[i]) if err != nil { return mval, err } @@ -471,86 +326,117 @@ func (d *Decoder) valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (r } // Convert toml value to marshal value, using marshal type -func (d *Decoder) valueFromToml(mtype reflect.Type, tval interface{}) (reflect.Value, error) { +func valueFromToml(mtype reflect.Type, tval interface{}) (reflect.Value, error) { if mtype.Kind() == reflect.Ptr { - return d.unwrapPointer(mtype, tval) + return unwrapPointer(mtype, tval) } - - switch tval.(type) { - case *Tree: - if isTree(mtype) { - return d.valueFromTree(mtype, tval.(*Tree)) - } - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a tree", tval, tval) - case []*Tree: - if isTreeSlice(mtype) { - return d.valueFromTreeSlice(mtype, tval.([]*Tree)) - } - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to trees", tval, tval) - case []interface{}: - if isOtherSlice(mtype) { - return d.valueFromOtherSlice(mtype, tval.([]interface{})) - } - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to a slice", tval, tval) + switch { + case isTree(mtype): + return valueFromTree(mtype, tval.(*Tree)) + case isTreeSlice(mtype): + return valueFromTreeSlice(mtype, tval.([]*Tree)) + case isOtherSlice(mtype): + return valueFromOtherSlice(mtype, tval.([]interface{})) default: switch mtype.Kind() { - case reflect.Bool, reflect.Struct: - val := reflect.ValueOf(tval) - // if this passes for when mtype is reflect.Struct, tval is a time.Time - if !val.Type().ConvertibleTo(mtype) { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) + case reflect.Bool: + val, ok := tval.(bool) + if !ok { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to bool", tval, tval) } - - return val.Convert(mtype), nil - case reflect.String: - val := reflect.ValueOf(tval) - // stupidly, int64 is convertible to string. So special case this. 
- if !val.Type().ConvertibleTo(mtype) || val.Kind() == reflect.Int64 { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) + return reflect.ValueOf(val), nil + case reflect.Int: + val, ok := tval.(int64) + if !ok { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to int", tval, tval) } - - return val.Convert(mtype), nil - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - val := reflect.ValueOf(tval) - if !val.Type().ConvertibleTo(mtype) { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) + return reflect.ValueOf(int(val)), nil + case reflect.Int8: + val, ok := tval.(int64) + if !ok { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to int", tval, tval) } - if reflect.Indirect(reflect.New(mtype)).OverflowInt(val.Int()) { - return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) + return reflect.ValueOf(int8(val)), nil + case reflect.Int16: + val, ok := tval.(int64) + if !ok { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to int", tval, tval) } - - return val.Convert(mtype), nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - val := reflect.ValueOf(tval) - if !val.Type().ConvertibleTo(mtype) { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) + return reflect.ValueOf(int16(val)), nil + case reflect.Int32: + val, ok := tval.(int64) + if !ok { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to int", tval, tval) } - if val.Int() < 0 { - return reflect.ValueOf(nil), fmt.Errorf("%v(%T) is negative so does not fit in %v", tval, tval, mtype.String()) + return reflect.ValueOf(int32(val)), nil + case reflect.Int64: + val, ok := tval.(int64) + if !ok { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to int", tval, tval) } - if reflect.Indirect(reflect.New(mtype)).OverflowUint(uint64(val.Int())) { - return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) + return reflect.ValueOf(val), nil + case reflect.Uint: + val, ok := tval.(int64) + if !ok { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to uint", tval, tval) } - - return val.Convert(mtype), nil - case reflect.Float32, reflect.Float64: - val := reflect.ValueOf(tval) - if !val.Type().ConvertibleTo(mtype) { - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v", tval, tval, mtype.String()) + return reflect.ValueOf(uint(val)), nil + case reflect.Uint8: + val, ok := tval.(int64) + if !ok { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to uint", tval, tval) } - if reflect.Indirect(reflect.New(mtype)).OverflowFloat(val.Float()) { - return reflect.ValueOf(nil), fmt.Errorf("%v(%T) would overflow %v", tval, tval, mtype.String()) + return reflect.ValueOf(uint8(val)), nil + case reflect.Uint16: + val, ok := tval.(int64) + if !ok { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to uint", tval, tval) } - - return val.Convert(mtype), nil + return reflect.ValueOf(uint16(val)), nil + case reflect.Uint32: + val, ok := tval.(int64) + if !ok { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to uint", tval, tval) + } + return reflect.ValueOf(uint32(val)), nil + case reflect.Uint64: + val, ok := tval.(int64) + if !ok { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to uint", tval, 
tval) + } + return reflect.ValueOf(uint64(val)), nil + case reflect.Float32: + val, ok := tval.(float64) + if !ok { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to float", tval, tval) + } + return reflect.ValueOf(float32(val)), nil + case reflect.Float64: + val, ok := tval.(float64) + if !ok { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to float", tval, tval) + } + return reflect.ValueOf(val), nil + case reflect.String: + val, ok := tval.(string) + if !ok { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to string", tval, tval) + } + return reflect.ValueOf(val), nil + case reflect.Struct: + val, ok := tval.(time.Time) + if !ok { + return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to time", tval, tval) + } + return reflect.ValueOf(val), nil default: - return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to %v(%v)", tval, tval, mtype, mtype.Kind()) + return reflect.ValueOf(nil), fmt.Errorf("Unmarshal can't handle %v(%v)", mtype, mtype.Kind()) } } } -func (d *Decoder) unwrapPointer(mtype reflect.Type, tval interface{}) (reflect.Value, error) { - val, err := d.valueFromToml(mtype.Elem(), tval) +func unwrapPointer(mtype reflect.Type, tval interface{}) (reflect.Value, error) { + val, err := valueFromToml(mtype.Elem(), tval) if err != nil { return reflect.ValueOf(nil), err } @@ -562,13 +448,7 @@ func (d *Decoder) unwrapPointer(mtype reflect.Type, tval interface{}) (reflect.V func tomlOptions(vf reflect.StructField) tomlOpts { tag := vf.Tag.Get("toml") parse := strings.Split(tag, ",") - var comment string - if c := vf.Tag.Get("comment"); c != "" { - comment = c - } - commented, _ := strconv.ParseBool(vf.Tag.Get("commented")) - multiline, _ := strconv.ParseBool(vf.Tag.Get(tagKeyMultiline)) - result := tomlOpts{name: vf.Name, comment: comment, commented: commented, multiline: multiline, include: true, omitempty: false} + result := tomlOpts{vf.Name, true, false} if parse[0] != "" { if parse[0] == "-" && len(parse) == 1 { result.include = false diff --git a/vendor/github.com/pelletier/go-toml/marshal_test.go b/vendor/github.com/pelletier/go-toml/marshal_test.go index 00cbbf31a7b3..dbfc7c1d133b 100644 --- a/vendor/github.com/pelletier/go-toml/marshal_test.go +++ b/vendor/github.com/pelletier/go-toml/marshal_test.go @@ -6,7 +6,6 @@ import ( "fmt" "io/ioutil" "reflect" - "strings" "testing" "time" ) @@ -146,8 +145,8 @@ var docData = testDoc{ Second: &subdoc, }, SubDocList: []testSubDoc{ - {"List.First", 0}, - {"List.Second", 0}, + testSubDoc{"List.First", 0}, + testSubDoc{"List.Second", 0}, }, SubDocPtrs: []*testSubDoc{&subdoc}, } @@ -509,14 +508,6 @@ func TestPointerUnmarshal(t *testing.T) { } } -func TestUnmarshalTypeMismatch(t *testing.T) { - result := pointerMarshalTestStruct{} - err := Unmarshal([]byte("List = 123"), &result) - if !strings.HasPrefix(err.Error(), "(1, 1): Can't convert 123(int64) to []string(slice)") { - t.Errorf("Type mismatch must be reported: got %v", err.Error()) - } -} - type nestedMarshalTestStruct struct { String [][]string //Struct [][]basicMarshalTestSubStruct @@ -530,7 +521,7 @@ var strPtr = []*string{&str1, &str2} var strPtr2 = []*[]*string{&strPtr} var nestedTestData = nestedMarshalTestStruct{ - String: [][]string{{"Five", "Six"}, {"One", "Two"}}, + String: [][]string{[]string{"Five", "Six"}, []string{"One", "Two"}}, StringPtr: &strPtr2, } @@ -607,200 +598,3 @@ func TestNestedCustomMarshaler(t *testing.T) { t.Errorf("Bad nested custom marshaler: 
expected\n-----\n%s\n-----\ngot\n-----\n%s\n-----\n", expected, result) } } - -var commentTestToml = []byte(` -# it's a comment on type -[postgres] - # isCommented = "dvalue" - noComment = "cvalue" - - # A comment on AttrB with a - # break line - password = "bvalue" - - # A comment on AttrA - user = "avalue" - - [[postgres.My]] - - # a comment on my on typeC - My = "Foo" - - [[postgres.My]] - - # a comment on my on typeC - My = "Baar" -`) - -func TestMarshalComment(t *testing.T) { - type TypeC struct { - My string `comment:"a comment on my on typeC"` - } - type TypeB struct { - AttrA string `toml:"user" comment:"A comment on AttrA"` - AttrB string `toml:"password" comment:"A comment on AttrB with a\n break line"` - AttrC string `toml:"noComment"` - AttrD string `toml:"isCommented" commented:"true"` - My []TypeC - } - type TypeA struct { - TypeB TypeB `toml:"postgres" comment:"it's a comment on type"` - } - - ta := []TypeC{{My: "Foo"}, {My: "Baar"}} - config := TypeA{TypeB{AttrA: "avalue", AttrB: "bvalue", AttrC: "cvalue", AttrD: "dvalue", My: ta}} - result, err := Marshal(config) - if err != nil { - t.Fatal(err) - } - expected := commentTestToml - if !bytes.Equal(result, expected) { - t.Errorf("Bad marshal: expected\n-----\n%s\n-----\ngot\n-----\n%s\n-----\n", expected, result) - } -} - -type mapsTestStruct struct { - Simple map[string]string - Paths map[string]string - Other map[string]float64 - X struct { - Y struct { - Z map[string]bool - } - } -} - -var mapsTestData = mapsTestStruct{ - Simple: map[string]string{ - "one plus one": "two", - "next": "three", - }, - Paths: map[string]string{ - "/this/is/a/path": "/this/is/also/a/path", - "/heloo.txt": "/tmp/lololo.txt", - }, - Other: map[string]float64{ - "testing": 3.9999, - }, - X: struct{ Y struct{ Z map[string]bool } }{ - Y: struct{ Z map[string]bool }{ - Z: map[string]bool{ - "is.Nested": true, - }, - }, - }, -} -var mapsTestToml = []byte(` -[Other] - "testing" = 3.9999 - -[Paths] - "/heloo.txt" = "/tmp/lololo.txt" - "/this/is/a/path" = "/this/is/also/a/path" - -[Simple] - "next" = "three" - "one plus one" = "two" - -[X] - - [X.Y] - - [X.Y.Z] - "is.Nested" = true -`) - -func TestEncodeQuotedMapKeys(t *testing.T) { - var buf bytes.Buffer - if err := NewEncoder(&buf).QuoteMapKeys(true).Encode(mapsTestData); err != nil { - t.Fatal(err) - } - result := buf.Bytes() - expected := mapsTestToml - if !bytes.Equal(result, expected) { - t.Errorf("Bad maps marshal: expected\n-----\n%s\n-----\ngot\n-----\n%s\n-----\n", expected, result) - } -} - -func TestDecodeQuotedMapKeys(t *testing.T) { - result := mapsTestStruct{} - err := NewDecoder(bytes.NewBuffer(mapsTestToml)).Decode(&result) - expected := mapsTestData - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(result, expected) { - t.Errorf("Bad maps unmarshal: expected %v, got %v", expected, result) - } -} - -type structArrayNoTag struct { - A struct { - B []int64 - C []int64 - } -} - -func TestMarshalArray(t *testing.T) { - expected := []byte(` -[A] - B = [1,2,3] - C = [1] -`) - - m := structArrayNoTag{ - A: struct { - B []int64 - C []int64 - }{ - B: []int64{1, 2, 3}, - C: []int64{1}, - }, - } - - b, err := Marshal(m) - - if err != nil { - t.Fatal(err) - } - - if !bytes.Equal(b, expected) { - t.Errorf("Bad arrays marshal: expected\n-----\n%s\n-----\ngot\n-----\n%s\n-----\n", expected, b) - } -} - -func TestMarshalArrayOnePerLine(t *testing.T) { - expected := []byte(` -[A] - B = [ - 1, - 2, - 3, - ] - C = [1] -`) - - m := structArrayNoTag{ - A: struct { - B []int64 - C []int64 - }{ - 
B: []int64{1, 2, 3}, - C: []int64{1}, - }, - } - - var buf bytes.Buffer - encoder := NewEncoder(&buf).ArraysWithOneElementPerLine(true) - err := encoder.Encode(m) - - if err != nil { - t.Fatal(err) - } - - b := buf.Bytes() - - if !bytes.Equal(b, expected) { - t.Errorf("Bad arrays marshal: expected\n-----\n%s\n-----\ngot\n-----\n%s\n-----\n", expected, b) - } -} diff --git a/vendor/github.com/pelletier/go-toml/parser.go b/vendor/github.com/pelletier/go-toml/parser.go index 2d27599a9993..8ee49cb5649b 100644 --- a/vendor/github.com/pelletier/go-toml/parser.go +++ b/vendor/github.com/pelletier/go-toml/parser.go @@ -5,7 +5,6 @@ package toml import ( "errors" "fmt" - "math" "reflect" "regexp" "strconv" @@ -186,7 +185,10 @@ func (p *tomlParser) parseAssign() tomlParserStateFn { } // assign value to the found table - keyVals := []string{key.val} + keyVals, err := parseKey(key.val) + if err != nil { + p.raiseError(key, "%s", err) + } if len(keyVals) != 1 { p.raiseError(key, "Invalid key") } @@ -203,32 +205,20 @@ func (p *tomlParser) parseAssign() tomlParserStateFn { case *Tree, []*Tree: toInsert = value default: - toInsert = &tomlValue{value: value, position: key.Position} + toInsert = &tomlValue{value, key.Position} } targetNode.values[keyVal] = toInsert return p.parseStart } var numberUnderscoreInvalidRegexp *regexp.Regexp -var hexNumberUnderscoreInvalidRegexp *regexp.Regexp -func numberContainsInvalidUnderscore(value string) error { +func cleanupNumberToken(value string) (string, error) { if numberUnderscoreInvalidRegexp.MatchString(value) { - return errors.New("invalid use of _ in number") - } - return nil -} - -func hexNumberContainsInvalidUnderscore(value string) error { - if hexNumberUnderscoreInvalidRegexp.MatchString(value) { - return errors.New("invalid use of _ in hex number") + return "", errors.New("invalid use of _ in number") } - return nil -} - -func cleanupNumberToken(value string) string { cleanedVal := strings.Replace(value, "_", "", -1) - return cleanedVal + return cleanedVal, nil } func (p *tomlParser) parseRvalue() interface{} { @@ -244,57 +234,21 @@ func (p *tomlParser) parseRvalue() interface{} { return true case tokenFalse: return false - case tokenInf: - if tok.val[0] == '-' { - return math.Inf(-1) - } - return math.Inf(1) - case tokenNan: - return math.NaN() case tokenInteger: - cleanedVal := cleanupNumberToken(tok.val) - var err error - var val int64 - if len(cleanedVal) >= 3 && cleanedVal[0] == '0' { - switch cleanedVal[1] { - case 'x': - err = hexNumberContainsInvalidUnderscore(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - val, err = strconv.ParseInt(cleanedVal[2:], 16, 64) - case 'o': - err = numberContainsInvalidUnderscore(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - val, err = strconv.ParseInt(cleanedVal[2:], 8, 64) - case 'b': - err = numberContainsInvalidUnderscore(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - val, err = strconv.ParseInt(cleanedVal[2:], 2, 64) - default: - panic("invalid base") // the lexer should catch this first - } - } else { - err = numberContainsInvalidUnderscore(tok.val) - if err != nil { - p.raiseError(tok, "%s", err) - } - val, err = strconv.ParseInt(cleanedVal, 10, 64) + cleanedVal, err := cleanupNumberToken(tok.val) + if err != nil { + p.raiseError(tok, "%s", err) } + val, err := strconv.ParseInt(cleanedVal, 10, 64) if err != nil { p.raiseError(tok, "%s", err) } return val case tokenFloat: - err := numberContainsInvalidUnderscore(tok.val) + cleanedVal, err := 
cleanupNumberToken(tok.val) if err != nil { p.raiseError(tok, "%s", err) } - cleanedVal := cleanupNumberToken(tok.val) val, err := strconv.ParseFloat(cleanedVal, 64) if err != nil { p.raiseError(tok, "%s", err) @@ -355,7 +309,7 @@ Loop: } p.getToken() default: - p.raiseError(follow, "unexpected token type in inline table: %s", follow.String()) + p.raiseError(follow, "unexpected token type in inline table: %s", follow.typ.String()) } previous = follow } @@ -425,6 +379,5 @@ func parseToml(flow []token) *Tree { } func init() { - numberUnderscoreInvalidRegexp = regexp.MustCompile(`([^\d]_|_[^\d])|_$|^_`) - hexNumberUnderscoreInvalidRegexp = regexp.MustCompile(`(^0x_)|([^\da-f]_|_[^\da-f])|_$|^_`) + numberUnderscoreInvalidRegexp = regexp.MustCompile(`([^\d]_|_[^\d]|_$|^_)`) } diff --git a/vendor/github.com/pelletier/go-toml/parser_test.go b/vendor/github.com/pelletier/go-toml/parser_test.go index ca29c442e43b..508cb65f0172 100644 --- a/vendor/github.com/pelletier/go-toml/parser_test.go +++ b/vendor/github.com/pelletier/go-toml/parser_test.go @@ -2,7 +2,6 @@ package toml import ( "fmt" - "math" "reflect" "testing" "time" @@ -73,17 +72,6 @@ func TestNumberInKey(t *testing.T) { }) } -func TestIncorrectKeyExtraSquareBracket(t *testing.T) { - _, err := Load(`[a]b] -zyx = 42`) - if err == nil { - t.Error("Error should have been returned.") - } - if err.Error() != "(1, 4): unexpected token" { - t.Error("Bad error message:", err.Error()) - } -} - func TestSimpleNumbers(t *testing.T) { tree, err := Load("a = +42\nb = -21\nc = +4.2\nd = -2.1") assertTree(t, tree, err, map[string]interface{}{ @@ -94,78 +82,6 @@ func TestSimpleNumbers(t *testing.T) { }) } -func TestSpecialFloats(t *testing.T) { - tree, err := Load(` -normalinf = inf -plusinf = +inf -minusinf = -inf -normalnan = nan -plusnan = +nan -minusnan = -nan -`) - assertTree(t, tree, err, map[string]interface{}{ - "normalinf": math.Inf(1), - "plusinf": math.Inf(1), - "minusinf": math.Inf(-1), - "normalnan": math.NaN(), - "plusnan": math.NaN(), - "minusnan": math.NaN(), - }) -} - -func TestHexIntegers(t *testing.T) { - tree, err := Load(`a = 0xDEADBEEF`) - assertTree(t, tree, err, map[string]interface{}{"a": int64(3735928559)}) - - tree, err = Load(`a = 0xdeadbeef`) - assertTree(t, tree, err, map[string]interface{}{"a": int64(3735928559)}) - - tree, err = Load(`a = 0xdead_beef`) - assertTree(t, tree, err, map[string]interface{}{"a": int64(3735928559)}) - - _, err = Load(`a = 0x_1`) - if err.Error() != "(1, 5): invalid use of _ in hex number" { - t.Error("Bad error message:", err.Error()) - } -} - -func TestOctIntegers(t *testing.T) { - tree, err := Load(`a = 0o01234567`) - assertTree(t, tree, err, map[string]interface{}{"a": int64(342391)}) - - tree, err = Load(`a = 0o755`) - assertTree(t, tree, err, map[string]interface{}{"a": int64(493)}) - - _, err = Load(`a = 0o_1`) - if err.Error() != "(1, 5): invalid use of _ in number" { - t.Error("Bad error message:", err.Error()) - } -} - -func TestBinIntegers(t *testing.T) { - tree, err := Load(`a = 0b11010110`) - assertTree(t, tree, err, map[string]interface{}{"a": int64(214)}) - - _, err = Load(`a = 0b_1`) - if err.Error() != "(1, 5): invalid use of _ in number" { - t.Error("Bad error message:", err.Error()) - } -} - -func TestBadIntegerBase(t *testing.T) { - _, err := Load(`a = 0k1`) - if err.Error() != "(1, 5): unknown number base: k. 
possible options are x (hex) o (octal) b (binary)" { - t.Error("Error should have been returned.") - } -} - -func TestIntegerNoDigit(t *testing.T) { - _, err := Load(`a = 0b`) - if err.Error() != "(1, 5): number needs at least one digit" { - t.Error("Bad error message:", err.Error()) - } -} - func TestNumbersWithUnderscores(t *testing.T) { tree, err := Load("a = 1_000") assertTree(t, tree, err, map[string]interface{}{ @@ -239,36 +155,6 @@ func TestSpaceKey(t *testing.T) { }) } -func TestDoubleQuotedKey(t *testing.T) { - tree, err := Load(` - "key" = "a" - "\t" = "b" - "\U0001F914" = "c" - "\u2764" = "d" - `) - assertTree(t, tree, err, map[string]interface{}{ - "key": "a", - "\t": "b", - "\U0001F914": "c", - "\u2764": "d", - }) -} - -func TestSingleQuotedKey(t *testing.T) { - tree, err := Load(` - 'key' = "a" - '\t' = "b" - '\U0001F914' = "c" - '\u2764' = "d" - `) - assertTree(t, tree, err, map[string]interface{}{ - `key`: "a", - `\t`: "b", - `\U0001F914`: "c", - `\u2764`: "d", - }) -} - func TestStringEscapables(t *testing.T) { tree, err := Load("a = \"a \\n b\"") assertTree(t, tree, err, map[string]interface{}{ @@ -756,7 +642,7 @@ func TestTomlValueStringRepresentation(t *testing.T) { {int64(12345), "12345"}, {uint64(50), "50"}, {float64(123.45), "123.45"}, - {true, "true"}, + {bool(true), "true"}, {"hello world", "\"hello world\""}, {"\b\t\n\f\r\"\\", "\"\\b\\t\\n\\f\\r\\\"\\\\\""}, {"\x05", "\"\\u0005\""}, @@ -766,7 +652,7 @@ func TestTomlValueStringRepresentation(t *testing.T) { "[\"gamma\",\"delta\"]"}, {nil, ""}, } { - result, err := tomlValueStringRepresentation(item.Value, "", false) + result, err := tomlValueStringRepresentation(item.Value) if err != nil { t.Errorf("Test %d - unexpected error: %s", idx, err) } diff --git a/vendor/github.com/pelletier/go-toml/query/doc.go b/vendor/github.com/pelletier/go-toml/query/doc.go index ed63c11096a2..f999fc965117 100644 --- a/vendor/github.com/pelletier/go-toml/query/doc.go +++ b/vendor/github.com/pelletier/go-toml/query/doc.go @@ -139,7 +139,7 @@ // Compiled Queries // // Queries may be executed directly on a Tree object, or compiled ahead -// of time and executed discretely. The former is more convenient, but has the +// of time and executed discretely. The former is more convienent, but has the // penalty of having to recompile the query expression each time. 
// // // basic query diff --git a/vendor/github.com/pelletier/go-toml/query/parser_test.go b/vendor/github.com/pelletier/go-toml/query/parser_test.go index 312f51ab5a2a..473896a025ed 100644 --- a/vendor/github.com/pelletier/go-toml/query/parser_test.go +++ b/vendor/github.com/pelletier/go-toml/query/parser_test.go @@ -2,13 +2,12 @@ package query import ( "fmt" + "github.com/pelletier/go-toml" "io/ioutil" "sort" "strings" "testing" "time" - - "github.com/pelletier/go-toml" ) type queryTestNode struct { @@ -407,7 +406,8 @@ func TestQueryFilterFn(t *testing.T) { assertQueryPositions(t, string(buff), "$..[?(float)]", - []interface{}{ // no float values in document + []interface{}{ + // no float values in document }) tv, _ := time.Parse(time.RFC3339, "1979-05-27T07:32:00Z") diff --git a/vendor/github.com/pelletier/go-toml/test.sh b/vendor/github.com/pelletier/go-toml/test.sh index ba6adf3fc7c5..91a889670f0f 100755 --- a/vendor/github.com/pelletier/go-toml/test.sh +++ b/vendor/github.com/pelletier/go-toml/test.sh @@ -1,7 +1,6 @@ #!/bin/bash # fail out of the script if anything here fails set -e -set -o pipefail # set the path to the present working directory export GOPATH=`pwd` @@ -23,6 +22,9 @@ function git_clone() { # Remove potential previous runs rm -rf src test_program_bin toml-test +# Run go vet +go vet ./... + go get github.com/pelletier/go-buffruneio go get github.com/davecgh/go-spew/spew go get gopkg.in/yaml.v2 diff --git a/vendor/github.com/pelletier/go-toml/token.go b/vendor/github.com/pelletier/go-toml/token.go index 1a9081346679..5581fe0bcc1d 100644 --- a/vendor/github.com/pelletier/go-toml/token.go +++ b/vendor/github.com/pelletier/go-toml/token.go @@ -23,8 +23,6 @@ const ( tokenTrue tokenFalse tokenFloat - tokenInf - tokenNan tokenEqual tokenLeftBracket tokenRightBracket @@ -57,8 +55,6 @@ var tokenTypeNames = []string{ "True", "False", "Float", - "Inf", - "NaN", "=", "[", "]", diff --git a/vendor/github.com/pelletier/go-toml/toml.go b/vendor/github.com/pelletier/go-toml/toml.go index 98c185ad0b8e..64f19ed30cb5 100644 --- a/vendor/github.com/pelletier/go-toml/toml.go +++ b/vendor/github.com/pelletier/go-toml/toml.go @@ -11,19 +11,14 @@ import ( ) type tomlValue struct { - value interface{} // string, int64, uint64, float64, bool, time.Time, [] of any of this list - comment string - commented bool - multiline bool - position Position + value interface{} // string, int64, uint64, float64, bool, time.Time, [] of any of this list + position Position } // Tree is the result of the parsing of a TOML file. type Tree struct { - values map[string]interface{} // string -> *tomlValue, *Tree, []*Tree - comment string - commented bool - position Position + values map[string]interface{} // string -> *tomlValue, *Tree, []*Tree + position Position } func newTree() *Tree { @@ -72,15 +67,18 @@ func (t *Tree) Keys() []string { } // Get the value at key in the Tree. -// Key is a dot-separated path (e.g. a.b.c) without single/double quoted strings. -// If you need to retrieve non-bare keys, use GetPath. +// Key is a dot-separated path (e.g. a.b.c). // Returns nil if the path does not exist in the tree. // If keys is of length zero, the current tree is returned. func (t *Tree) Get(key string) interface{} { if key == "" { return t } - return t.GetPath(strings.Split(key, ".")) + comps, err := parseKey(key) + if err != nil { + return nil + } + return t.GetPath(comps) } // GetPath returns the element in the tree indicated by 'keys'. 
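Aside on the toml.go hunk above: the pinned go-toml revision resolves dot-separated keys in Tree.Get through parseKey and GetPath. A minimal usage sketch, not part of the diff; the TOML content, key names, and default value below are invented for illustration:

package main

import (
    "fmt"

    toml "github.com/pelletier/go-toml"
)

func main() {
    // Load a small document and look values up by dot-separated path,
    // which Tree.Get parses with parseKey before calling GetPath.
    tree, err := toml.Load(`
[postgres]
user = "pelletier"
password = "secret"
`)
    if err != nil {
        panic(err)
    }

    user := tree.Get("postgres.user")                      // interface{} holding "pelletier"
    port := tree.GetDefault("postgres.port", int64(5432))  // falls back when the path is missing

    fmt.Println(user, port)
}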
@@ -176,86 +174,17 @@ func (t *Tree) GetDefault(key string, def interface{}) interface{} { return val } -// SetOptions arguments are supplied to the SetWithOptions and SetPathWithOptions functions to modify marshalling behaviour. -// The default values within the struct are valid default options. -type SetOptions struct { - Comment string - Commented bool - Multiline bool -} - -// SetWithOptions is the same as Set, but allows you to provide formatting -// instructions to the key, that will be used by Marshal(). -func (t *Tree) SetWithOptions(key string, opts SetOptions, value interface{}) { - t.SetPathWithOptions(strings.Split(key, "."), opts, value) -} - -// SetPathWithOptions is the same as SetPath, but allows you to provide -// formatting instructions to the key, that will be reused by Marshal(). -func (t *Tree) SetPathWithOptions(keys []string, opts SetOptions, value interface{}) { - subtree := t - for _, intermediateKey := range keys[:len(keys)-1] { - nextTree, exists := subtree.values[intermediateKey] - if !exists { - nextTree = newTree() - subtree.values[intermediateKey] = nextTree // add new element here - } - switch node := nextTree.(type) { - case *Tree: - subtree = node - case []*Tree: - // go to most recent element - if len(node) == 0 { - // create element if it does not exist - subtree.values[intermediateKey] = append(node, newTree()) - } - subtree = node[len(node)-1] - } - } - - var toInsert interface{} - - switch value.(type) { - case *Tree: - tt := value.(*Tree) - tt.comment = opts.Comment - toInsert = value - case []*Tree: - toInsert = value - case *tomlValue: - tt := value.(*tomlValue) - tt.comment = opts.Comment - toInsert = tt - default: - toInsert = &tomlValue{value: value, comment: opts.Comment, commented: opts.Commented, multiline: opts.Multiline} - } - - subtree.values[keys[len(keys)-1]] = toInsert -} - // Set an element in the tree. // Key is a dot-separated path (e.g. a.b.c). // Creates all necessary intermediate trees, if needed. func (t *Tree) Set(key string, value interface{}) { - t.SetWithComment(key, "", false, value) -} - -// SetWithComment is the same as Set, but allows you to provide comment -// information to the key, that will be reused by Marshal(). -func (t *Tree) SetWithComment(key string, comment string, commented bool, value interface{}) { - t.SetPathWithComment(strings.Split(key, "."), comment, commented, value) + t.SetPath(strings.Split(key, "."), value) } // SetPath sets an element in the tree. // Keys is an array of path elements (e.g. {"a","b","c"}). // Creates all necessary intermediate trees, if needed. func (t *Tree) SetPath(keys []string, value interface{}) { - t.SetPathWithComment(keys, "", false, value) -} - -// SetPathWithComment is the same as SetPath, but allows you to provide comment -// information to the key, that will be reused by Marshal(). 
-func (t *Tree) SetPathWithComment(keys []string, comment string, commented bool, value interface{}) { subtree := t for _, intermediateKey := range keys[:len(keys)-1] { nextTree, exists := subtree.values[intermediateKey] @@ -280,17 +209,13 @@ func (t *Tree) SetPathWithComment(keys []string, comment string, commented bool, switch value.(type) { case *Tree: - tt := value.(*Tree) - tt.comment = comment toInsert = value case []*Tree: toInsert = value case *tomlValue: - tt := value.(*tomlValue) - tt.comment = comment - toInsert = tt + toInsert = value default: - toInsert = &tomlValue{value: value, comment: comment, commented: commented} + toInsert = &tomlValue{value: value} } subtree.values[keys[len(keys)-1]] = toInsert diff --git a/vendor/github.com/pelletier/go-toml/tomltree_create.go b/vendor/github.com/pelletier/go-toml/tomltree_create.go index 79610e9b340c..19d1c0dc665a 100644 --- a/vendor/github.com/pelletier/go-toml/tomltree_create.go +++ b/vendor/github.com/pelletier/go-toml/tomltree_create.go @@ -104,7 +104,7 @@ func sliceToTree(object interface{}) (interface{}, error) { } arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue)) } - return &tomlValue{value: arrayValue.Interface(), position: Position{}}, nil + return &tomlValue{arrayValue.Interface(), Position{}}, nil } func toTree(object interface{}) (interface{}, error) { @@ -127,7 +127,7 @@ func toTree(object interface{}) (interface{}, error) { } values[key.String()] = newValue } - return &Tree{values: values, position: Position{}}, nil + return &Tree{values, Position{}}, nil } if value.Kind() == reflect.Array || value.Kind() == reflect.Slice { @@ -138,5 +138,5 @@ func toTree(object interface{}) (interface{}, error) { if err != nil { return nil, err } - return &tomlValue{value: simpleValue, position: Position{}}, nil + return &tomlValue{simpleValue, Position{}}, nil } diff --git a/vendor/github.com/pelletier/go-toml/tomltree_create_test.go b/vendor/github.com/pelletier/go-toml/tomltree_create_test.go index 3465a1066f50..1ca108a524bc 100644 --- a/vendor/github.com/pelletier/go-toml/tomltree_create_test.go +++ b/vendor/github.com/pelletier/go-toml/tomltree_create_test.go @@ -60,7 +60,7 @@ func TestTreeCreateToTree(t *testing.T) { }, "array": []string{"a", "b", "c"}, "array_uint": []uint{uint(1), uint(2)}, - "array_table": []map[string]interface{}{{"sub_map": 52}}, + "array_table": []map[string]interface{}{map[string]interface{}{"sub_map": 52}}, "array_times": []time.Time{time.Now(), time.Now()}, "map_times": map[string]time.Time{"now": time.Now()}, "custom_string_map_key": map[customString]interface{}{customString("custom"): "custom"}, @@ -97,7 +97,7 @@ func TestTreeCreateToTreeInvalidArrayMemberType(t *testing.T) { } func TestTreeCreateToTreeInvalidTableGroupType(t *testing.T) { - _, err := TreeFromMap(map[string]interface{}{"foo": []map[string]interface{}{{"hello": t}}}) + _, err := TreeFromMap(map[string]interface{}{"foo": []map[string]interface{}{map[string]interface{}{"hello": t}}}) expected := "cannot convert type *testing.T to Tree" if err.Error() != expected { t.Fatalf("expected error %s, got %s", expected, err.Error()) diff --git a/vendor/github.com/pelletier/go-toml/tomltree_write.go b/vendor/github.com/pelletier/go-toml/tomltree_write.go index e4049e29f2a1..ca763ed5863c 100644 --- a/vendor/github.com/pelletier/go-toml/tomltree_write.go +++ b/vendor/github.com/pelletier/go-toml/tomltree_write.go @@ -12,41 +12,7 @@ import ( "time" ) -// Encodes a string to a TOML-compliant multi-line string value -// This function 
is a clone of the existing encodeTomlString function, except that whitespace characters -// are preserved. Quotation marks and backslashes are also not escaped. -func encodeMultilineTomlString(value string) string { - var b bytes.Buffer - - for _, rr := range value { - switch rr { - case '\b': - b.WriteString(`\b`) - case '\t': - b.WriteString("\t") - case '\n': - b.WriteString("\n") - case '\f': - b.WriteString(`\f`) - case '\r': - b.WriteString("\r") - case '"': - b.WriteString(`"`) - case '\\': - b.WriteString(`\`) - default: - intRr := uint16(rr) - if intRr < 0x001F { - b.WriteString(fmt.Sprintf("\\u%0.4X", intRr)) - } else { - b.WriteRune(rr) - } - } - } - return b.String() -} - -// Encodes a string to a TOML-compliant string value +// encodes a string to a TOML-compliant string value func encodeTomlString(value string) string { var b bytes.Buffer @@ -78,16 +44,7 @@ func encodeTomlString(value string) string { return b.String() } -func tomlValueStringRepresentation(v interface{}, indent string, arraysOneElementPerLine bool) (string, error) { - // this interface check is added to dereference the change made in the writeTo function. - // That change was made to allow this function to see formatting options. - tv, ok := v.(*tomlValue) - if ok { - v = tv.value - } else { - tv = &tomlValue{} - } - +func tomlValueStringRepresentation(v interface{}) (string, error) { switch value := v.(type) { case uint64: return strconv.FormatUint(value, 10), nil @@ -97,17 +54,14 @@ func tomlValueStringRepresentation(v interface{}, indent string, arraysOneElemen // Ensure a round float does contain a decimal point. Otherwise feeding // the output back to the parser would convert to an integer. if math.Trunc(value) == value { - return strings.ToLower(strconv.FormatFloat(value, 'f', 1, 32)), nil + return strconv.FormatFloat(value, 'f', 1, 32), nil } - return strings.ToLower(strconv.FormatFloat(value, 'f', -1, 32)), nil + return strconv.FormatFloat(value, 'f', -1, 32), nil case string: - if tv.multiline { - return "\"\"\"\n" + encodeMultilineTomlString(value) + "\"\"\"", nil - } return "\"" + encodeTomlString(value) + "\"", nil case []byte: b, _ := v.([]byte) - return tomlValueStringRepresentation(string(b), indent, arraysOneElementPerLine) + return tomlValueStringRepresentation(string(b)) case bool: if value { return "true", nil @@ -122,38 +76,21 @@ func tomlValueStringRepresentation(v interface{}, indent string, arraysOneElemen rv := reflect.ValueOf(v) if rv.Kind() == reflect.Slice { - var values []string + values := []string{} for i := 0; i < rv.Len(); i++ { item := rv.Index(i).Interface() - itemRepr, err := tomlValueStringRepresentation(item, indent, arraysOneElementPerLine) + itemRepr, err := tomlValueStringRepresentation(item) if err != nil { return "", err } values = append(values, itemRepr) } - if arraysOneElementPerLine && len(values) > 1 { - stringBuffer := bytes.Buffer{} - valueIndent := indent + ` ` // TODO: move that to a shared encoder state - - stringBuffer.WriteString("[\n") - - for _, value := range values { - stringBuffer.WriteString(valueIndent) - stringBuffer.WriteString(value) - stringBuffer.WriteString(`,`) - stringBuffer.WriteString("\n") - } - - stringBuffer.WriteString(indent + "]") - - return stringBuffer.String(), nil - } return "[" + strings.Join(values, ",") + "]", nil } return "", fmt.Errorf("unsupported value type %T: %v", v, v) } -func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64, arraysOneElementPerLine bool) (int64, error) { +func (t *Tree) 
writeTo(w io.Writer, indent, keyspace string, bytesCount int64) (int64, error) { simpleValuesKeys := make([]string, 0) complexValuesKeys := make([]string, 0) @@ -176,29 +113,12 @@ func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64, a return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) } - repr, err := tomlValueStringRepresentation(v, indent, arraysOneElementPerLine) + repr, err := tomlValueStringRepresentation(v.value) if err != nil { return bytesCount, err } - if v.comment != "" { - comment := strings.Replace(v.comment, "\n", "\n"+indent+"#", -1) - start := "# " - if strings.HasPrefix(comment, "#") { - start = "" - } - writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment, "\n") - bytesCount += int64(writtenBytesCountComment) - if errc != nil { - return bytesCount, errc - } - } - - var commented string - if v.commented { - commented = "# " - } - writtenBytesCount, err := writeStrings(w, indent, commented, k, " = ", repr, "\n") + writtenBytesCount, err := writeStrings(w, indent, k, " = ", repr, "\n") bytesCount += int64(writtenBytesCount) if err != nil { return bytesCount, err @@ -212,48 +132,28 @@ func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64, a if keyspace != "" { combinedKey = keyspace + "." + combinedKey } - var commented string - if t.commented { - commented = "# " - } switch node := v.(type) { // node has to be of those two types given how keys are sorted above case *Tree: - tv, ok := t.values[k].(*Tree) - if !ok { - return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k]) - } - if tv.comment != "" { - comment := strings.Replace(tv.comment, "\n", "\n"+indent+"#", -1) - start := "# " - if strings.HasPrefix(comment, "#") { - start = "" - } - writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment) - bytesCount += int64(writtenBytesCountComment) - if errc != nil { - return bytesCount, errc - } - } - writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[", combinedKey, "]\n") + writtenBytesCount, err := writeStrings(w, "\n", indent, "[", combinedKey, "]\n") bytesCount += int64(writtenBytesCount) if err != nil { return bytesCount, err } - bytesCount, err = node.writeTo(w, indent+" ", combinedKey, bytesCount, arraysOneElementPerLine) + bytesCount, err = node.writeTo(w, indent+" ", combinedKey, bytesCount) if err != nil { return bytesCount, err } case []*Tree: for _, subTree := range node { - writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[[", combinedKey, "]]\n") + writtenBytesCount, err := writeStrings(w, "\n", indent, "[[", combinedKey, "]]\n") bytesCount += int64(writtenBytesCount) if err != nil { return bytesCount, err } - bytesCount, err = subTree.writeTo(w, indent+" ", combinedKey, bytesCount, arraysOneElementPerLine) + bytesCount, err = subTree.writeTo(w, indent+" ", combinedKey, bytesCount) if err != nil { return bytesCount, err } @@ -279,7 +179,7 @@ func writeStrings(w io.Writer, s ...string) (int, error) { // WriteTo encode the Tree as Toml and writes it to the writer w. // Returns the number of bytes written in case of success, or an error if anything happened. func (t *Tree) WriteTo(w io.Writer) (int64, error) { - return t.writeTo(w, "", "", 0, false) + return t.writeTo(w, "", "", 0) } // ToTomlString generates a human-readable representation of the current tree. 
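For reference on the tomltree_write.go changes above: with the arraysOneElementPerLine flag gone from writeTo, the pinned revision always renders slices as single-line inline arrays. A minimal sketch, not part of the diff; the struct and field names are invented and the expected output is only indicative:

package main

import (
    "fmt"

    toml "github.com/pelletier/go-toml"
)

type settings struct {
    A struct {
        B []int64
        C []int64
    }
}

func main() {
    var s settings
    s.A.B = []int64{1, 2, 3}
    s.A.C = []int64{1}

    out, err := toml.Marshal(s)
    if err != nil {
        panic(err)
    }
    // Roughly the shape produced by this revision (inline arrays):
    // [A]
    //   B = [1,2,3]
    //   C = [1]
    fmt.Print(string(out))
}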
diff --git a/vendor/github.com/pelletier/go-toml/tomltree_write_test.go b/vendor/github.com/pelletier/go-toml/tomltree_write_test.go index 206203b88e7b..c2a1ce3b74cb 100644 --- a/vendor/github.com/pelletier/go-toml/tomltree_write_test.go +++ b/vendor/github.com/pelletier/go-toml/tomltree_write_test.go @@ -30,7 +30,7 @@ func (f *failingWriter) Write(p []byte) (n int, err error) { f.buffer.Write(p[:toWrite]) f.written = f.failAt - return toWrite, fmt.Errorf("failingWriter failed after writing %d bytes", f.written) + return toWrite, fmt.Errorf("failingWriter failed after writting %d bytes", f.written) } func assertErrorString(t *testing.T, expected string, err error) { @@ -161,13 +161,13 @@ func TestTreeWriteToInvalidTreeSimpleValue(t *testing.T) { } func TestTreeWriteToInvalidTreeTomlValue(t *testing.T) { - tree := Tree{values: map[string]interface{}{"foo": &tomlValue{value: int8(1), comment: "", position: Position{}}}} + tree := Tree{values: map[string]interface{}{"foo": &tomlValue{int8(1), Position{}}}} _, err := tree.ToTomlString() assertErrorString(t, "unsupported value type int8: 1", err) } func TestTreeWriteToInvalidTreeTomlValueArray(t *testing.T) { - tree := Tree{values: map[string]interface{}{"foo": &tomlValue{value: int8(1), comment: "", position: Position{}}}} + tree := Tree{values: map[string]interface{}{"foo": &tomlValue{[]interface{}{int8(1)}, Position{}}}} _, err := tree.ToTomlString() assertErrorString(t, "unsupported value type int8: 1", err) } @@ -176,7 +176,7 @@ func TestTreeWriteToFailingWriterInSimpleValue(t *testing.T) { toml, _ := Load(`a = 2`) writer := failingWriter{failAt: 0, written: 0} _, err := toml.WriteTo(&writer) - assertErrorString(t, "failingWriter failed after writing 0 bytes", err) + assertErrorString(t, "failingWriter failed after writting 0 bytes", err) } func TestTreeWriteToFailingWriterInTable(t *testing.T) { @@ -185,11 +185,11 @@ func TestTreeWriteToFailingWriterInTable(t *testing.T) { a = 2`) writer := failingWriter{failAt: 2, written: 0} _, err := toml.WriteTo(&writer) - assertErrorString(t, "failingWriter failed after writing 2 bytes", err) + assertErrorString(t, "failingWriter failed after writting 2 bytes", err) writer = failingWriter{failAt: 13, written: 0} _, err = toml.WriteTo(&writer) - assertErrorString(t, "failingWriter failed after writing 13 bytes", err) + assertErrorString(t, "failingWriter failed after writting 13 bytes", err) } func TestTreeWriteToFailingWriterInArray(t *testing.T) { @@ -198,11 +198,11 @@ func TestTreeWriteToFailingWriterInArray(t *testing.T) { a = 2`) writer := failingWriter{failAt: 2, written: 0} _, err := toml.WriteTo(&writer) - assertErrorString(t, "failingWriter failed after writing 2 bytes", err) + assertErrorString(t, "failingWriter failed after writting 2 bytes", err) writer = failingWriter{failAt: 15, written: 0} _, err = toml.WriteTo(&writer) - assertErrorString(t, "failingWriter failed after writing 15 bytes", err) + assertErrorString(t, "failingWriter failed after writting 15 bytes", err) } func TestTreeWriteToMapExampleFile(t *testing.T) { @@ -309,24 +309,6 @@ func TestTreeWriteToFloat(t *testing.T) { } } -func TestTreeWriteToSpecialFloat(t *testing.T) { - expected := `a = +inf -b = -inf -c = nan` - - tree, err := Load(expected) - if err != nil { - t.Fatal(err) - } - str, err := tree.ToTomlString() - if err != nil { - t.Fatal(err) - } - if strings.TrimSpace(str) != strings.TrimSpace(expected) { - t.Fatalf("Expected:\n%s\nGot:\n%s", expected, str) - } -} - func BenchmarkTreeToTomlString(b *testing.B) { 
toml, err := Load(sampleHard) if err != nil { diff --git a/vendor/github.com/pkg/profile/.travis.yml b/vendor/github.com/pkg/profile/.travis.yml index c30530c41167..3c3325196fd7 100644 --- a/vendor/github.com/pkg/profile/.travis.yml +++ b/vendor/github.com/pkg/profile/.travis.yml @@ -3,7 +3,6 @@ go_import_path: github.com/pkg/profile go: - 1.9.x - 1.10.x - - 1.11.x - tip script: diff --git a/vendor/github.com/vmware/photon-controller-go-sdk/photon/client.go b/vendor/github.com/vmware/photon-controller-go-sdk/photon/client.go index 5e0faac7bd6c..fb48c59820b9 100644 --- a/vendor/github.com/vmware/photon-controller-go-sdk/photon/client.go +++ b/vendor/github.com/vmware/photon-controller-go-sdk/photon/client.go @@ -142,8 +142,8 @@ func NewClient(endpoint string, options *ClientOptions, logger *log.Logger) (c * } restClient := &restClient{ - httpClient: &http.Client{Transport: tr}, - logger: logger, + httpClient: &http.Client{Transport: tr}, + logger: logger, UpdateAccessTokenCallback: tokenCallback, } diff --git a/vendor/github.com/vmware/photon-controller-go-sdk/photon/deployments_test.go b/vendor/github.com/vmware/photon-controller-go-sdk/photon/deployments_test.go index dbfc8f85da86..cbbd5f681089 100644 --- a/vendor/github.com/vmware/photon-controller-go-sdk/photon/deployments_test.go +++ b/vendor/github.com/vmware/photon-controller-go-sdk/photon/deployments_test.go @@ -32,7 +32,7 @@ var _ = Describe("Deployment", func() { deploymentSpec = &DeploymentCreateSpec{ ImageDatastores: []string{randomString(10, "go-sdk-deployment-")}, UseImageDatastoreForVms: true, - Auth: &AuthInfo{}, + Auth: &AuthInfo{}, } }) @@ -59,8 +59,8 @@ var _ = Describe("Deployment", func() { mockDeployment := Deployment{ ImageDatastores: deploymentSpec.ImageDatastores, UseImageDatastoreForVms: deploymentSpec.UseImageDatastoreForVms, - Auth: &AuthInfo{}, - NetworkConfiguration: &NetworkConfiguration{Enabled: false}, + Auth: &AuthInfo{}, + NetworkConfiguration: &NetworkConfiguration{Enabled: false}, } server.SetResponseJson(200, mockDeployment) deployment, err := client.Deployments.Get(task.Entity.ID) diff --git a/vendor/github.com/vmware/photon-controller-go-sdk/photon/lightwave/oidcclient_test.go b/vendor/github.com/vmware/photon-controller-go-sdk/photon/lightwave/oidcclient_test.go index 976f6c7afb03..68bc89ad608f 100644 --- a/vendor/github.com/vmware/photon-controller-go-sdk/photon/lightwave/oidcclient_test.go +++ b/vendor/github.com/vmware/photon-controller-go-sdk/photon/lightwave/oidcclient_test.go @@ -66,7 +66,7 @@ var _ = Describe("OIDCClient", func() { Context("when server responds with valid certificate", func() { BeforeEach(func() { template := &x509.Certificate{ - IsCA: true, + IsCA: true, BasicConstraintsValid: true, SubjectKeyId: []byte{1, 2, 3}, SerialNumber: big.NewInt(1234), diff --git a/vendor/gopkg.in/asn1-ber.v1/.travis.yml b/vendor/gopkg.in/asn1-ber.v1/.travis.yml index ecf413251b94..53063d07598d 100644 --- a/vendor/gopkg.in/asn1-ber.v1/.travis.yml +++ b/vendor/gopkg.in/asn1-ber.v1/.travis.yml @@ -1,31 +1,13 @@ language: go -matrix: - include: - - go: 1.2.x - env: GOOS=linux GOARCH=amd64 - - go: 1.2.x - env: GOOS=linux GOARCH=386 - - go: 1.2.x - env: GOOS=windows GOARCH=amd64 - - go: 1.2.x - env: GOOS=windows GOARCH=386 - - go: 1.3.x - - go: 1.4.x - - go: 1.5.x - - go: 1.6.x - - go: 1.7.x - - go: 1.8.x - - go: 1.9.x - - go: 1.10.x - - go: 1.11.x - env: GOOS=linux GOARCH=amd64 - - go: 1.11.x - env: GOOS=linux GOARCH=386 - - go: 1.11.x - env: GOOS=windows GOARCH=amd64 - - go: 1.11.x - env: GOOS=windows 
GOARCH=386 - - go: tip +go: + - 1.2 + - 1.3 + - 1.4 + - 1.5 + - 1.6 + - 1.7 + - 1.8 + - tip go_import_path: gopkg.in/asn-ber.v1 install: - go list -f '{{range .Imports}}{{.}} {{end}}' ./... | xargs go get -v @@ -33,4 +15,4 @@ install: - go get code.google.com/p/go.tools/cmd/cover || go get golang.org/x/tools/cmd/cover - go build -v ./... script: - - go test -v -cover ./... || go test -v ./... + - go test -v -cover ./... diff --git a/vendor/gopkg.in/asn1-ber.v1/ber.go b/vendor/gopkg.in/asn1-ber.v1/ber.go index 6153f460688c..25cc921be9c8 100644 --- a/vendor/gopkg.in/asn1-ber.v1/ber.go +++ b/vendor/gopkg.in/asn1-ber.v1/ber.go @@ -5,15 +5,10 @@ import ( "errors" "fmt" "io" - "math" "os" "reflect" ) -// MaxPacketLengthBytes specifies the maximum allowed packet size when calling ReadPacket or DecodePacket. Set to 0 for -// no limit. -var MaxPacketLengthBytes int64 = math.MaxInt32 - type Packet struct { Identifier Value interface{} @@ -212,7 +207,7 @@ func DecodeString(data []byte) string { return string(data) } -func ParseInt64(bytes []byte) (ret int64, err error) { +func parseInt64(bytes []byte) (ret int64, err error) { if len(bytes) > 8 { // We'll overflow an int64 in this case. err = fmt.Errorf("integer too large") @@ -335,9 +330,6 @@ func readPacket(reader io.Reader) (*Packet, int, error) { } // Read definite-length content - if MaxPacketLengthBytes > 0 && int64(length) > MaxPacketLengthBytes { - return nil, read, fmt.Errorf("length %d greater than maximum %d", length, MaxPacketLengthBytes) - } content := make([]byte, length, length) if length > 0 { _, err := io.ReadFull(reader, content) @@ -357,11 +349,11 @@ func readPacket(reader io.Reader) (*Packet, int, error) { switch p.Tag { case TagEOC: case TagBoolean: - val, _ := ParseInt64(content) + val, _ := parseInt64(content) p.Value = val != 0 case TagInteger: - p.Value, _ = ParseInt64(content) + p.Value, _ = parseInt64(content) case TagBitString: case TagOctetString: // the actual string encoding is not known here @@ -374,7 +366,7 @@ func readPacket(reader io.Reader) (*Packet, int, error) { case TagExternal: case TagRealFloat: case TagEnumerated: - p.Value, _ = ParseInt64(content) + p.Value, _ = parseInt64(content) case TagEmbeddedPDV: case TagUTF8String: p.Value = DecodeString(content) diff --git a/vendor/gopkg.in/asn1-ber.v1/ber_test.go b/vendor/gopkg.in/asn1-ber.v1/ber_test.go index a5cc53d64d79..bbd22db6d70d 100644 --- a/vendor/gopkg.in/asn1-ber.v1/ber_test.go +++ b/vendor/gopkg.in/asn1-ber.v1/ber_test.go @@ -2,20 +2,21 @@ package ber import ( "bytes" - "io" "math" + + "io" "testing" ) func TestEncodeDecodeInteger(t *testing.T) { for _, v := range []int64{0, 10, 128, 1024, math.MaxInt64, -1, -100, -128, -1024, math.MinInt64} { enc := encodeInteger(v) - dec, err := ParseInt64(enc) + dec, err := parseInt64(enc) if err != nil { t.Fatalf("Error decoding %d : %s", v, err) } if v != dec { - t.Errorf("TestEncodeDecodeInteger failed for %d (got %d)", v, dec) + t.Error("TestEncodeDecodeInteger failed for %d (got %d)", v, dec) } } diff --git a/vendor/gopkg.in/asn1-ber.v1/header.go b/vendor/gopkg.in/asn1-ber.v1/header.go index 71615621cb41..123744e9b877 100644 --- a/vendor/gopkg.in/asn1-ber.v1/header.go +++ b/vendor/gopkg.in/asn1-ber.v1/header.go @@ -2,7 +2,6 @@ package ber import ( "errors" - "fmt" "io" ) @@ -26,10 +25,5 @@ func readHeader(reader io.Reader) (identifier Identifier, length int, read int, return Identifier{}, 0, read, errors.New("indefinite length used with primitive type") } - if length < LengthIndefinite { - err = fmt.Errorf("length 
< LengthIndefinite { - err = fmt.Errorf("length
cannot be less than %d", LengthIndefinite) - return - } - return identifier, length, read, nil } diff --git a/vendor/gopkg.in/asn1-ber.v1/identifier.go b/vendor/gopkg.in/asn1-ber.v1/identifier.go index e8c435749a68..f7672a8447a0 100644 --- a/vendor/gopkg.in/asn1-ber.v1/identifier.go +++ b/vendor/gopkg.in/asn1-ber.v1/identifier.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "io" + "math" ) func readIdentifier(reader io.Reader) (Identifier, int, error) { @@ -79,34 +80,24 @@ func encodeIdentifier(identifier Identifier) []byte { tag := identifier.Tag - b = append(b, encodeHighTag(tag)...) - } - return b -} - -func encodeHighTag(tag Tag) []byte { - // set cap=4 to hopefully avoid additional allocations - b := make([]byte, 0, 4) - for tag != 0 { - // t := last 7 bits of tag (HighTagValueBitmask = 0x7F) - t := tag & HighTagValueBitmask - - // right shift tag 7 to remove what was just pulled off - tag >>= 7 - - // if b already has entries this entry needs a continuation bit (0x80) - if len(b) != 0 { - t |= HighTagContinueBitmask + highBit := uint(63) + for { + if tag&(1<= 0; i-- { + offset := uint(i) * 7 + mask := Tag(0x7f) << offset + tagByte := (tag & mask) >> offset + if i != 0 { + tagByte |= 0x80 + } + b = append(b, byte(tagByte)) + } } return b } diff --git a/vendor/gopkg.in/asn1-ber.v1/identifier_test.go b/vendor/gopkg.in/asn1-ber.v1/identifier_test.go index b0b980944267..7169362e2335 100644 --- a/vendor/gopkg.in/asn1-ber.v1/identifier_test.go +++ b/vendor/gopkg.in/asn1-ber.v1/identifier_test.go @@ -342,22 +342,3 @@ func TestEncodeIdentifier(t *testing.T) { } } } - -func TestEncodeHighTag(t *testing.T) { - cases := []struct { - tag Tag - want []byte - }{ - {134, []byte{0x80 + 0x01, 0x06}}, - {123456, []byte{0x80 + 0x07, 0x80 + 0x44, 0x40}}, - {0xFF, []byte{0x81, 0x7F}}, - } - - for _, c := range cases { - got := encodeHighTag(c.tag) - - if !bytes.Equal(c.want, got) { - t.Errorf("tag: %d want: %#v got: %#v", c.tag, c.want, got) - } - } -} diff --git a/vendor/gopkg.in/asn1-ber.v1/suite_test.go b/vendor/gopkg.in/asn1-ber.v1/suite_test.go index 4f1078db1351..ace8e6705e15 100644 --- a/vendor/gopkg.in/asn1-ber.v1/suite_test.go +++ b/vendor/gopkg.in/asn1-ber.v1/suite_test.go @@ -2,10 +2,8 @@ package ber import ( "bytes" - "fmt" "io" "io/ioutil" - "math" "testing" ) @@ -80,18 +78,6 @@ var testcases = []struct { {File: "tests/tc46.ber", Error: "indefinite length used with primitive type"}, {File: "tests/tc47.ber", Error: "eoc child not allowed with definite length"}, {File: "tests/tc48.ber", Error: "", IndefiniteEncoding: true}, // Error: "Using of more than 7 "unused bits" in BIT STRING with constrictive encoding form" - {File: "tests/tc49.ber", Error: ""}, - {File: "tests/tc50.ber", Error: is64bit("length cannot be less than -1", "long-form length overflow")}, - {File: "tests/tc51.ber", Error: is64bit(fmt.Sprintf("length 206966894640 greater than maximum %v", MaxPacketLengthBytes), "long-form length overflow")}, -} - -func is64bit(a, b string) string { - maxInt64 := int64(math.MaxInt64) - length := int(maxInt64) - if int64(length) != maxInt64 { - return b - } - return a } func TestSuiteDecodePacket(t *testing.T) { @@ -128,7 +114,7 @@ func TestSuiteDecodePacket(t *testing.T) { } } else if !bytes.Equal(dataOut, dataIn) { // Make sure the serialized data matches the source - t.Errorf("%s: data should be the same\nwant: %#v\ngot: %#v", file, dataIn, dataOut) + t.Errorf("%s: data should be the same", file) } packet, err = DecodePacketErr(dataOut) @@ -140,7 +126,7 @@ func TestSuiteDecodePacket(t *testing.T) 
{ // Make sure the re-serialized data matches our original serialization dataOut2 := packet.Bytes() if !bytes.Equal(dataOut, dataOut2) { - t.Errorf("%s: data should be the same\nwant: %#v\ngot: %#v", file, dataOut, dataOut2) + t.Errorf("%s: data should be the same", file) } } } @@ -178,7 +164,7 @@ func TestSuiteReadPacket(t *testing.T) { } } else if !bytes.Equal(dataOut, dataIn) { // Make sure the serialized data matches the source - t.Errorf("%s: data should be the same\nwant: %#v\ngot: %#v", file, dataIn, dataOut) + t.Errorf("%s: data should be the same", file) } packet, err = DecodePacketErr(dataOut) @@ -190,7 +176,7 @@ func TestSuiteReadPacket(t *testing.T) { // Make sure the re-serialized data matches our original serialization dataOut2 := packet.Bytes() if !bytes.Equal(dataOut, dataOut2) { - t.Errorf("%s: data should be the same\nwant: %#v\ngot: %#v", file, dataOut, dataOut2) + t.Errorf("%s: data should be the same", file) } } } diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc49.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc49.ber deleted file mode 100644 index fa20c061fd54..000000000000 Binary files a/vendor/gopkg.in/asn1-ber.v1/tests/tc49.ber and /dev/null differ diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc50.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc50.ber deleted file mode 100644 index 73ff59d56f4e..000000000000 --- a/vendor/gopkg.in/asn1-ber.v1/tests/tc50.ber +++ /dev/null @@ -1 +0,0 @@ -ˆ›0000000 \ No newline at end of file diff --git a/vendor/gopkg.in/asn1-ber.v1/tests/tc51.ber b/vendor/gopkg.in/asn1-ber.v1/tests/tc51.ber deleted file mode 100644 index 81321d017a7e..000000000000 --- a/vendor/gopkg.in/asn1-ber.v1/tests/tc51.ber +++ /dev/null @@ -1 +0,0 @@ -…00000 \ No newline at end of file
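To close out the asn1-ber downgrade: the pinned gopkg.in/asn1-ber.v1 revision unexports ParseInt64 and drops the MaxPacketLengthBytes cap, while the basic packet-building API appears unchanged by these hunks. A minimal round-trip sketch, not part of the diff; the tag value and description are arbitrary:

package main

import (
    "fmt"

    ber "gopkg.in/asn1-ber.v1"
)

func main() {
    // Build a primitive INTEGER packet and serialize it.
    pkt := ber.NewInteger(ber.ClassUniversal, ber.TypePrimitive, ber.TagInteger, int64(1024), "example")
    raw := pkt.Bytes()

    // Decode it back; with this revision there is no built-in limit on
    // packet length, so callers should bound untrusted input themselves.
    decoded := ber.DecodePacket(raw)
    fmt.Printf("tag=%d value=%v\n", decoded.Tag, decoded.Value)
}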