diff --git a/Makefile b/Makefile index 4c3a51c80..f9396802b 100644 --- a/Makefile +++ b/Makefile @@ -36,6 +36,8 @@ clean: CRD_SCHEMA_GEN_APIS := $(shell echo ./vendor/github.com/openshift/api/{authorization/v1,config/v1,quota/v1,security/v1,operator/v1alpha1,console/v1}) CRD_SCHEMA_GEN_VERSION :=v0.2.1 +$(call add-crd-gen,manifests,$(CRD_SCHEMA_GEN_APIS),./manifests,./manifests) + update-codegen: update-codegen-crds .PHONY: update-codegen diff --git a/glide.lock b/glide.lock index 4e61fdd8a..c5fad0495 100644 --- a/glide.lock +++ b/glide.lock @@ -1,16 +1,16 @@ hash: 9e0a3d4458afc2413e09446c7ec5943f2d66445a4f9c662e1e24fb6a9d1240e4 -updated: 2019-10-01T14:22:17.125248+02:00 +updated: 2019-10-15T11:50:11.701651-04:00 imports: - name: github.com/beorn7/perks - version: 4b2b341e8d7715fae06375aa633dbb6e91b3fb46 + version: 3a771d992973f24aa725d07868b467d1ddfceafb subpackages: - quantile - name: github.com/davecgh/go-spew - version: d8f796af33cc11cb798c1aaeb27a4ebc5099927d + version: 8991bc29aa16c548c550c7ff78260e27b9ab7c73 subpackages: - spew - name: github.com/ghodss/yaml - version: 25d852aebe32c875e9c044af3eef9c7dc6bc777f + version: 73d445a93680fa1a78ae23a5839bad48f32ba1ee - name: github.com/gogo/protobuf version: 65acae22fc9d1fe290b33faa2bd64cdc20a463a0 subpackages: @@ -51,12 +51,12 @@ imports: - name: github.com/modern-go/reflect2 version: 94122c33edd36123c84d5368cfb2b69df93a0ec8 - name: github.com/openshift/api - version: d35a8e0fef2b28744b7034f0945a847f880f83ac + version: 538f3e8cc15a8fe444307fe0b30549ba05efccf2 subpackages: - config/v1 - operator/v1 - name: github.com/openshift/library-go - version: c355e2019bb35b1ae2b060208fdad8b488c28838 + version: e7604f697814119030413cce8e3638dcdef69c63 subpackages: - pkg/assets - pkg/config/client @@ -73,11 +73,11 @@ imports: - prometheus - prometheus/internal - name: github.com/prometheus/client_model - version: fd36f4220a901265f90734c3183c5f0c91daa0b8 + version: 5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f subpackages: - go - 
name: github.com/prometheus/common - version: 17f5ca1748182ddf24fc33a5a7caaaf790a52fcc + version: 4724e9255275ce38f7179b2478abeae4e28c904f subpackages: - expfmt - internal/bitbucket.org/ww/goautoneg @@ -293,7 +293,7 @@ imports: - util/keyutil - util/retry - name: k8s.io/component-base - version: 804254d5600483f35d94e1cab8b10d40310def3d + version: 547f6c5d70902c6683e93ad96f84adc6b943aedf subpackages: - cli/flag - logs @@ -304,7 +304,7 @@ imports: subpackages: - pkg/util/proto - name: k8s.io/utils - version: 5008bf6f8cd62f4b52816cfa99163fedb053d0be + version: 581e00157fb1a0435d4fac54a52d1ca1e481d60e subpackages: - integer - pointer diff --git a/manifests/0000_10_config-operator_01_apiserver.crd.yaml b/manifests/0000_10_config-operator_01_apiserver.crd.yaml index 8590ce923..5d38dc571 100644 --- a/manifests/0000_10_config-operator_01_apiserver.crd.yaml +++ b/manifests/0000_10_config-operator_01_apiserver.crd.yaml @@ -132,5 +132,78 @@ spec: description: name is the metadata.name of the referenced secret type: string + tlsSecurityProfile: + description: "tlsSecurityProfile specifies settings for TLS connections + for externally exposed servers. \n If unset, a default (which may + change between releases) is chosen." + type: object + properties: + custom: + description: "custom is a user-defined TLS security profile. Be + extremely careful using a custom profile as invalid configurations + can be catastrophic. An example custom profile looks like this: + \n ciphers: - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 + \ - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 + \ minTLSVersion: TLSv1.1" + type: object + properties: + ciphers: + description: "ciphers is used to specify the cipher algorithms + that are negotiated during the TLS handshake. Operators may + remove entries their operands do not support. 
For example, + to use DES-CBC3-SHA (yaml): \n ciphers: - DES-CBC3-SHA" + type: array + items: + type: string + minTLSVersion: + description: "minTLSVersion is used to specify the minimal version + of the TLS protocol that is negotiated during the TLS handshake. + For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml): + \n minTLSVersion: TLSv1.1" + type: string + nullable: true + intermediate: + description: "intermediate is a TLS security profile based on: \n + https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29 + \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 + \ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 + \ - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 + \ - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 + \ - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 + \ minTLSVersion: TLSv1.2" + type: object + nullable: true + modern: + description: "modern is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility + \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 + \ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 + \ minTLSVersion: TLSv1.3" + type: object + nullable: true + old: + description: "old is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility + \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 + \ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 + \ - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 + \ - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 + \ - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 + \ - ECDHE-ECDSA-AES128-SHA256 - ECDHE-RSA-AES128-SHA256 + \ - ECDHE-ECDSA-AES128-SHA - ECDHE-RSA-AES128-SHA - + ECDHE-RSA-AES256-SHA384 - ECDHE-ECDSA-AES256-SHA - ECDHE-RSA-AES256-SHA + \ - AES128-GCM-SHA256 - AES256-GCM-SHA384 - 
AES128-SHA256 + \ - AES128-SHA - AES256-SHA - DES-CBC3-SHA minTLSVersion: + TLSv1.0" + type: object + nullable: true + type: + description: "type is one of Old, Intermediate, Modern or Custom. + Custom provides the ability to specify individual TLS security + profile parameters. Old, Intermediate and Modern are TLS security + profiles based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations + \n The profiles are intent based, so they may change over time + as new ciphers are developed and existing ciphers are found to + be insecure. Depending on precisely which ciphers are available + to a process, the list may be reduced." + type: string status: type: object diff --git a/manifests/0000_10_config-operator_01_consoleclidownload.crd.yaml b/manifests/0000_10_config-operator_01_consoleclidownload.crd.yaml index c1a50cfb3..46495755f 100644 --- a/manifests/0000_10_config-operator_01_consoleclidownload.crd.yaml +++ b/manifests/0000_10_config-operator_01_consoleclidownload.crd.yaml @@ -71,17 +71,15 @@ spec: details. type: array items: - description: Represents a standard link that could be generated in - HTML type: object required: - href - - text properties: href: description: href is the absolute secure URL for the link (must use https) type: string + pattern: ^https://([\w-]+.)+[\w-]+(/[\w- ./?%&=])?$ text: description: text is the display text for the link type: string diff --git a/manifests/0000_10_config-operator_01_consoleexternalloglink.crd.yaml b/manifests/0000_10_config-operator_01_consoleexternalloglink.crd.yaml index bd1cd4ce5..21c2490a2 100644 --- a/manifests/0000_10_config-operator_01_consoleexternalloglink.crd.yaml +++ b/manifests/0000_10_config-operator_01_consoleexternalloglink.crd.yaml @@ -69,10 +69,12 @@ spec: UID of the resource which contains the logs - e.g. 
`11111111-2222-3333-4444-555555555555` - ${containerName} - name of the resource's container that contains the logs - ${resourceNamespace} - namespace of the resource that contains - the logs - ${podLabels} - JSON representation of labels matching the - pod with the logs - e.g. `{\"key1\":\"value1\",\"key2\":\"value2\"}` + the logs - ${resourceNamespaceUID} - namespace UID of the resource + that contains the logs - ${podLabels} - JSON representation of labels + matching the pod with the logs - e.g. `{\"key1\":\"value1\",\"key2\":\"value2\"}` \n e.g., https://example.com/logs?resourceName=${resourceName}&containerName=${containerName}&resourceNamespace=${resourceNamespace}&podLabels=${podLabels}" type: string + pattern: ^https:// namespaceFilter: description: namespaceFilter is a regular expression used to restrict a log link to a matching set of namespaces (e.g., `^openshift-`). diff --git a/manifests/0000_10_config-operator_01_consolelink.crd.yaml b/manifests/0000_10_config-operator_01_consolelink.crd.yaml index 746a95350..0ba3c363f 100644 --- a/manifests/0000_10_config-operator_01_consolelink.crd.yaml +++ b/manifests/0000_10_config-operator_01_consolelink.crd.yaml @@ -77,16 +77,21 @@ spec: type: string section: description: section is the section of the application menu in which - the link should appear. + the link should appear. This can be any text that will appear + as a subheading in the application menu dropdown. A new section + will be created if the text does not match text of an existing + section. type: string href: description: href is the absolute secure URL for the link (must use https) type: string + pattern: ^https://([\w-]+.)+[\w-]+(/[\w- ./?%&=])?$ location: description: location determines which location in the console the link - will be appended to. + will be appended to (ApplicationMenu, HelpMenu, UserMenu, NamespaceDashboard). 
type: string + pattern: ^(ApplicationMenu|HelpMenu|UserMenu|NamespaceDashboard)$ namespaceDashboard: description: namespaceDashboard holds information about namespaces in which the dashboard link should appear, and it is applicable only diff --git a/manifests/0000_10_config-operator_01_consolenotification.crd.yaml b/manifests/0000_10_config-operator_01_consolenotification.crd.yaml index ce5894b71..673ef8b28 100644 --- a/manifests/0000_10_config-operator_01_consolenotification.crd.yaml +++ b/manifests/0000_10_config-operator_01_consolenotification.crd.yaml @@ -76,12 +76,14 @@ spec: description: href is the absolute secure URL for the link (must use https) type: string + pattern: ^https://([\w-]+.)+[\w-]+(/[\w- ./?%&=])?$ text: description: text is the display text for the link type: string location: description: location is the location of the notification in the console. type: string + pattern: ^(BannerTop|BannerBottom|BannerTopBottom)$ text: description: text is the visible text of the notification. type: string diff --git a/manifests/0000_10_config-operator_01_infrastructure.crd.yaml b/manifests/0000_10_config-operator_01_infrastructure.crd.yaml index 312515459..ebdb1d56d 100644 --- a/manifests/0000_10_config-operator_01_infrastructure.crd.yaml +++ b/manifests/0000_10_config-operator_01_infrastructure.crd.yaml @@ -101,6 +101,11 @@ spec: provider. type: object properties: + networkResourceGroupName: + description: networkResourceGroupName is the Resource Group + for network resources like the Virtual Network and Subnets + used by the cluster. If empty, the value is same as ResourceGroupName. + type: string resourceGroupName: description: resourceGroupName is the Resource Group for new Azure resources created for the cluster. @@ -176,6 +181,33 @@ spec: a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster. type: string + ovirt: + description: Ovirt contains settings specific to the oVirt infrastructure + provider. 
+ type: object + properties: + apiServerInternalIP: + description: apiServerInternalIP is an IP address to contact + the Kubernetes API server that can be used by components inside + the cluster, like kubelets using the infrastructure rather + than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI + points to. It is the IP for a self-hosted load balancer in + front of the API servers. + type: string + ingressIP: + description: ingressIP is an external IP which routes to the + default ingress controller. The IP is a suitable target of + a wildcard DNS record used to resolve default route host names. + type: string + nodeDNSIP: + description: nodeDNSIP is the IP address for the internal DNS + used by the nodes. Unlike the one managed by the DNS operator, + `NodeDNSIP` provides name resolution for the nodes themselves. + There is no DNS-as-a-service for oVirt deployments. In order + to minimize necessary changes to the datacenter DNS, a DNS + service is hosted as a static pod to serve those hostnames + to the nodes in the cluster. + type: string type: description: type is the underlying infrastructure provider for the cluster. 
This value controls whether infrastructure automation diff --git a/vendor/github.com/beorn7/perks/VERSION b/vendor/github.com/beorn7/perks/VERSION deleted file mode 100644 index 3eefcb9dd..000000000 --- a/vendor/github.com/beorn7/perks/VERSION +++ /dev/null @@ -1 +0,0 @@ -1.0.0 diff --git a/vendor/github.com/beorn7/perks/go.mod b/vendor/github.com/beorn7/perks/go.mod deleted file mode 100644 index cef4dd256..000000000 --- a/vendor/github.com/beorn7/perks/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/beorn7/perks - -go 1.12 diff --git a/vendor/github.com/beorn7/perks/histogram/histogram_test.go b/vendor/github.com/beorn7/perks/histogram/histogram_test.go index 277749d46..0575ebeee 100644 --- a/vendor/github.com/beorn7/perks/histogram/histogram_test.go +++ b/vendor/github.com/beorn7/perks/histogram/histogram_test.go @@ -6,7 +6,7 @@ import ( ) func TestHistogram(t *testing.T) { - const numPoints = 1000000 + const numPoints = 1e6 const maxBins = 3 h := New(maxBins) diff --git a/vendor/github.com/davecgh/go-spew/.travis.yml b/vendor/github.com/davecgh/go-spew/.travis.yml index 82e742fa6..1f4cbf542 100644 --- a/vendor/github.com/davecgh/go-spew/.travis.yml +++ b/vendor/github.com/davecgh/go-spew/.travis.yml @@ -6,7 +6,6 @@ go: - 1.8.x - 1.9.x - 1.10.x - - 1.11.x - tip sudo: false install: diff --git a/vendor/github.com/ghodss/yaml/.travis.yml b/vendor/github.com/ghodss/yaml/.travis.yml index 98ad417e2..0e9d6edc0 100644 --- a/vendor/github.com/ghodss/yaml/.travis.yml +++ b/vendor/github.com/ghodss/yaml/.travis.yml @@ -1,8 +1,7 @@ language: go go: - - "1.9" - - "1.10" - - "1.11" + - 1.3 + - 1.4 script: - go test - go build diff --git a/vendor/github.com/ghodss/yaml/README.md b/vendor/github.com/ghodss/yaml/README.md index 0200f75b4..f8f7e3695 100644 --- a/vendor/github.com/ghodss/yaml/README.md +++ b/vendor/github.com/ghodss/yaml/README.md @@ -4,13 +4,13 @@ ## Introduction -A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better 
way of handling YAML when marshaling to and from structs. +A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs. In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/). ## Compatibility -This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility). +This package uses [go-yaml v2](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility). ## Caveats @@ -44,8 +44,6 @@ import "github.com/ghodss/yaml" Usage is very similar to the JSON library: ```go -package main - import ( "fmt" @@ -53,8 +51,8 @@ import ( ) type Person struct { - Name string `json:"name"` // Affects YAML field names too. - Age int `json:"age"` + Name string `json:"name"` // Affects YAML field names too. + Age int `json:"name"` } func main() { @@ -67,13 +65,13 @@ func main() { } fmt.Println(string(y)) /* Output: - age: 30 name: John + age: 30 */ // Unmarshal the YAML back into a Person struct. 
var p2 Person - err = yaml.Unmarshal(y, &p2) + err := yaml.Unmarshal(y, &p2) if err != nil { fmt.Printf("err: %v\n", err) return @@ -88,14 +86,11 @@ func main() { `yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available: ```go -package main - import ( "fmt" "github.com/ghodss/yaml" ) - func main() { j := []byte(`{"name": "John", "age": 30}`) y, err := yaml.JSONToYAML(j) diff --git a/vendor/github.com/ghodss/yaml/fields.go b/vendor/github.com/ghodss/yaml/fields.go index 586007402..0bd3c2b46 100644 --- a/vendor/github.com/ghodss/yaml/fields.go +++ b/vendor/github.com/ghodss/yaml/fields.go @@ -45,11 +45,7 @@ func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.Te break } if v.IsNil() { - if v.CanSet() { - v.Set(reflect.New(v.Type().Elem())) - } else { - v = reflect.New(v.Type().Elem()) - } + v.Set(reflect.New(v.Type().Elem())) } if v.Type().NumMethod() > 0 { if u, ok := v.Interface().(json.Unmarshaler); ok { diff --git a/vendor/github.com/ghodss/yaml/go.mod b/vendor/github.com/ghodss/yaml/go.mod deleted file mode 100644 index 8d9ad7b64..000000000 --- a/vendor/github.com/ghodss/yaml/go.mod +++ /dev/null @@ -1,3 +0,0 @@ -module github.com/ghodss/yaml - -require gopkg.in/yaml.v2 v2.2.2 diff --git a/vendor/github.com/ghodss/yaml/go.sum b/vendor/github.com/ghodss/yaml/go.sum deleted file mode 100644 index bd555a333..000000000 --- a/vendor/github.com/ghodss/yaml/go.sum +++ /dev/null @@ -1,3 +0,0 @@ -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/ghodss/yaml/yaml.go b/vendor/github.com/ghodss/yaml/yaml.go index dfd264d6c..c02beacb9 100644 --- a/vendor/github.com/ghodss/yaml/yaml.go +++ b/vendor/github.com/ghodss/yaml/yaml.go @@ -1,20 +1,9 @@ -// Package yaml provides a wrapper around go-yaml designed 
to enable a better -// way of handling YAML when marshaling to and from structs. -// -// In short, this package first converts YAML to JSON using go-yaml and then -// uses json.Marshal and json.Unmarshal to convert to or from the struct. This -// means that it effectively reuses the JSON struct tags as well as the custom -// JSON methods MarshalJSON and UnmarshalJSON unlike go-yaml. -// -// See also http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang -// -package yaml // import "github.com/ghodss/yaml" +package yaml import ( "bytes" "encoding/json" "fmt" - "io" "reflect" "strconv" @@ -26,41 +15,26 @@ import ( func Marshal(o interface{}) ([]byte, error) { j, err := json.Marshal(o) if err != nil { - return nil, fmt.Errorf("error marshaling into JSON: %v", err) + return nil, fmt.Errorf("error marshaling into JSON: ", err) } y, err := JSONToYAML(j) if err != nil { - return nil, fmt.Errorf("error converting JSON to YAML: %v", err) + return nil, fmt.Errorf("error converting JSON to YAML: ", err) } return y, nil } -// JSONOpt is a decoding option for decoding from JSON format. -type JSONOpt func(*json.Decoder) *json.Decoder - -// Unmarshal converts YAML to JSON then uses JSON to unmarshal into an object, -// optionally configuring the behavior of the JSON unmarshal. -func Unmarshal(y []byte, o interface{}, opts ...JSONOpt) error { - return unmarshal(yaml.Unmarshal, y, o, opts) -} - -// UnmarshalStrict is like Unmarshal except that any mapping keys that are -// duplicates will result in an error. -// To also be strict about unknown fields, add the DisallowUnknownFields option. -func UnmarshalStrict(y []byte, o interface{}, opts ...JSONOpt) error { - return unmarshal(yaml.UnmarshalStrict, y, o, opts) -} - -func unmarshal(f func(in []byte, out interface{}) (err error), y []byte, o interface{}, opts []JSONOpt) error { +// Converts YAML to JSON then uses JSON to unmarshal into an object. 
+func Unmarshal(y []byte, o interface{}) error { vo := reflect.ValueOf(o) - j, err := yamlToJSON(y, &vo, f) + j, err := yamlToJSON(y, &vo) if err != nil { return fmt.Errorf("error converting YAML to JSON: %v", err) } - err = jsonUnmarshal(bytes.NewReader(j), o, opts...) + err = json.Unmarshal(j, o) if err != nil { return fmt.Errorf("error unmarshaling JSON: %v", err) } @@ -68,28 +42,13 @@ func unmarshal(f func(in []byte, out interface{}) (err error), y []byte, o inter return nil } -// jsonUnmarshal unmarshals the JSON byte stream from the given reader into the -// object, optionally applying decoder options prior to decoding. We are not -// using json.Unmarshal directly as we want the chance to pass in non-default -// options. -func jsonUnmarshal(r io.Reader, o interface{}, opts ...JSONOpt) error { - d := json.NewDecoder(r) - for _, opt := range opts { - d = opt(d) - } - if err := d.Decode(&o); err != nil { - return fmt.Errorf("while decoding JSON: %v", err) - } - return nil -} - // Convert JSON to YAML. func JSONToYAML(j []byte) ([]byte, error) { // Convert the JSON to an object. var jsonObj interface{} // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the // Go JSON library doesn't try to pick the right number type (int, float, - // etc.) when unmarshalling to interface{}, it just picks float64 + // etc.) when unmarshling to interface{}, it just picks float64 // universally. go-yaml does go through the effort of picking the right // number type, so we can preserve number type throughout this process. err := yaml.Unmarshal(j, &jsonObj) @@ -101,8 +60,8 @@ func JSONToYAML(j []byte) ([]byte, error) { return yaml.Marshal(jsonObj) } -// YAMLToJSON converts YAML to JSON. Since JSON is a subset of YAML, -// passing JSON through this method should be a no-op. +// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through +// this method should be a no-op. 
// // Things YAML can do that are not supported by JSON: // * In YAML you can have binary and null keys in your maps. These are invalid @@ -111,22 +70,14 @@ func JSONToYAML(j []byte) ([]byte, error) { // use binary data with this library, encode the data as base64 as usual but do // not use the !!binary tag in your YAML. This will ensure the original base64 // encoded data makes it all the way through to the JSON. -// -// For strict decoding of YAML, use YAMLToJSONStrict. func YAMLToJSON(y []byte) ([]byte, error) { - return yamlToJSON(y, nil, yaml.Unmarshal) -} - -// YAMLToJSONStrict is like YAMLToJSON but enables strict YAML decoding, -// returning an error on any duplicate field names. -func YAMLToJSONStrict(y []byte) ([]byte, error) { - return yamlToJSON(y, nil, yaml.UnmarshalStrict) + return yamlToJSON(y, nil) } -func yamlToJSON(y []byte, jsonTarget *reflect.Value, yamlUnmarshal func([]byte, interface{}) error) ([]byte, error) { +func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) { // Convert the YAML to an object. var yamlObj interface{} - err := yamlUnmarshal(y, &yamlObj) + err := yaml.Unmarshal(y, &yamlObj) if err != nil { return nil, err } @@ -134,7 +85,7 @@ func yamlToJSON(y []byte, jsonTarget *reflect.Value, yamlUnmarshal func([]byte, // YAML objects are not completely compatible with JSON objects (e.g. you // can have non-string keys in YAML). So, convert the YAML-compatible object // to a JSON-compatible object, failing with an error if irrecoverable - // incompatibilities happen along the way. + // incompatibilties happen along the way. 
jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget) if err != nil { return nil, err diff --git a/vendor/github.com/ghodss/yaml/yaml_go110.go b/vendor/github.com/ghodss/yaml/yaml_go110.go deleted file mode 100644 index ab3e06a22..000000000 --- a/vendor/github.com/ghodss/yaml/yaml_go110.go +++ /dev/null @@ -1,14 +0,0 @@ -// This file contains changes that are only compatible with go 1.10 and onwards. - -// +build go1.10 - -package yaml - -import "encoding/json" - -// DisallowUnknownFields configures the JSON decoder to error out if unknown -// fields come along, instead of dropping them by default. -func DisallowUnknownFields(d *json.Decoder) *json.Decoder { - d.DisallowUnknownFields() - return d -} diff --git a/vendor/github.com/ghodss/yaml/yaml_go110_test.go b/vendor/github.com/ghodss/yaml/yaml_go110_test.go deleted file mode 100644 index 753ee3f6f..000000000 --- a/vendor/github.com/ghodss/yaml/yaml_go110_test.go +++ /dev/null @@ -1,100 +0,0 @@ -// +build go1.10 - -package yaml - -import ( - "fmt" - "reflect" - "strings" - "testing" -) - -func TestUnmarshalWithTags(t *testing.T) { - type WithTaggedField struct { - Field string `json:"field"` - } - - t.Run("Known tagged field", func(t *testing.T) { - y := []byte(`field: "hello"`) - v := WithTaggedField{} - if err := Unmarshal(y, &v, DisallowUnknownFields); err != nil { - t.Errorf("unexpected error: %v", err) - } - if v.Field != "hello" { - t.Errorf("v.Field=%v, want 'hello'", v.Field) - } - - }) - t.Run("With unknown tagged field", func(t *testing.T) { - y := []byte(`unknown: "hello"`) - v := WithTaggedField{} - err := Unmarshal(y, &v, DisallowUnknownFields) - if err == nil { - t.Errorf("want error because of unknown field, got : v=%#v", v) - } - }) - -} - -// TestUnmarshalStrictWithJSONOpts tests that we return an error if there are -// duplicate fields in the YAML input. 
-func TestUnmarshalStrictWithJSONOpts(t *testing.T) { - for _, tc := range []struct { - yaml []byte - opts []JSONOpt - want UnmarshalString - wantErr string - }{ - { - // By default, unknown field is ignored. - yaml: []byte("a: 1\nunknownField: 2"), - want: UnmarshalString{A: "1"}, - }, - { - // Unknown field produces an error with `DisallowUnknownFields` option. - yaml: []byte("a: 1\nunknownField: 2"), - opts: []JSONOpt{DisallowUnknownFields}, - wantErr: `unknown field "unknownField"`, - }, - } { - po := prettyFunctionName(tc.opts) - s := UnmarshalString{} - err := UnmarshalStrict(tc.yaml, &s, tc.opts...) - if tc.wantErr != "" && err == nil { - t.Errorf("UnmarshalStrict(%#q, &s, %v) = nil; want error", string(tc.yaml), po) - continue - } - if tc.wantErr == "" && err != nil { - t.Errorf("UnmarshalStrict(%#q, &s, %#v) = %v; want no error", string(tc.yaml), po, err) - continue - } - // We expect that duplicate fields are discovered during JSON unmarshalling. - if want := "error unmarshaling JSON"; tc.wantErr != "" && !strings.Contains(err.Error(), want) { - t.Errorf("UnmarshalStrict(%#q, &s, %#v) = %v; want err contains %#q", string(tc.yaml), po, err, want) - } - if tc.wantErr != "" && !strings.Contains(err.Error(), tc.wantErr) { - t.Errorf("UnmarshalStrict(%#q, &s, %#v) = %v; want err contains %#q", string(tc.yaml), po, err, tc.wantErr) - } - - // Only test content of `s` if parsing indicated no error. - // If we got an error, `s` may be partially parsed and contain some data. 
- if err != nil { - continue - } - - if !reflect.DeepEqual(s, tc.want) { - t.Errorf("UnmarshalStrict(%#q, &s, %#v) = %+#v; want %+#v", string(tc.yaml), po, s, tc.want) - } - } -} - -func ExampleUnknown() { - type WithTaggedField struct { - Field string `json:"field"` - } - y := []byte(`unknown: "hello"`) - v := WithTaggedField{} - fmt.Printf("%v\n", Unmarshal(y, &v, DisallowUnknownFields)) - // Ouptut: - // unmarshaling JSON: while decoding JSON: json: unknown field "unknown" -} diff --git a/vendor/github.com/ghodss/yaml/yaml_test.go b/vendor/github.com/ghodss/yaml/yaml_test.go index e31b402c5..0ae0954e9 100644 --- a/vendor/github.com/ghodss/yaml/yaml_test.go +++ b/vendor/github.com/ghodss/yaml/yaml_test.go @@ -4,9 +4,7 @@ import ( "fmt" "math" "reflect" - "runtime" "strconv" - "strings" "testing" ) @@ -21,7 +19,7 @@ type MarshalTest struct { func TestMarshal(t *testing.T) { f32String := strconv.FormatFloat(math.MaxFloat32, 'g', -1, 32) s := MarshalTest{"a", math.MaxInt64, math.MaxFloat32} - e := []byte(fmt.Sprintf("A: a\nB: %d\nC: %s\n", int64(math.MaxInt64), f32String)) + e := []byte(fmt.Sprintf("A: a\nB: %d\nC: %s\n", math.MaxInt64, f32String)) y, err := Marshal(s) if err != nil { @@ -64,186 +62,43 @@ func TestUnmarshal(t *testing.T) { y := []byte("a: 1") s1 := UnmarshalString{} e1 := UnmarshalString{A: "1"} - unmarshalEqual(t, y, &s1, &e1) + unmarshal(t, y, &s1, &e1) y = []byte("a: true") s1 = UnmarshalString{} e1 = UnmarshalString{A: "true"} - unmarshalEqual(t, y, &s1, &e1) + unmarshal(t, y, &s1, &e1) y = []byte("true: 1") s1 = UnmarshalString{} e1 = UnmarshalString{True: "1"} - unmarshalEqual(t, y, &s1, &e1) + unmarshal(t, y, &s1, &e1) y = []byte("a:\n a: 1") s2 := UnmarshalNestedString{} e2 := UnmarshalNestedString{NestedString{"1"}} - unmarshalEqual(t, y, &s2, &e2) + unmarshal(t, y, &s2, &e2) y = []byte("a:\n - b: abc\n c: def\n - b: 123\n c: 456\n") s3 := UnmarshalSlice{} e3 := UnmarshalSlice{[]NestedSlice{NestedSlice{"abc", strPtr("def")}, 
NestedSlice{"123", strPtr("456")}}} - unmarshalEqual(t, y, &s3, &e3) + unmarshal(t, y, &s3, &e3) y = []byte("a:\n b: 1") s4 := UnmarshalStringMap{} e4 := UnmarshalStringMap{map[string]string{"b": "1"}} - unmarshalEqual(t, y, &s4, &e4) - - y = []byte(` -a: - name: TestA -b: - name: TestB -`) - type NamedThing struct { - Name string `json:"name"` - } - s5 := map[string]*NamedThing{} - e5 := map[string]*NamedThing{ - "a": &NamedThing{Name: "TestA"}, - "b": &NamedThing{Name: "TestB"}, - } - unmarshalEqual(t, y, &s5, &e5) + unmarshal(t, y, &s4, &e4) } -// TestUnmarshalNonStrict tests that we parse ambiguous YAML without error. -func TestUnmarshalNonStrict(t *testing.T) { - for _, tc := range []struct { - yaml []byte - want UnmarshalString - }{ - { - yaml: []byte("a: 1"), - want: UnmarshalString{A: "1"}, - }, - { - // Unknown field get ignored. - yaml: []byte("a: 1\nunknownField: 2"), - want: UnmarshalString{A: "1"}, - }, - { - // Unknown fields get ignored. - yaml: []byte("unknownOne: 2\na: 1\nunknownTwo: 2"), - want: UnmarshalString{A: "1"}, - }, - { - // Last declaration of `a` wins. - yaml: []byte("a: 1\na: 2"), - want: UnmarshalString{A: "2"}, - }, - { - // Even ignore first declaration of `a` with wrong type. - yaml: []byte("a: [1,2,3]\na: value-of-a"), - want: UnmarshalString{A: "value-of-a"}, - }, - { - // Last value of `a` and first and only mention of `true` are parsed. - yaml: []byte("true: string-value-of-yes\na: 1\na: [1,2,3]\na: value-of-a"), - want: UnmarshalString{A: "value-of-a", True: "string-value-of-yes"}, - }, - { - // In YAML, `YES` is a Boolean true. - yaml: []byte("true: YES"), - want: UnmarshalString{True: "true"}, - }, - } { - s := UnmarshalString{} - unmarshalEqual(t, tc.yaml, &s, &tc.want) - } -} - -// prettyFunctionName converts a slice of JSONOpt function pointers to a human -// readable string representation. 
-func prettyFunctionName(opts []JSONOpt) []string { - var r []string - for _, o := range opts { - r = append(r, runtime.FuncForPC(reflect.ValueOf(o).Pointer()).Name()) - } - return r -} - -func unmarshalEqual(t *testing.T, y []byte, s, e interface{}, opts ...JSONOpt) { - t.Helper() - err := Unmarshal(y, s, opts...) +func unmarshal(t *testing.T, y []byte, s, e interface{}) { + err := Unmarshal(y, s) if err != nil { - t.Errorf("Unmarshal(%#q, s, %v) = %v", string(y), prettyFunctionName(opts), err) - return + t.Errorf("error unmarshaling YAML: %v", err) } if !reflect.DeepEqual(s, e) { - t.Errorf("Unmarshal(%#q, s, %v) = %+#v; want %+#v", string(y), prettyFunctionName(opts), s, e) - } -} - -// TestUnmarshalStrict tests that we return an error on ambiguous YAML. -func TestUnmarshalStrict(t *testing.T) { - for _, tc := range []struct { - yaml []byte - want UnmarshalString - wantErr string - }{ - { - yaml: []byte("a: 1"), - want: UnmarshalString{A: "1"}, - }, - { - // Order does not matter. - yaml: []byte("true: 1\na: 2"), - want: UnmarshalString{A: "2", True: "1"}, - }, - { - // By default, unknown field is ignored. - yaml: []byte("a: 1\nunknownField: 2"), - want: UnmarshalString{A: "1"}, - }, - { - // Declaring `a` twice produces an error. - yaml: []byte("a: 1\na: 2"), - wantErr: `key "a" already set in map`, - }, - { - // Not ignoring first declaration of A with wrong type. - yaml: []byte("a: [1,2,3]\na: value-of-a"), - wantErr: `key "a" already set in map`, - }, - { - // Declaring field `true` twice. - yaml: []byte("true: string-value-of-yes\ntrue: 1"), - wantErr: `key true already set in map`, - }, - { - // In YAML, `YES` is a Boolean true. 
- yaml: []byte("true: YES"), - want: UnmarshalString{True: "true"}, - }, - } { - s := UnmarshalString{} - err := UnmarshalStrict(tc.yaml, &s) - if tc.wantErr != "" && err == nil { - t.Errorf("UnmarshalStrict(%#q, &s) = nil; want error", string(tc.yaml)) - continue - } - if tc.wantErr == "" && err != nil { - t.Errorf("UnmarshalStrict(%#q, &s) = %v; want no error", string(tc.yaml), err) - continue - } - // We only expect errors during unmarshalling YAML. - if want := "yaml: unmarshal errors"; tc.wantErr != "" && !strings.Contains(err.Error(), want) { - t.Errorf("UnmarshalStrict(%#q, &s) = %v; want err contains %#q", string(tc.yaml), err, want) - } - if tc.wantErr != "" && !strings.Contains(err.Error(), tc.wantErr) { - t.Errorf("UnmarshalStrict(%#q, &s) = %v; want err contains %#q", string(tc.yaml), err, tc.wantErr) - } - - // Even if there was an error, we continue the test: We expect that all - // errors occur during YAML unmarshalling. Such errors leaves `s` unmodified - // and the following check will compare default values of `UnmarshalString`. 
- - if !reflect.DeepEqual(s, tc.want) { - t.Errorf("UnmarshalStrict(%#q, &s) = %+#v; want %+#v", string(tc.yaml), s, tc.want) - } + t.Errorf("unmarshal YAML was unsuccessful, expected: %+#v, got: %+#v", + e, s) } } @@ -414,16 +269,3 @@ func runCases(t *testing.T, runType RunType, cases []Case) { func strPtr(s string) *string { return &s } - -func TestYAMLToJSONStrict(t *testing.T) { - const data = ` -foo: bar -foo: baz -` - if _, err := YAMLToJSON([]byte(data)); err != nil { - t.Error("expected YAMLtoJSON to pass on duplicate field names") - } - if _, err := YAMLToJSONStrict([]byte(data)); err == nil { - t.Error("expected YAMLtoJSONStrict to fail on duplicate field names") - } -} diff --git a/vendor/github.com/openshift/api/.travis.yml b/vendor/github.com/openshift/api/.travis.yml index 494d91ee4..d67d8be64 100644 --- a/vendor/github.com/openshift/api/.travis.yml +++ b/vendor/github.com/openshift/api/.travis.yml @@ -1,7 +1,7 @@ language: go go: - - "1.11" + - "1.12" install: - wget https://github.com/google/protobuf/releases/download/v3.0.2/protoc-3.0.2-linux-x86_64.zip diff --git a/vendor/github.com/openshift/api/Makefile b/vendor/github.com/openshift/api/Makefile index ba83ccb14..44fd3a6e0 100644 --- a/vendor/github.com/openshift/api/Makefile +++ b/vendor/github.com/openshift/api/Makefile @@ -1,33 +1,50 @@ all: build .PHONY: all -RUNTIME ?= podman -RUNTIME_IMAGE_NAME ?= openshift-api-generator +# Include the library makefile +include $(addprefix ./hack/alpha-build-machinery/make/, \ + golang.mk \ + targets/openshift/deps.mk \ + targets/openshift/crd-schema-gen.mk \ +) -build: - go build github.com/openshift/api/... -.PHONY: build +GO_PACKAGES :=$(addsuffix ...,$(addprefix ./,$(filter-out vendor/,$(filter-out hack/,$(wildcard */))))) +GO_BUILD_PACKAGES :=$(GO_PACKAGES) +GO_BUILD_PACKAGES_EXPANDED :=$(GO_BUILD_PACKAGES) +# LDFLAGS are not needed for dummy builds (saving time on calling git commands) +GO_LD_FLAGS:= -test: - go test github.com/openshift/api/... 
-.PHONY: test +# Set crd-schema-gen variables +CONTROLLER_GEN_VERSION :=v0.2.1 -verify: +# $1 - target name +# $2 - apis +# $3 - manifests +# $4 - output +$(call add-crd-gen,authorization,./authorization/v1,./authorization/v1,./authorization/v1) +$(call add-crd-gen,config,./config/v1,./config/v1,./config/v1) +$(call add-crd-gen,console,./console/v1,./console/v1,./console/v1) +$(call add-crd-gen,operator,./operator/v1alpha1,./operator/v1alpha1,./operator/v1alpha1) +$(call add-crd-gen,quota,./quota/v1,./quota/v1,./quota/v1) +$(call add-crd-gen,security,./security/v1,./security/v1,./security/v1) + +RUNTIME ?= podman +RUNTIME_IMAGE_NAME ?= openshift-api-generator + +verify-scripts: bash -x hack/verify-deepcopy.sh bash -x hack/verify-protobuf.sh bash -x hack/verify-swagger-docs.sh -.PHONY: verify - -update-deps: - hack/update-deps.sh -.PHONY: update-deps +.PHONY: verify-scripts +verify: verify-scripts -generate-with-container: Dockerfile.build - $(RUNTIME) build -t $(RUNTIME_IMAGE_NAME) -f Dockerfile.build . - $(RUNTIME) run -ti --rm -v $(PWD):/go/src/github.com/openshift/api:z -w /go/src/github.com/openshift/api $(RUNTIME_IMAGE_NAME) make generate - -generate: +update-scripts: hack/update-deepcopy.sh hack/update-protobuf.sh hack/update-swagger-docs.sh -.PHONY: generate +.PHONY: update-scripts +update: update-scripts + +generate-with-container: Dockerfile.build + $(RUNTIME) build -t $(RUNTIME_IMAGE_NAME) -f Dockerfile.build . 
+ $(RUNTIME) run -ti --rm -v $(PWD):/go/src/github.com/openshift/api:z -w /go/src/github.com/openshift/api $(RUNTIME_IMAGE_NAME) make update-scripts diff --git a/vendor/github.com/openshift/api/authorization/v1/0000_03_authorization-openshift_01_rolebindingrestriction.crd.yaml b/vendor/github.com/openshift/api/authorization/v1/0000_03_authorization-openshift_01_rolebindingrestriction.crd.yaml new file mode 100644 index 000000000..5ec81b3bd --- /dev/null +++ b/vendor/github.com/openshift/api/authorization/v1/0000_03_authorization-openshift_01_rolebindingrestriction.crd.yaml @@ -0,0 +1,205 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: rolebindingrestrictions.authorization.openshift.io +spec: + group: authorization.openshift.io + names: + kind: RoleBindingRestriction + listKind: RoleBindingRestrictionList + plural: rolebindingrestrictions + singular: rolebindingrestriction + scope: Namespaced + versions: + - name: v1 + served: true + storage: true + "validation": + "openAPIV3Schema": + description: RoleBindingRestriction is an object that can be matched against + a subject (user, group, or service account) to determine whether rolebindings + on that subject are allowed in the namespace to which the RoleBindingRestriction + belongs. If any one of those RoleBindingRestriction objects matches a subject, + rolebindings on that subject in the namespace are allowed. + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. 
In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: Standard object's metadata. + type: object + spec: + description: Spec defines the matcher. + type: object + properties: + grouprestriction: + description: GroupRestriction matches against group subjects. + type: object + properties: + groups: + description: Groups is a list of groups used to match against an + individual user's groups. If the user is a member of one of the + whitelisted groups, the user is allowed to be bound to a role. + type: array + items: + type: string + nullable: true + labels: + description: Selectors specifies a list of label selectors over + group labels. + type: array + items: + description: A label selector is a label query over a set of resources. + The result of matchLabels and matchExpressions are ANDed. An + empty label selector matches all objects. A null label selector + matches no objects. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that relates + the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If + the operator is In or NotIn, the values array must + be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced + during a strategic merge patch. 
+ type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A + single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is "key", + the operator is "In", and the values array contains only + "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + nullable: true + nullable: true + serviceaccountrestriction: + description: ServiceAccountRestriction matches against service-account + subjects. + type: object + properties: + namespaces: + description: Namespaces specifies a list of literal namespace names. + type: array + items: + type: string + serviceaccounts: + description: ServiceAccounts specifies a list of literal service-account + names. + type: array + items: + description: ServiceAccountReference specifies a service account + and namespace by their names. + type: object + properties: + name: + description: Name is the name of the service account. + type: string + namespace: + description: Namespace is the namespace of the service account. Service + accounts from inside the whitelisted namespaces are allowed + to be bound to roles. If Namespace is empty, then the namespace + of the RoleBindingRestriction in which the ServiceAccountReference + is embedded is used. + type: string + nullable: true + userrestriction: + description: UserRestriction matches against user subjects. + type: object + properties: + groups: + description: Groups specifies a list of literal group names. + type: array + items: + type: string + nullable: true + labels: + description: Selectors specifies a list of label selectors over + user labels. + type: array + items: + description: A label selector is a label query over a set of resources. + The result of matchLabels and matchExpressions are ANDed. An + empty label selector matches all objects. A null label selector + matches no objects. 
+ type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector + requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector + that contains values, a key, and an operator that relates + the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector + applies to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, + Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If + the operator is In or NotIn, the values array must + be non-empty. If the operator is Exists or DoesNotExist, + the values array must be empty. This array is replaced + during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A + single {key,value} in the matchLabels map is equivalent + to an element of matchExpressions, whose key field is "key", + the operator is "In", and the values array contains only + "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + nullable: true + users: + description: Users specifies a list of literal user names. 
+ type: array + items: + type: string + nullable: true diff --git a/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_operatorhub.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_operatorhub.crd.yaml new file mode 100644 index 000000000..131914124 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_operatorhub.crd.yaml @@ -0,0 +1,100 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: operatorhubs.config.openshift.io +spec: + group: config.openshift.io + names: + kind: OperatorHub + listKind: OperatorHubList + plural: operatorhubs + singular: operatorhub + scope: Cluster + subresources: + status: {} + version: v1 + versions: + - name: v1 + served: true + storage: true + "validation": + "openAPIV3Schema": + description: OperatorHub is the Schema for the operatorhubs API. It can be used + to change the state of the default hub sources for OperatorHub on the cluster + from enabled to disabled and vice versa. + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: OperatorHubSpec defines the desired state of OperatorHub + type: object + properties: + disableAllDefaultSources: + description: disableAllDefaultSources allows you to disable all the + default hub sources. If this is true, a specific entry in sources + can be used to enable a default source. If this is false, a specific + entry in sources can be used to disable or enable a default source. + type: boolean + sources: + description: sources is the list of default hub sources and their configuration. + If the list is empty, it implies that the default hub sources are + enabled on the cluster unless disableAllDefaultSources is true. If + disableAllDefaultSources is true and sources is not empty, the configuration + present in sources will take precedence. The list of default hub sources + and their current state will always be reflected in the status block. + type: array + items: + description: HubSource is used to specify the hub source and its configuration + type: object + properties: + disabled: + description: disabled is used to disable a default hub source + on cluster + type: boolean + name: + description: name is the name of one of the default hub sources + type: string + maxLength: 253 + minLength: 1 + status: + description: OperatorHubStatus defines the observed state of OperatorHub. + The current state of the default hub sources will always be reflected + here. 
+ type: object + properties: + sources: + description: sources encapsulates the result of applying the configuration + for each hub source + type: array + items: + description: HubSourceStatus is used to reflect the current state + of applying the configuration to a default source + type: object + properties: + disabled: + description: disabled is used to disable a default hub source + on cluster + type: boolean + message: + description: message provides more information regarding failures + type: string + name: + description: name is the name of one of the default hub sources + type: string + maxLength: 253 + minLength: 1 + status: + description: status indicates success or failure in applying the + configuration + type: string diff --git a/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml new file mode 100644 index 000000000..35d92449d --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_03_config-operator_01_proxy.crd.yaml @@ -0,0 +1,97 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: proxies.config.openshift.io +spec: + group: config.openshift.io + scope: Cluster + versions: + - name: v1 + served: true + storage: true + names: + kind: Proxy + listKind: ProxyList + plural: proxies + singular: proxy + subresources: + status: {} + "validation": + "openAPIV3Schema": + description: Proxy holds cluster-wide information on how to configure default + proxies for the cluster. The canonical name is `cluster` + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec holds user-settable values for the proxy configuration + type: object + properties: + httpProxy: + description: httpProxy is the URL of the proxy for HTTP requests. Empty + means unset and will not result in an env var. + type: string + httpsProxy: + description: httpsProxy is the URL of the proxy for HTTPS requests. Empty + means unset and will not result in an env var. + type: string + noProxy: + description: noProxy is a comma-separated list of hostnames and/or CIDRs + for which the proxy should not be used. Empty means unset and will + not result in an env var. + type: string + readinessEndpoints: + description: readinessEndpoints is a list of endpoints used to verify + readiness of the proxy. + type: array + items: + type: string + trustedCA: + description: "trustedCA is a reference to a ConfigMap containing a CA + certificate bundle used for client egress HTTPS connections. The certificate + bundle must be from the CA that signed the proxy's certificate and + be signed for everything. The trustedCA field should only be consumed + by a proxy validator. The validator is responsible for reading the + certificate bundle from required key \"ca-bundle.crt\" and copying + it to a ConfigMap named \"trusted-ca-bundle\" in the \"openshift-config-managed\" + namespace. The namespace for the ConfigMap referenced by trustedCA + is \"openshift-config\". 
Here is an example ConfigMap (in yaml): \n + apiVersion: v1 kind: ConfigMap metadata: name: user-ca-bundle namespace: + openshift-config data: ca-bundle.crt: | -----BEGIN CERTIFICATE----- + \ Custom CA certificate bundle. -----END CERTIFICATE-----" + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config + map + type: string + status: + description: status holds observed values from the cluster. They may not + be overridden. + type: object + properties: + httpProxy: + description: httpProxy is the URL of the proxy for HTTP requests. + type: string + httpsProxy: + description: httpsProxy is the URL of the proxy for HTTPS requests. + type: string + noProxy: + description: noProxy is a comma-separated list of hostnames and/or CIDRs + for which the proxy should not be used. + type: string diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml new file mode 100644 index 000000000..daf2b8f89 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml @@ -0,0 +1,209 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: apiservers.config.openshift.io +spec: + group: config.openshift.io + scope: Cluster + names: + kind: APIServer + singular: apiserver + plural: apiservers + listKind: APIServerList + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + "validation": + "openAPIV3Schema": + description: APIServer holds configuration (like serving certificates, client + CA and CORS domains) shared by all API servers in the system, among them especially + kube-apiserver and openshift-apiserver. The canonical name of an instance + is 'cluster'. 
+ type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + type: object + properties: + additionalCORSAllowedOrigins: + description: additionalCORSAllowedOrigins lists additional, user-defined + regular expressions describing hosts for which the API server allows + access using the CORS headers. This may be needed to access the API + and the integrated OAuth server from JavaScript applications. The + values are regular expressions that correspond to the Golang regular + expression language. + type: array + items: + type: string + clientCA: + description: 'clientCA references a ConfigMap containing a certificate + bundle for the signers that will be recognized for incoming client + certificates in addition to the operator managed signers. If this + is empty, then only operator managed signers are valid. You usually + only have to set this if you have your own PKI you wish to honor client + certificates from. The ConfigMap must exist in the openshift-config + namespace and contain the following required fields: - ConfigMap.Data["ca-bundle.crt"] + - CA bundle.' 
+ type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config + map + type: string + encryption: + description: encryption allows the configuration of encryption of resources + at the datastore layer. + type: object + properties: + type: + description: "type defines what encryption type should be used to + encrypt resources at the datastore layer. When this field is unset + (i.e. when it is set to the empty string), identity is implied. + The behavior of unset can and will change over time. Even if + encryption is enabled by default, the meaning of unset may change + to a different encryption type based on changes in best practices. + \n When encryption is enabled, all sensitive resources shipped + with the platform are encrypted. This list of sensitive resources + can and will change over time. The current authoritative list + is: \n 1. secrets 2. configmaps 3. routes.route.openshift.io + \ 4. oauthaccesstokens.oauth.openshift.io 5. oauthauthorizetokens.oauth.openshift.io" + type: string + enum: + - "" + - identity + - aescbc + servingCerts: + description: servingCert is the TLS cert info for serving secure traffic. + If not specified, operator managed certificates will be used for serving + secure traffic. + type: object + properties: + namedCertificates: + description: namedCertificates references secrets containing the + TLS cert info for serving secure traffic to specific hostnames. + If no named certificates are provided, or no named certificates + match the server name as understood by a client, the defaultServingCertificate + will be used. + type: array + items: + description: APIServerNamedServingCert maps a server DNS name, + as understood by a client, to a certificate. + type: object + properties: + names: + description: names is a optional list of explicit DNS names + (leading wildcards allowed) that should use this certificate + to serve secure traffic. 
If no names are provided, the implicit + names will be extracted from the certificates. Exact names + trump over wildcard names. Explicit names defined here trump + over extracted implicit names. + type: array + items: + type: string + servingCertificate: + description: 'servingCertificate references a kubernetes.io/tls + type secret containing the TLS cert info for serving secure + traffic. The secret must exist in the openshift-config namespace + and contain the following required fields: - Secret.Data["tls.key"] + - TLS private key. - Secret.Data["tls.crt"] - TLS certificate.' + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced + secret + type: string + tlsSecurityProfile: + description: "tlsSecurityProfile specifies settings for TLS connections + for externally exposed servers. \n If unset, a default (which may + change between releases) is chosen." + type: object + properties: + custom: + description: "custom is a user-defined TLS security profile. Be + extremely careful using a custom profile as invalid configurations + can be catastrophic. An example custom profile looks like this: + \n ciphers: - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 + \ - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 + \ minTLSVersion: TLSv1.1" + type: object + properties: + ciphers: + description: "ciphers is used to specify the cipher algorithms + that are negotiated during the TLS handshake. Operators may + remove entries their operands do not support. For example, + to use DES-CBC3-SHA (yaml): \n ciphers: - DES-CBC3-SHA" + type: array + items: + type: string + minTLSVersion: + description: "minTLSVersion is used to specify the minimal version + of the TLS protocol that is negotiated during the TLS handshake. 
+ For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml): + \n minTLSVersion: TLSv1.1" + type: string + nullable: true + intermediate: + description: "intermediate is a TLS security profile based on: \n + https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29 + \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 + \ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 + \ - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 + \ - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 + \ - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 + \ minTLSVersion: TLSv1.2" + type: object + nullable: true + modern: + description: "modern is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility + \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 + \ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 + \ minTLSVersion: TLSv1.3" + type: object + nullable: true + old: + description: "old is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility + \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 + \ - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 + \ - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 + \ - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 + \ - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 + \ - ECDHE-ECDSA-AES128-SHA256 - ECDHE-RSA-AES128-SHA256 + \ - ECDHE-ECDSA-AES128-SHA - ECDHE-RSA-AES128-SHA - + ECDHE-RSA-AES256-SHA384 - ECDHE-ECDSA-AES256-SHA - ECDHE-RSA-AES256-SHA + \ - AES128-GCM-SHA256 - AES256-GCM-SHA384 - AES128-SHA256 + \ - AES128-SHA - AES256-SHA - DES-CBC3-SHA minTLSVersion: + TLSv1.0" + type: object + nullable: true + type: + description: "type is one of Old, Intermediate, Modern or Custom. 
+ Custom provides the ability to specify individual TLS security + profile parameters. Old, Intermediate and Modern are TLS security + profiles based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations + \n The profiles are intent based, so they may change over time + as new ciphers are developed and existing ciphers are found to + be insecure. Depending on precisely which ciphers are available + to a process, the list may be reduced." + type: string + status: + type: object diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml new file mode 100644 index 000000000..a5e3479f5 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_authentication.crd.yaml @@ -0,0 +1,123 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: authentications.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Authentication + listKind: AuthenticationList + plural: authentications + singular: authentication + scope: Cluster + subresources: + status: {} + versions: + - name: v1 + served: true + storage: true + "validation": + "openAPIV3Schema": + description: Authentication specifies cluster-wide settings for authentication + (like OAuth and webhook token authenticators). The canonical name of an instance + is `cluster`. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. 
Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: Standard object's metadata. + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + oauthMetadata: + description: 'oauthMetadata contains the discovery endpoint data for + OAuth 2.0 Authorization Server Metadata for an external OAuth server. + This discovery document can be viewed from its served location: oc + get --raw ''/.well-known/oauth-authorization-server'' For further + details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 + If oauthMetadata.name is non-empty, this value has precedence over + any metadata reference stored in status. The key "oauthMetadata" is + used to locate the data. If specified and the config map or expected + key is not found, no metadata is served. If the specified metadata + is not valid, no metadata is served. The namespace for this config + map is openshift-config.' + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config + map + type: string + type: + description: type identifies the cluster managed, user facing authentication + mode in use. Specifically, it manages the component that responds + to login attempts. The default is IntegratedOAuth. + type: string + webhookTokenAuthenticators: + description: webhookTokenAuthenticators configures remote token reviewers. + These remote authentication webhooks can be used to verify bearer + tokens via the tokenreviews.authentication.k8s.io REST API. This + is required to honor bearer tokens that are provisioned by an external + authentication service. The namespace for these secrets is openshift-config. 
+ type: array + items: + description: webhookTokenAuthenticator holds the necessary configuration + options for a remote token authenticator + type: object + properties: + kubeConfig: + description: 'kubeConfig contains kube config file data which + describes how to access the remote webhook service. For further + details, see: https://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication + The key "kubeConfig" is used to locate the data. If the secret + or expected key is not found, the webhook is not honored. If + the specified kube config data is not valid, the webhook is + not honored. The namespace for this secret is determined by + the point of use.' + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + status: + description: status holds observed values from the cluster. They may not + be overridden. + type: object + properties: + integratedOAuthMetadata: + description: 'integratedOAuthMetadata contains the discovery endpoint + data for OAuth 2.0 Authorization Server Metadata for the in-cluster + integrated OAuth server. This discovery document can be viewed from + its served location: oc get --raw ''/.well-known/oauth-authorization-server'' + For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 + This contains the observed value based on cluster state. An explicitly + set value in spec.oauthMetadata has precedence over this field. This + field has no meaning if authentication spec.type is not set to IntegratedOAuth. + The key "oauthMetadata" is used to locate the data. If the config + map or expected key is not found, no metadata is served. If the specified + metadata is not valid, no metadata is served. The namespace for this + config map is openshift-config-managed.' 
+ type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config + map + type: string diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml new file mode 100644 index 000000000..1f11db48d --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml @@ -0,0 +1,365 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: builds.config.openshift.io +spec: + group: config.openshift.io + scope: Cluster + names: + kind: Build + singular: build + plural: builds + listKind: BuildList + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + "validation": + "openAPIV3Schema": + description: "Build configures the behavior of OpenShift builds for the entire + cluster. This includes default settings that can be overridden in BuildConfig + objects, and overrides which are applied to all builds. \n The canonical name + is \"cluster\"" + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec holds user-settable values for the build controller configuration + type: object + properties: + additionalTrustedCA: + description: "AdditionalTrustedCA is a reference to a ConfigMap containing + additional CAs that should be trusted for image pushes and pulls during + builds. The namespace for this config map is openshift-config. \n + DEPRECATED: Additional CAs for image pull and push should be set on + image.config.openshift.io/cluster instead." + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config + map + type: string + buildDefaults: + description: BuildDefaults controls the default information for Builds + type: object + properties: + defaultProxy: + description: "DefaultProxy contains the default proxy settings for + all build operations, including image pull/push and source download. + \n Values can be overrode by setting the `HTTP_PROXY`, `HTTPS_PROXY`, + and `NO_PROXY` environment variables in the build config's strategy." + type: object + properties: + httpProxy: + description: httpProxy is the URL of the proxy for HTTP requests. Empty + means unset and will not result in an env var. + type: string + httpsProxy: + description: httpsProxy is the URL of the proxy for HTTPS requests. Empty + means unset and will not result in an env var. + type: string + noProxy: + description: noProxy is a comma-separated list of hostnames + and/or CIDRs for which the proxy should not be used. Empty + means unset and will not result in an env var. + type: string + readinessEndpoints: + description: readinessEndpoints is a list of endpoints used + to verify readiness of the proxy. 
+ type: array + items: + type: string + trustedCA: + description: "trustedCA is a reference to a ConfigMap containing + a CA certificate bundle used for client egress HTTPS connections. + The certificate bundle must be from the CA that signed the + proxy's certificate and be signed for everything. The trustedCA + field should only be consumed by a proxy validator. The validator + is responsible for reading the certificate bundle from required + key \"ca-bundle.crt\" and copying it to a ConfigMap named + \"trusted-ca-bundle\" in the \"openshift-config-managed\" + namespace. The namespace for the ConfigMap referenced by trustedCA + is \"openshift-config\". Here is an example ConfigMap (in + yaml): \n apiVersion: v1 kind: ConfigMap metadata: name: + user-ca-bundle namespace: openshift-config data: ca-bundle.crt: + | -----BEGIN CERTIFICATE----- Custom CA certificate + bundle. -----END CERTIFICATE-----" + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced + config map + type: string + env: + description: Env is a set of default environment variables that + will be applied to the build if the specified variables do not + exist on the build + type: array + items: + description: EnvVar represents an environment variable present + in a Container. + type: object + required: + - name + properties: + name: + description: Name of the environment variable. Must be a C_IDENTIFIER. + type: string + value: + description: 'Variable references $(VAR_NAME) are expanded + using the previous defined environment variables in the + container and any service environment variables. If a variable + cannot be resolved, the reference in the input string will + be unchanged. The $(VAR_NAME) syntax can be escaped with + a double $$, ie: $$(VAR_NAME). Escaped references will never + be expanded, regardless of whether the variable exists or + not. Defaults to "".' 
+ type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + type: object + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + type: object + required: + - key + properties: + key: + description: The key to select. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the ConfigMap or its + key must be defined + type: boolean + fieldRef: + description: 'Selects a field of the pod: supports metadata.name, + metadata.namespace, metadata.labels, metadata.annotations, + spec.nodeName, spec.serviceAccountName, status.hostIP, + status.podIP.' + type: object + required: + - fieldPath + properties: + apiVersion: + description: Version of the schema the FieldPath is + written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in the specified + API version. + type: string + resourceFieldRef: + description: 'Selects a resource of the container: only + resources limits and requests (limits.cpu, limits.memory, + limits.ephemeral-storage, requests.cpu, requests.memory + and requests.ephemeral-storage) are currently supported.' + type: object + required: + - resource + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + description: Specifies the output format of the exposed + resources, defaults to "1" + type: string + resource: + description: 'Required: resource to select' + type: string + secretKeyRef: + description: Selects a key of a secret in the pod's namespace + type: object + required: + - key + properties: + key: + description: The key of the secret to select from. Must + be a valid secret key. 
+ type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, + uid?' + type: string + optional: + description: Specify whether the Secret or its key + must be defined + type: boolean + gitProxy: + description: "GitProxy contains the proxy settings for git operations + only. If set, this will override any Proxy settings for all git + commands, such as git clone. \n Values that are not set here will + be inherited from DefaultProxy." + type: object + properties: + httpProxy: + description: httpProxy is the URL of the proxy for HTTP requests. Empty + means unset and will not result in an env var. + type: string + httpsProxy: + description: httpsProxy is the URL of the proxy for HTTPS requests. Empty + means unset and will not result in an env var. + type: string + noProxy: + description: noProxy is a comma-separated list of hostnames + and/or CIDRs for which the proxy should not be used. Empty + means unset and will not result in an env var. + type: string + readinessEndpoints: + description: readinessEndpoints is a list of endpoints used + to verify readiness of the proxy. + type: array + items: + type: string + trustedCA: + description: "trustedCA is a reference to a ConfigMap containing + a CA certificate bundle used for client egress HTTPS connections. + The certificate bundle must be from the CA that signed the + proxy's certificate and be signed for everything. The trustedCA + field should only be consumed by a proxy validator. The validator + is responsible for reading the certificate bundle from required + key \"ca-bundle.crt\" and copying it to a ConfigMap named + \"trusted-ca-bundle\" in the \"openshift-config-managed\" + namespace. The namespace for the ConfigMap referenced by trustedCA + is \"openshift-config\". 
Here is an example ConfigMap (in + yaml): \n apiVersion: v1 kind: ConfigMap metadata: name: + user-ca-bundle namespace: openshift-config data: ca-bundle.crt: + | -----BEGIN CERTIFICATE----- Custom CA certificate + bundle. -----END CERTIFICATE-----" + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced + config map + type: string + imageLabels: + description: ImageLabels is a list of docker labels that are applied + to the resulting image. User can override a default label by providing + a label with the same name in their Build/BuildConfig. + type: array + items: + type: object + properties: + name: + description: Name defines the name of the label. It must have + non-zero length. + type: string + value: + description: Value defines the literal value of the label. + type: string + resources: + description: Resources defines resource requirements to execute + the build. + type: object + properties: + limits: + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + additionalProperties: + type: string + requests: + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' + type: object + additionalProperties: + type: string + buildOverrides: + description: BuildOverrides controls override settings for builds + type: object + properties: + imageLabels: + description: ImageLabels is a list of docker labels that are applied + to the resulting image. If user provided a label in their Build/BuildConfig + with the same name as one in this list, the user's label will + be overwritten. 
+ type: array + items: + type: object + properties: + name: + description: Name defines the name of the label. It must have + non-zero length. + type: string + value: + description: Value defines the literal value of the label. + type: string + nodeSelector: + description: NodeSelector is a selector which must be true for the + build pod to fit on a node + type: object + additionalProperties: + type: string + tolerations: + description: Tolerations is a list of Tolerations that will override + any existing tolerations set on a build pod. + type: array + items: + description: The pod this Toleration is attached to tolerates + any taint that matches the triple using the + matching operator . + type: object + properties: + effect: + description: Effect indicates the taint effect to match. Empty + means match all taint effects. When specified, allowed values + are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies + to. Empty means match all taint keys. If the key is empty, + operator must be Exists; this combination means to match + all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the + value. Valid operators are Exists and Equal. Defaults to + Equal. Exists is equivalent to wildcard for value, so that + a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time + the toleration (which must be of effect NoExecute, otherwise + this field is ignored) tolerates the taint. By default, + it is not set, which means tolerate the taint forever (do + not evict). Zero and negative values will be treated as + 0 (evict immediately) by the system. + type: integer + format: int64 + value: + description: Value is the taint value the toleration matches + to. If the operator is Exists, the value should be empty, + otherwise just a regular string. 
+ type: string diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_console.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_console.crd.yaml new file mode 100644 index 000000000..8d07e581f --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_console.crd.yaml @@ -0,0 +1,70 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: consoles.config.openshift.io +spec: + scope: Cluster + group: config.openshift.io + names: + kind: Console + listKind: ConsoleList + plural: consoles + singular: console + subresources: + status: {} + versions: + - name: v1 + served: true + storage: true + "validation": + "openAPIV3Schema": + description: Console holds cluster-wide configuration for the web console, including + the logout URL, and reports the public URL of the console. The canonical name + is `cluster`. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: Standard object's metadata. + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + authentication: + description: ConsoleAuthentication defines a list of optional configuration + for console authentication. 
+ type: object + properties: + logoutRedirect: + description: 'An optional, absolute URL to redirect web browsers + to after logging out of the console. If not specified, it will + redirect to the default login page. This is required when using + an identity provider that supports single sign-on (SSO) such as: + - OpenID (Keycloak, Azure) - RequestHeader (GSSAPI, SSPI, SAML) + - OAuth (GitHub, GitLab, Google) Logging out of the console will + destroy the user''s token. The logoutRedirect provides the user + the option to perform single logout (SLO) through the identity + provider to destroy their single sign-on session.' + type: string + pattern: ^$|^((https):\/\/?)[^\s()<>]+(?:\([\w\d]+\)|([^[:punct:]\s]|\/?))$ + status: + description: status holds observed values from the cluster. They may not + be overridden. + type: object + properties: + consoleURL: + description: The URL for the console. This will be derived from the + host for the route that is created for the console. + type: string diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns.crd.yaml new file mode 100644 index 000000000..dba624615 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns.crd.yaml @@ -0,0 +1,100 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: dnses.config.openshift.io +spec: + group: config.openshift.io + names: + kind: DNS + listKind: DNSList + plural: dnses + singular: dns + scope: Cluster + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + "validation": + "openAPIV3Schema": + description: DNS holds cluster-wide information about DNS. The canonical name + is `cluster` + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. 
Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: Standard object's metadata. + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + baseDomain: + description: "baseDomain is the base domain of the cluster. All managed + DNS records will be sub-domains of this base. \n For example, given + the base domain `openshift.example.com`, an API server DNS record + may be created for `cluster-api.openshift.example.com`. \n Once set, + this field cannot be changed." + type: string + privateZone: + description: "privateZone is the location where all the DNS records + that are only available internally to the cluster exist. \n If this + field is nil, no private records should be created. \n Once set, this + field cannot be changed." + type: object + properties: + id: + description: "id is the identifier that can be used to find the + DNS hosted zone. \n on AWS zone can be fetched using `ID` as id + in [1] on Azure zone can be fetched using `ID` as a pre-determined + name in [2], on GCP zone can be fetched using `ID` as a pre-determined + name in [3]. 
\n [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options + [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show + [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get" + type: string + tags: + description: "tags can be used to query the DNS hosted zone. \n + on AWS, resourcegroupstaggingapi [1] can be used to fetch a zone + using `Tags` as tag-filters, \n [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options" + type: object + additionalProperties: + type: string + publicZone: + description: "publicZone is the location where all the DNS records that + are publicly accessible to the internet exist. \n If this field is + nil, no public records should be created. \n Once set, this field + cannot be changed." + type: object + properties: + id: + description: "id is the identifier that can be used to find the + DNS hosted zone. \n on AWS zone can be fetched using `ID` as id + in [1] on Azure zone can be fetched using `ID` as a pre-determined + name in [2], on GCP zone can be fetched using `ID` as a pre-determined + name in [3]. \n [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options + [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show + [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get" + type: string + tags: + description: "tags can be used to query the DNS hosted zone. \n + on AWS, resourcegroupstaggingapi [1] can be used to fetch a zone + using `Tags` as tag-filters, \n [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options" + type: object + additionalProperties: + type: string + status: + description: status holds observed values from the cluster. They may not + be overridden. 
+ type: object diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml new file mode 100644 index 000000000..fedbdb813 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml @@ -0,0 +1,76 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: featuregates.config.openshift.io +spec: + group: config.openshift.io + version: v1 + scope: Cluster + names: + kind: FeatureGate + singular: featuregate + plural: featuregates + listKind: FeatureGateList + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + "validation": + "openAPIV3Schema": + description: Feature holds cluster-wide information about feature gates. The + canonical name is `cluster` + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: Standard object's metadata. + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + customNoUpgrade: + description: customNoUpgrade allows the enabling or disabling of any + feature. Turning this feature set on IS NOT SUPPORTED, CANNOT BE UNDONE, + and PREVENTS UPGRADES. 
Because of its nature, this setting cannot + be validated. If you have any typos or accidentally apply invalid + combinations your cluster may fail in an unrecoverable way. featureSet + must equal "CustomNoUpgrade" must be set to use this field. + type: object + properties: + disabled: + description: disabled is a list of all feature gates that you want + to force off + type: array + items: + type: string + enabled: + description: enabled is a list of all feature gates that you want + to force on + type: array + items: + type: string + nullable: true + featureSet: + description: featureSet changes the list of features in the cluster. The + default is empty. Be very careful adjusting this setting. Turning + on or off features may cause irreversible changes in your cluster + which cannot be undone. + type: string + status: + description: status holds observed values from the cluster. They may not + be overridden. + type: object diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_image.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_image.crd.yaml new file mode 100644 index 000000000..17ef92990 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_image.crd.yaml @@ -0,0 +1,144 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: images.config.openshift.io +spec: + group: config.openshift.io + scope: Cluster + names: + kind: Image + singular: image + plural: images + listKind: ImageList + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + "validation": + "openAPIV3Schema": + description: Image governs policies related to imagestream imports and runtime + configuration for external registries. It allows cluster admins to configure + which registries OpenShift is allowed to import images from, extra CA trust + bundles for external registries, and policies to blacklist/whitelist registry + hostnames. 
When exposing OpenShift's image registry to the public, this also + lets cluster admins specify the external hostname. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: Standard object's metadata. + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + additionalTrustedCA: + description: additionalTrustedCA is a reference to a ConfigMap containing + additional CAs that should be trusted during imagestream import, pod + image pull, build image pull, and imageregistry pullthrough. The namespace + for this config map is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config + map + type: string + allowedRegistriesForImport: + description: allowedRegistriesForImport limits the container image registries + that normal users may import images from. Set this list to the registries + that you trust to contain valid Docker images and that you want applications + to be able to import from. Users with permission to create Images + or ImageStreamMappings via the API are not affected by this policy + - typically only administrators or system integrations will have those + permissions. 
+ type: array + items: + description: RegistryLocation contains a location of the registry + specified by the registry domain name. The domain name might include + wildcards, like '*' or '??'. + type: object + properties: + domainName: + description: domainName specifies a domain name for the registry + In case the registry use non-standard (80 or 443) port, the + port should be included in the domain name as well. + type: string + insecure: + description: insecure indicates whether the registry is secure + (https) or insecure (http) By default (if not specified) the + registry is assumed as secure. + type: boolean + externalRegistryHostnames: + description: externalRegistryHostnames provides the hostnames for the + default external image registry. The external hostname should be set + only when the image registry is exposed externally. The first value + is used in 'publicDockerImageRepository' field in ImageStreams. The + value must be in "hostname[:port]" format. + type: array + items: + type: string + registrySources: + description: registrySources contains configuration that determines + how the container runtime should treat individual registries when + accessing images for builds+pods. (e.g. whether or not to allow insecure + access). It does not contain configuration for the internal cluster + registry. + type: object + properties: + allowedRegistries: + description: "allowedRegistries are whitelisted for image pull/push. + All other registries are blocked. \n Only one of BlockedRegistries + or AllowedRegistries may be set." + type: array + items: + type: string + blockedRegistries: + description: "blockedRegistries are blacklisted from image pull/push. + All other registries are allowed. \n Only one of BlockedRegistries + or AllowedRegistries may be set." + type: array + items: + type: string + insecureRegistries: + description: insecureRegistries are registries which do not have + a valid TLS certificates or only support HTTP connections. 
+ type: array + items: + type: string + status: + description: status holds observed values from the cluster. They may not + be overridden. + type: object + properties: + externalRegistryHostnames: + description: externalRegistryHostnames provides the hostnames for the + default external image registry. The external hostname should be set + only when the image registry is exposed externally. The first value + is used in 'publicDockerImageRepository' field in ImageStreams. The + value must be in "hostname[:port]" format. + type: array + items: + type: string + internalRegistryHostname: + description: internalRegistryHostname sets the hostname for the default + internal image registry. The value must be in "hostname[:port]" format. + This value is set by the image registry operator which controls the + internal registry hostname. For backward compatibility, users can + still use OPENSHIFT_DEFAULT_REGISTRY environment variable but this + setting overrides the environment variable. + type: string diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml new file mode 100644 index 000000000..39bccd24e --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure.crd.yaml @@ -0,0 +1,221 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: infrastructures.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Infrastructure + listKind: InfrastructureList + plural: infrastructures + singular: infrastructure + scope: Cluster + versions: + - name: v1 + served: true + storage: true + "validation": + "openAPIV3Schema": + description: Infrastructure holds cluster-wide information about Infrastructure. 
The + canonical name is `cluster` + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: Standard object's metadata. + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + cloudConfig: + description: cloudConfig is a reference to a ConfigMap containing the + cloud provider configuration file. This configuration file is used + to configure the Kubernetes cloud provider integration when using + the built-in cloud provider integration or the external cloud controller + manager. The namespace for this config map is openshift-config. + type: object + properties: + key: + description: Key allows pointing to a specific key/value inside + of the configmap. This is useful for logical file references. + type: string + name: + type: string + status: + description: status holds observed values from the cluster. They may not + be overridden. + type: object + properties: + apiServerInternalURI: + description: apiServerInternalURL is a valid URI with scheme(http/https), + address and port. apiServerInternalURL can be used by components + like kubelets, to contact the Kubernetes API server using the infrastructure + provider rather than Kubernetes networking. 
+ type: string + apiServerURL: + description: apiServerURL is a valid URI with scheme(http/https), address + and port. apiServerURL can be used by components like the web console + to tell users where to find the Kubernetes API. + type: string + etcdDiscoveryDomain: + description: 'etcdDiscoveryDomain is the domain used to fetch the SRV + records for discovering etcd servers and clients. For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery' + type: string + infrastructureName: + description: infrastructureName uniquely identifies a cluster with a + human friendly name. Once set it should not be changed. Must be of + max length 27 and must have only alphanumeric or hyphen characters. + type: string + platform: + description: "platform is the underlying infrastructure provider for + the cluster. \n Deprecated: Use platformStatus.type instead." + type: string + platformStatus: + description: platformStatus holds status information specific to the + underlying infrastructure provider. + type: object + properties: + aws: + description: AWS contains settings specific to the Amazon Web Services + infrastructure provider. + type: object + properties: + region: + description: region holds the default AWS region for new AWS + resources created by the cluster. + type: string + azure: + description: Azure contains settings specific to the Azure infrastructure + provider. + type: object + properties: + networkResourceGroupName: + description: networkResourceGroupName is the Resource Group + for network resources like the Virtual Network and Subnets + used by the cluster. If empty, the value is same as ResourceGroupName. + type: string + resourceGroupName: + description: resourceGroupName is the Resource Group for new + Azure resources created for the cluster. + type: string + baremetal: + description: BareMetal contains settings specific to the BareMetal + platform. 
+ type: object + properties: + apiServerInternalIP: + description: apiServerInternalIP is an IP address to contact + the Kubernetes API server that can be used by components inside + the cluster, like kubelets using the infrastructure rather + than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI + points to. It is the IP for a self-hosted load balancer in + front of the API servers. + type: string + ingressIP: + description: ingressIP is an external IP which routes to the + default ingress controller. The IP is a suitable target of + a wildcard DNS record used to resolve default route host names. + type: string + nodeDNSIP: + description: nodeDNSIP is the IP address for the internal DNS + used by the nodes. Unlike the one managed by the DNS operator, + `NodeDNSIP` provides name resolution for the nodes themselves. + There is no DNS-as-a-service for BareMetal deployments. In + order to minimize necessary changes to the datacenter DNS, + a DNS service is hosted as a static pod to serve those hostnames + to the nodes in the cluster. + type: string + gcp: + description: GCP contains settings specific to the Google Cloud + Platform infrastructure provider. + type: object + properties: + projectID: + description: resourceGroupName is the Project ID for new GCP + resources created for the cluster. + type: string + region: + description: region holds the region for new GCP resources created + for the cluster. + type: string + openstack: + description: OpenStack contains settings specific to the OpenStack + infrastructure provider. + type: object + properties: + apiServerInternalIP: + description: apiServerInternalIP is an IP address to contact + the Kubernetes API server that can be used by components inside + the cluster, like kubelets using the infrastructure rather + than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI + points to. 
It is the IP for a self-hosted load balancer in + front of the API servers. + type: string + cloudName: + description: cloudName is the name of the desired OpenStack + cloud in the client configuration file (`clouds.yaml`). + type: string + ingressIP: + description: ingressIP is an external IP which routes to the + default ingress controller. The IP is a suitable target of + a wildcard DNS record used to resolve default route host names. + type: string + nodeDNSIP: + description: nodeDNSIP is the IP address for the internal DNS + used by the nodes. Unlike the one managed by the DNS operator, + `NodeDNSIP` provides name resolution for the nodes themselves. + There is no DNS-as-a-service for OpenStack deployments. In + order to minimize necessary changes to the datacenter DNS, + a DNS service is hosted as a static pod to serve those hostnames + to the nodes in the cluster. + type: string + ovirt: + description: Ovirt contains settings specific to the oVirt infrastructure + provider. + type: object + properties: + apiServerInternalIP: + description: apiServerInternalIP is an IP address to contact + the Kubernetes API server that can be used by components inside + the cluster, like kubelets using the infrastructure rather + than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI + points to. It is the IP for a self-hosted load balancer in + front of the API servers. + type: string + ingressIP: + description: ingressIP is an external IP which routes to the + default ingress controller. The IP is a suitable target of + a wildcard DNS record used to resolve default route host names. + type: string + nodeDNSIP: + description: nodeDNSIP is the IP address for the internal DNS + used by the nodes. Unlike the one managed by the DNS operator, + `NodeDNSIP` provides name resolution for the nodes themselves. + There is no DNS-as-a-service for oVirt deployments. 
In order + to minimize necessary changes to the datacenter DNS, a DNS + service is hosted as a static pod to serve those hostnames + to the nodes in the cluster. + type: string + type: + description: type is the underlying infrastructure provider for + the cluster. This value controls whether infrastructure automation + such as service load balancers, dynamic volume provisioning, machine + creation and deletion, and other integrations are enabled. If + None, no infrastructure automation is enabled. Allowed values + are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", "OpenStack", + "VSphere", "oVirt", and "None". Individual components may not + support all platforms, and must handle unrecognized platforms + as None if they do not support that platform. + type: string diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml new file mode 100644 index 000000000..2812ce85b --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_ingress.crd.yaml @@ -0,0 +1,55 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: ingresses.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Ingress + listKind: IngressList + plural: ingresses + singular: ingress + scope: Cluster + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + "validation": + "openAPIV3Schema": + description: Ingress holds cluster-wide information about ingress, including + the default ingress domain used for routes. The canonical name is `cluster`. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: Standard object's metadata. + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + domain: + description: "domain is used to generate a default host name for a route + when the route's host name is empty. The generated host name will + follow this pattern: \"..\". + \n It is also used as the default wildcard domain suffix for ingress. + The default ingresscontroller domain will follow this pattern: \"*.\". + \n Once set, changing domain is not currently supported." + type: string + status: + description: status holds observed values from the cluster. They may not + be overridden. + type: object diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network.crd.yaml new file mode 100644 index 000000000..4eacca11b --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_network.crd.yaml @@ -0,0 +1,141 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: networks.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Network + listKind: NetworkList + plural: networks + singular: network + scope: Cluster + versions: + - name: v1 + served: true + storage: true + "validation": + "openAPIV3Schema": + description: 'Network holds cluster-wide information about Network. The canonical + name is `cluster`. 
It is used to configure the desired network configuration, + such as: IP address pools for services/pod IPs, network plugin, etc. Please + view network.spec for an explanation on what applies when configuring this + resource.' + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: Standard object's metadata. + type: object + spec: + description: spec holds user settable values for configuration. As a general + rule, this SHOULD NOT be read directly. Instead, you should consume the + NetworkStatus, as it indicates the currently deployed configuration. Currently, + most spec fields are immutable after installation. Please view the individual + ones for further details on each. + type: object + properties: + clusterNetwork: + description: IP address pool to use for pod IPs. This field is immutable + after installation. + type: array + items: + description: ClusterNetworkEntry is a contiguous block of IP addresses + from which pod IPs are allocated. + type: object + properties: + cidr: + description: The complete block for pod IPs. + type: string + hostPrefix: + description: The size (prefix) of block to allocate to each node. + type: integer + format: int32 + minimum: 0 + externalIP: + description: externalIP defines configuration for controllers that affect + Service.ExternalIP. 
If nil, then ExternalIP is not allowed to be set. + type: object + properties: + autoAssignCIDRs: + description: autoAssignCIDRs is a list of CIDRs from which to automatically + assign Service.ExternalIP. These are assigned when the service + is of type LoadBalancer. In general, this is only useful for bare-metal + clusters. In Openshift 3.x, this was misleadingly called "IngressIPs". + Automatically assigned External IPs are not affected by any ExternalIPPolicy + rules. Currently, only one entry may be provided. + type: array + items: + type: string + policy: + description: policy is a set of restrictions applied to the ExternalIP + field. If nil or empty, then ExternalIP is not allowed to be set. + type: object + properties: + allowedCIDRs: + description: allowedCIDRs is the list of allowed CIDRs. + type: array + items: + type: string + rejectedCIDRs: + description: rejectedCIDRs is the list of disallowed CIDRs. + These take precedence over allowedCIDRs. + type: array + items: + type: string + networkType: + description: 'NetworkType is the plugin that is to be deployed (e.g. + OpenShiftSDN). This should match a value that the cluster-network-operator + understands, or else no networking will be installed. Currently supported + values are: - OpenShiftSDN This field is immutable after installation.' + type: string + serviceNetwork: + description: IP address pool for services. Currently, we only support + a single entry here. This field is immutable after installation. + type: array + items: + type: string + status: + description: status holds observed values from the cluster. They may not + be overridden. + type: object + properties: + clusterNetwork: + description: IP address pool to use for pod IPs. + type: array + items: + description: ClusterNetworkEntry is a contiguous block of IP addresses + from which pod IPs are allocated. + type: object + properties: + cidr: + description: The complete block for pod IPs. 
+ type: string + hostPrefix: + description: The size (prefix) of block to allocate to each node. + type: integer + format: int32 + minimum: 0 + clusterNetworkMTU: + description: ClusterNetworkMTU is the MTU for inter-pod networking. + type: integer + networkType: + description: NetworkType is the plugin that is deployed (e.g. OpenShiftSDN). + type: string + serviceNetwork: + description: IP address pool for services. Currently, we only support + a single entry here. + type: array + items: + type: string diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml new file mode 100644 index 000000000..6a7b43ccb --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_oauth.crd.yaml @@ -0,0 +1,660 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: oauths.config.openshift.io +spec: + group: config.openshift.io + names: + kind: OAuth + listKind: OAuthList + plural: oauths + singular: oauth + scope: Cluster + subresources: + status: {} + versions: + - name: v1 + served: true + storage: true + "validation": + "openAPIV3Schema": + description: OAuth holds cluster-wide information about OAuth. The canonical + name is `cluster`. It is used to configure the integrated OAuth server. This + configuration is only honored when the top level Authentication config has + type set to IntegratedOAuth. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. 
Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: OAuthSpec contains desired cluster auth configuration + type: object + properties: + identityProviders: + description: identityProviders is an ordered list of ways for a user + to identify themselves. When this list is empty, no identities are + provisioned for users. + type: array + items: + description: IdentityProvider provides identities for users authenticating + using credentials + type: object + properties: + basicAuth: + description: basicAuth contains configuration options for the + BasicAuth IdP + type: object + properties: + ca: + description: ca is an optional reference to a config map by + name containing the PEM-encoded CA bundle. It is used as + a trust anchor to validate the TLS certificate presented + by the remote server. The key "ca.crt" is used to locate + the data. If specified and the config map or expected key + is not found, the identity provider is not honored. If the + specified ca data is not valid, the identity provider is + not honored. If empty, the default system roots are used. + The namespace for this config map is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced + config map + type: string + tlsClientCert: + description: tlsClientCert is an optional reference to a secret + by name that contains the PEM-encoded TLS client certificate + to present when connecting to the server. The key "tls.crt" + is used to locate the data. If specified and the secret + or expected key is not found, the identity provider is not + honored. If the specified certificate data is not valid, + the identity provider is not honored. The namespace for + this secret is openshift-config. 
+ type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced + secret + type: string + tlsClientKey: + description: tlsClientKey is an optional reference to a secret + by name that contains the PEM-encoded TLS private key for + the client certificate referenced in tlsClientCert. The + key "tls.key" is used to locate the data. If specified and + the secret or expected key is not found, the identity provider + is not honored. If the specified certificate data is not + valid, the identity provider is not honored. The namespace + for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced + secret + type: string + url: + description: url is the remote URL to connect to + type: string + github: + description: github enables user authentication using GitHub credentials + type: object + properties: + ca: + description: ca is an optional reference to a config map by + name containing the PEM-encoded CA bundle. It is used as + a trust anchor to validate the TLS certificate presented + by the remote server. The key "ca.crt" is used to locate + the data. If specified and the config map or expected key + is not found, the identity provider is not honored. If the + specified ca data is not valid, the identity provider is + not honored. If empty, the default system roots are used. + This can only be configured when hostname is set to a non-empty + value. The namespace for this config map is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced + config map + type: string + clientID: + description: clientID is the oauth client ID + type: string + clientSecret: + description: clientSecret is a required reference to the secret + by name containing the oauth client secret. The key "clientSecret" + is used to locate the data. 
If the secret or expected key + is not found, the identity provider is not honored. The + namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced + secret + type: string + hostname: + description: hostname is the optional domain (e.g. "mycompany.com") + for use with a hosted instance of GitHub Enterprise. It + must match the GitHub Enterprise settings value configured + at /setup/settings#hostname. + type: string + organizations: + description: organizations optionally restricts which organizations + are allowed to log in + type: array + items: + type: string + teams: + description: teams optionally restricts which teams are allowed + to log in. Format is /. + type: array + items: + type: string + gitlab: + description: gitlab enables user authentication using GitLab credentials + type: object + properties: + ca: + description: ca is an optional reference to a config map by + name containing the PEM-encoded CA bundle. It is used as + a trust anchor to validate the TLS certificate presented + by the remote server. The key "ca.crt" is used to locate + the data. If specified and the config map or expected key + is not found, the identity provider is not honored. If the + specified ca data is not valid, the identity provider is + not honored. If empty, the default system roots are used. + The namespace for this config map is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced + config map + type: string + clientID: + description: clientID is the oauth client ID + type: string + clientSecret: + description: clientSecret is a required reference to the secret + by name containing the oauth client secret. The key "clientSecret" + is used to locate the data. If the secret or expected key + is not found, the identity provider is not honored. 
The + namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced + secret + type: string + url: + description: url is the oauth server base URL + type: string + google: + description: google enables user authentication using Google credentials + type: object + properties: + clientID: + description: clientID is the oauth client ID + type: string + clientSecret: + description: clientSecret is a required reference to the secret + by name containing the oauth client secret. The key "clientSecret" + is used to locate the data. If the secret or expected key + is not found, the identity provider is not honored. The + namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced + secret + type: string + hostedDomain: + description: hostedDomain is the optional Google App domain + (e.g. "mycompany.com") to restrict logins to + type: string + htpasswd: + description: htpasswd enables user authentication using an HTPasswd + file to validate credentials + type: object + properties: + fileData: + description: fileData is a required reference to a secret + by name containing the data to use as the htpasswd file. + The key "htpasswd" is used to locate the data. If the secret + or expected key is not found, the identity provider is not + honored. If the specified htpasswd data is not valid, the + identity provider is not honored. The namespace for this + secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced + secret + type: string + keystone: + description: keystone enables user authentication using keystone + password credentials + type: object + properties: + ca: + description: ca is an optional reference to a config map by + name containing the PEM-encoded CA bundle. 
It is used as + a trust anchor to validate the TLS certificate presented + by the remote server. The key "ca.crt" is used to locate + the data. If specified and the config map or expected key + is not found, the identity provider is not honored. If the + specified ca data is not valid, the identity provider is + not honored. If empty, the default system roots are used. + The namespace for this config map is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced + config map + type: string + domainName: + description: domainName is required for keystone v3 + type: string + tlsClientCert: + description: tlsClientCert is an optional reference to a secret + by name that contains the PEM-encoded TLS client certificate + to present when connecting to the server. The key "tls.crt" + is used to locate the data. If specified and the secret + or expected key is not found, the identity provider is not + honored. If the specified certificate data is not valid, + the identity provider is not honored. The namespace for + this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced + secret + type: string + tlsClientKey: + description: tlsClientKey is an optional reference to a secret + by name that contains the PEM-encoded TLS private key for + the client certificate referenced in tlsClientCert. The + key "tls.key" is used to locate the data. If specified and + the secret or expected key is not found, the identity provider + is not honored. If the specified certificate data is not + valid, the identity provider is not honored. The namespace + for this secret is openshift-config. 
+ type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced + secret + type: string + url: + description: url is the remote URL to connect to + type: string + ldap: + description: ldap enables user authentication using LDAP credentials + type: object + properties: + attributes: + description: attributes maps LDAP attributes to identities + type: object + properties: + email: + description: email is the list of attributes whose values + should be used as the email address. Optional. If unspecified, + no email is set for the identity + type: array + items: + type: string + id: + description: id is the list of attributes whose values + should be used as the user ID. Required. First non-empty + attribute is used. At least one attribute is required. + If none of the listed attribute have a value, authentication + fails. LDAP standard identity attribute is "dn" + type: array + items: + type: string + name: + description: name is the list of attributes whose values + should be used as the display name. Optional. If unspecified, + no display name is set for the identity LDAP standard + display name attribute is "cn" + type: array + items: + type: string + preferredUsername: + description: preferredUsername is the list of attributes + whose values should be used as the preferred username. + LDAP standard login attribute is "uid" + type: array + items: + type: string + bindDN: + description: bindDN is an optional DN to bind with during + the search phase. + type: string + bindPassword: + description: bindPassword is an optional reference to a secret + by name containing a password to bind with during the search + phase. The key "bindPassword" is used to locate the data. + If specified and the secret or expected key is not found, + the identity provider is not honored. The namespace for + this secret is openshift-config. 
+ type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced + secret + type: string + ca: + description: ca is an optional reference to a config map by + name containing the PEM-encoded CA bundle. It is used as + a trust anchor to validate the TLS certificate presented + by the remote server. The key "ca.crt" is used to locate + the data. If specified and the config map or expected key + is not found, the identity provider is not honored. If the + specified ca data is not valid, the identity provider is + not honored. If empty, the default system roots are used. + The namespace for this config map is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced + config map + type: string + insecure: + description: 'insecure, if true, indicates the connection + should not use TLS WARNING: Should not be set to `true` + with the URL scheme "ldaps://" as "ldaps://" URLs always attempt + to connect using TLS, even when `insecure` is set to `true` + When `true`, "ldap://" URLS connect insecurely. When `false`, + "ldap://" URLs are upgraded to a TLS connection using StartTLS + as specified in https://tools.ietf.org/html/rfc2830.' + type: boolean + url: + description: 'url is an RFC 2255 URL which specifies the LDAP + search parameters to use. The syntax of the URL is: ldap://host:port/basedn?attribute?scope?filter' + type: string + mappingMethod: + description: mappingMethod determines how identities from this + provider are mapped to users Defaults to "claim" + type: string + name: + description: 'name is used to qualify the identities returned + by this provider. - It MUST be unique and not shared by any + other identity provider used - It MUST be a valid path segment: + name cannot equal "." or ".." 
or contain "/" or "%" or ":" Ref: + https://godoc.org/github.com/openshift/origin/pkg/user/apis/user/validation#ValidateIdentityProviderName' + type: string + openID: + description: openID enables user authentication using OpenID credentials + type: object + properties: + ca: + description: ca is an optional reference to a config map by + name containing the PEM-encoded CA bundle. It is used as + a trust anchor to validate the TLS certificate presented + by the remote server. The key "ca.crt" is used to locate + the data. If specified and the config map or expected key + is not found, the identity provider is not honored. If the + specified ca data is not valid, the identity provider is + not honored. If empty, the default system roots are used. + The namespace for this config map is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced + config map + type: string + claims: + description: claims mappings + type: object + properties: + email: + description: email is the list of claims whose values + should be used as the email address. Optional. If unspecified, + no email is set for the identity + type: array + items: + type: string + name: + description: name is the list of claims whose values should + be used as the display name. Optional. If unspecified, + no display name is set for the identity + type: array + items: + type: string + preferredUsername: + description: preferredUsername is the list of claims whose + values should be used as the preferred username. If + unspecified, the preferred username is determined from + the value of the sub claim + type: array + items: + type: string + clientID: + description: clientID is the oauth client ID + type: string + clientSecret: + description: clientSecret is a required reference to the secret + by name containing the oauth client secret. The key "clientSecret" + is used to locate the data. 
If the secret or expected key + is not found, the identity provider is not honored. The + namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced + secret + type: string + extraAuthorizeParameters: + description: extraAuthorizeParameters are any custom parameters + to add to the authorize request. + type: object + additionalProperties: + type: string + extraScopes: + description: extraScopes are any scopes to request in addition + to the standard "openid" scope. + type: array + items: + type: string + issuer: + description: issuer is the URL that the OpenID Provider asserts + as its Issuer Identifier. It must use the https scheme with + no query or fragment component. + type: string + requestHeader: + description: requestHeader enables user authentication using request + header credentials + type: object + properties: + ca: + description: ca is a required reference to a config map by + name containing the PEM-encoded CA bundle. It is used as + a trust anchor to validate the TLS certificate presented + by the remote server. Specifically, it allows verification + of incoming requests to prevent header spoofing. The key + "ca.crt" is used to locate the data. If the config map or + expected key is not found, the identity provider is not + honored. If the specified ca data is not valid, the identity + provider is not honored. The namespace for this config map + is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced + config map + type: string + challengeURL: + description: challengeURL is a URL to redirect unauthenticated + /authorize requests to Unauthenticated requests from OAuth + clients which expect WWW-Authenticate challenges will be + redirected here. 
${url} is replaced with the current URL, + escaped to be safe in a query parameter https://www.example.com/sso-login?then=${url} + ${query} is replaced with the current query string https://www.example.com/auth-proxy/oauth/authorize?${query} + Required when challenge is set to true. + type: string + clientCommonNames: + description: clientCommonNames is an optional list of common + names to require a match from. If empty, any client certificate + validated against the clientCA bundle is considered authoritative. + type: array + items: + type: string + emailHeaders: + description: emailHeaders is the set of headers to check for + the email address + type: array + items: + type: string + headers: + description: headers is the set of headers to check for identity + information + type: array + items: + type: string + loginURL: + description: loginURL is a URL to redirect unauthenticated + /authorize requests to Unauthenticated requests from OAuth + clients which expect interactive logins will be redirected + here ${url} is replaced with the current URL, escaped to + be safe in a query parameter https://www.example.com/sso-login?then=${url} + ${query} is replaced with the current query string https://www.example.com/auth-proxy/oauth/authorize?${query} + Required when login is set to true. + type: string + nameHeaders: + description: nameHeaders is the set of headers to check for + the display name + type: array + items: + type: string + preferredUsernameHeaders: + description: preferredUsernameHeaders is the set of headers + to check for the preferred username + type: array + items: + type: string + type: + description: type identifies the identity provider type for this + entry. + type: string + templates: + description: templates allow you to customize pages like the login page. + type: object + properties: + error: + description: error is the name of a secret that specifies a go template + to use to render error pages during the authentication or grant + flow. 
The key "errors.html" is used to locate the template data. + If specified and the secret or expected key is not found, the + default error page is used. If the specified template is not valid, + the default error page is used. If unspecified, the default error + page is used. The namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + login: + description: login is the name of a secret that specifies a go template + to use to render the login page. The key "login.html" is used + to locate the template data. If specified and the secret or expected + key is not found, the default login page is used. If the specified + template is not valid, the default login page is used. If unspecified, + the default login page is used. The namespace for this secret + is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + providerSelection: + description: providerSelection is the name of a secret that specifies + a go template to use to render the provider selection page. The + key "providers.html" is used to locate the template data. If specified + and the secret or expected key is not found, the default provider + selection page is used. If the specified template is not valid, + the default provider selection page is used. If unspecified, the + default provider selection page is used. The namespace for this + secret is openshift-config. 
+ type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + tokenConfig: + description: tokenConfig contains options for authorization and access + tokens + type: object + properties: + accessTokenInactivityTimeoutSeconds: + description: 'accessTokenInactivityTimeoutSeconds defines the default + token inactivity timeout for tokens granted by any client. The + value represents the maximum amount of time that can occur between + consecutive uses of the token. Tokens become invalid if they are + not used within this temporal window. The user will need to acquire + a new token to regain access once a token times out. Valid values + are integer values: x < 0 Tokens time out is enabled but tokens + never timeout unless configured per client (e.g. `-1`) x = 0 Tokens + time out is disabled (default) x > 0 Tokens time out if there + is no activity for x seconds The current minimum allowed value + for X is 300 (5 minutes)' + type: integer + format: int32 + accessTokenMaxAgeSeconds: + description: accessTokenMaxAgeSeconds defines the maximum age of + access tokens + type: integer + format: int32 + status: + description: OAuthStatus shows current known state of OAuth server in the + cluster + type: object diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_project.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_project.crd.yaml new file mode 100644 index 000000000..c1feca7b4 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_project.crd.yaml @@ -0,0 +1,63 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: projects.config.openshift.io +spec: + group: config.openshift.io + scope: Cluster + versions: + - name: v1 + served: true + storage: true + names: + kind: Project + listKind: ProjectList + plural: projects + singular: project + subresources: + status: {} 
+ "validation": + "openAPIV3Schema": + description: Project holds cluster-wide information about Project. The canonical + name is `cluster` + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: Standard object's metadata. + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + projectRequestMessage: + description: projectRequestMessage is the string presented to a user + if they are unable to request a project via the projectrequest api + endpoint + type: string + projectRequestTemplate: + description: projectRequestTemplate is the template to use for creating + projects in response to projectrequest. This must point to a template + in 'openshift-config' namespace. It is optional. If it is not specified, + a default template is used. + type: object + properties: + name: + description: name is the metadata.name of the referenced project + request template + type: string + status: + description: status holds observed values from the cluster. They may not + be overridden. 
+ type: object diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler.crd.yaml new file mode 100644 index 000000000..43c9d8b2b --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_scheduler.crd.yaml @@ -0,0 +1,88 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: schedulers.config.openshift.io +spec: + group: config.openshift.io + scope: Cluster + names: + kind: Scheduler + singular: scheduler + plural: schedulers + listKind: SchedulerList + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + "validation": + "openAPIV3Schema": + description: Scheduler holds cluster-wide config information to run the Kubernetes + Scheduler and influence its placement decisions. The canonical name for this + config is `cluster`. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: Standard object's metadata. 
+ type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + defaultNodeSelector: + description: 'defaultNodeSelector helps set the cluster-wide default + node selector to restrict pod placement to specific nodes. This is + applied to the pods created in all namespaces without a specified + nodeSelector value. For example, defaultNodeSelector: "type=user-node,region=east" + would set nodeSelector field in pod spec to "type=user-node,region=east" + to all pods created in all namespaces. Namespaces having project-wide + node selectors won''t be impacted even if this field is set. This + adds an annotation section to the namespace. For example, if a new + namespace is created with node-selector=''type=user-node,region=east'', + the annotation openshift.io/node-selector: type=user-node,region=east + gets added to the project. When the openshift.io/node-selector annotation + is set on the project the value is used in preference to the value + we are setting for defaultNodeSelector field. For instance, openshift.io/node-selector: + "type=user-node,region=west" means that the default of "type=user-node,region=east" + set in defaultNodeSelector would not be applied.' + type: string + mastersSchedulable: + description: 'MastersSchedulable allows masters nodes to be schedulable. + When this flag is turned on, all the master nodes in the cluster will + be made schedulable, so that workload pods can run on them. The default + value for this field is false, meaning none of the master nodes are + schedulable. Important Note: Once the workload pods start running + on the master nodes, extreme care must be taken to ensure that cluster-critical + control plane components are not impacted. Please turn on this field + after doing due diligence.' + type: boolean + policy: + description: policy is a reference to a ConfigMap containing scheduler + policy which has user specified predicates and priorities. 
If this + ConfigMap is not available scheduler will default to use DefaultAlgorithmProvider. + The namespace for this configmap is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config + map + type: string + status: + description: status holds observed values from the cluster. They may not + be overridden. + type: object diff --git a/vendor/github.com/openshift/api/config/v1/types_apiserver.go b/vendor/github.com/openshift/api/config/v1/types_apiserver.go index 452a09006..741db61f6 100644 --- a/vendor/github.com/openshift/api/config/v1/types_apiserver.go +++ b/vendor/github.com/openshift/api/config/v1/types_apiserver.go @@ -42,6 +42,11 @@ type APIServerSpec struct { // encryption allows the configuration of encryption of resources at the datastore layer. // +optional Encryption APIServerEncryption `json:"encryption"` + // tlsSecurityProfile specifies settings for TLS connections for externally exposed servers. + // + // If unset, a default (which may change between releases) is chosen. 
+ // +optional + TLSSecurityProfile *TLSSecurityProfile `json:"tlsSecurityProfile,omitempty"` } type APIServerServingCerts struct { diff --git a/vendor/github.com/openshift/api/config/v1/types_feature.go b/vendor/github.com/openshift/api/config/v1/types_feature.go index 4a28bb0d1..af8e13217 100644 --- a/vendor/github.com/openshift/api/config/v1/types_feature.go +++ b/vendor/github.com/openshift/api/config/v1/types_feature.go @@ -103,9 +103,12 @@ var FeatureSets = map[FeatureSet]*FeatureGateEnabledDisabled{ "ExperimentalCriticalPodAnnotation", // sig-pod, sjenning "RotateKubeletServerCertificate", // sig-pod, sjenning "SupportPodPidsLimit", // sig-pod, sjenning + "TLSSecurityProfile", // sig-network, danehans + "NodeDisruptionExclusion", // sig-scheduling, ccoleman + "ServiceNodeExclusion", // sig-scheduling, ccoleman }, Disabled: []string{ - "LocalStorageCapacityIsolation", // sig-pod, sjenning + "LegacyNodeRoleBehavior", // sig-scheduling, ccoleman }, }, CustomNoUpgrade: { @@ -117,9 +120,12 @@ var FeatureSets = map[FeatureSet]*FeatureGateEnabledDisabled{ "ExperimentalCriticalPodAnnotation", // sig-pod, sjenning "RotateKubeletServerCertificate", // sig-pod, sjenning "SupportPodPidsLimit", // sig-pod, sjenning + "TLSSecurityProfile", // sig-network, danehans + "NodeDisruptionExclusion", // sig-scheduling, ccoleman + "ServiceNodeExclusion", // sig-scheduling, ccoleman }, Disabled: []string{ - "LocalStorageCapacityIsolation", // sig-pod, sjenning + "LegacyNodeRoleBehavior", // sig-scheduling, ccoleman }, }, LatencySensitive: { @@ -128,9 +134,11 @@ var FeatureSets = map[FeatureSet]*FeatureGateEnabledDisabled{ "RotateKubeletServerCertificate", // sig-pod, sjenning "SupportPodPidsLimit", // sig-pod, sjenning "TopologyManager", // sig-pod, sjenning + "NodeDisruptionExclusion", // sig-scheduling, ccoleman + "ServiceNodeExclusion", // sig-scheduling, ccoleman }, Disabled: []string{ - "LocalStorageCapacityIsolation", // sig-pod, sjenning + "LegacyNodeRoleBehavior", // 
sig-scheduling, ccoleman }, }, } diff --git a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go index 4632e6ada..f8f5b9497 100644 --- a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go +++ b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go @@ -130,6 +130,10 @@ type PlatformStatus struct { // OpenStack contains settings specific to the OpenStack infrastructure provider. // +optional OpenStack *OpenStackPlatformStatus `json:"openstack,omitempty"` + + // Ovirt contains settings specific to the oVirt infrastructure provider. + // +optional + Ovirt *OvirtPlatformStatus `json:"ovirt,omitempty"` } // AWSPlatformStatus holds the current status of the Amazon Web Services infrastructure provider. @@ -142,6 +146,11 @@ type AWSPlatformStatus struct { type AzurePlatformStatus struct { // resourceGroupName is the Resource Group for new Azure resources created for the cluster. ResourceGroupName string `json:"resourceGroupName"` + + // networkResourceGroupName is the Resource Group for network resources like the Virtual Network and Subnets used by the cluster. + // If empty, the value is same as ResourceGroupName. + // +optional + NetworkResourceGroupName string `json:"networkResourceGroupName,omitempty"` } // GCPPlatformStatus holds the current status of the Google Cloud Platform infrastructure provider. @@ -154,6 +163,8 @@ type GCPPlatformStatus struct { } // BareMetalPlatformStatus holds the current status of the BareMetal infrastructure provider. 
+// For more information about the network architecture used with the BareMetal platform type, see: +// https://github.com/openshift/installer/blob/master/docs/design/baremetal/networking-infrastructure.md type BareMetalPlatformStatus struct { // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used // by components inside the cluster, like kubelets using the infrastructure rather @@ -199,6 +210,27 @@ type OpenStackPlatformStatus struct { NodeDNSIP string `json:"nodeDNSIP,omitempty"` } +// OvirtPlatformStatus holds the current status of the oVirt infrastructure provider. +type OvirtPlatformStatus struct { + // apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used + // by components inside the cluster, like kubelets using the infrastructure rather + // than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI + // points to. It is the IP for a self-hosted load balancer in front of the API servers. + APIServerInternalIP string `json:"apiServerInternalIP,omitempty"` + + // ingressIP is an external IP which routes to the default ingress controller. + // The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + IngressIP string `json:"ingressIP,omitempty"` + + // nodeDNSIP is the IP address for the internal DNS used by the + // nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` + // provides name resolution for the nodes themselves. There is no DNS-as-a-service for + // oVirt deployments. In order to minimize necessary changes to the + // datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames + // to the nodes in the cluster. 
+ NodeDNSIP string `json:"nodeDNSIP,omitempty"` +} + // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // InfrastructureList is diff --git a/vendor/github.com/openshift/api/config/v1/types_operatorhub.go b/vendor/github.com/openshift/api/config/v1/types_operatorhub.go index 31291dec2..1d998bf37 100644 --- a/vendor/github.com/openshift/api/config/v1/types_operatorhub.go +++ b/vendor/github.com/openshift/api/config/v1/types_operatorhub.go @@ -70,7 +70,7 @@ type HubSource struct { // HubSourceStatus is used to reflect the current state of applying the // configuration to a default source type HubSourceStatus struct { - HubSource `json:"",omitempty` + HubSource `json:",omitempty"` // status indicates success or failure in applying the configuration Status string `json:"status,omitempty"` // message provides more information regarding failures diff --git a/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go b/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go new file mode 100644 index 000000000..71cb39102 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/types_tlssecurityprofile.go @@ -0,0 +1,235 @@ +package v1 + +// TLSSecurityProfile defines the schema for a TLS security profile. This object +// is used by operators to apply TLS security settings to operands. +// +union +type TLSSecurityProfile struct { + // type is one of Old, Intermediate, Modern or Custom. Custom provides + // the ability to specify individual TLS security profile parameters. + // Old, Intermediate and Modern are TLS security profiles based on: + // + // https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations + // + // The profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers + // are found to be insecure. Depending on precisely which ciphers are available to a process, the list may be + // reduced. 
+ // + // +unionDiscriminator + // +optional + Type TLSProfileType `json:"type"` + // old is a TLS security profile based on: + // + // https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility + // + // and looks like this (yaml): + // + // ciphers: + // - TLS_AES_128_GCM_SHA256 + // - TLS_AES_256_GCM_SHA384 + // - TLS_CHACHA20_POLY1305_SHA256 + // - ECDHE-ECDSA-AES128-GCM-SHA256 + // - ECDHE-RSA-AES128-GCM-SHA256 + // - ECDHE-ECDSA-AES256-GCM-SHA384 + // - ECDHE-RSA-AES256-GCM-SHA384 + // - ECDHE-ECDSA-CHACHA20-POLY1305 + // - ECDHE-RSA-CHACHA20-POLY1305 + // - ECDHE-ECDSA-AES128-SHA256 + // - ECDHE-RSA-AES128-SHA256 + // - ECDHE-ECDSA-AES128-SHA + // - ECDHE-RSA-AES128-SHA + // - ECDHE-RSA-AES256-SHA384 + // - ECDHE-ECDSA-AES256-SHA + // - ECDHE-RSA-AES256-SHA + // - AES128-GCM-SHA256 + // - AES256-GCM-SHA384 + // - AES128-SHA256 + // - AES128-SHA + // - AES256-SHA + // - DES-CBC3-SHA + // minTLSVersion: TLSv1.0 + // + // +optional + // +nullable + Old *OldTLSProfile `json:"old,omitempty"` + // intermediate is a TLS security profile based on: + // + // https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29 + // + // and looks like this (yaml): + // + // ciphers: + // - TLS_AES_128_GCM_SHA256 + // - TLS_AES_256_GCM_SHA384 + // - TLS_CHACHA20_POLY1305_SHA256 + // - ECDHE-ECDSA-AES128-GCM-SHA256 + // - ECDHE-RSA-AES128-GCM-SHA256 + // - ECDHE-ECDSA-AES256-GCM-SHA384 + // - ECDHE-RSA-AES256-GCM-SHA384 + // - ECDHE-ECDSA-CHACHA20-POLY1305 + // - ECDHE-RSA-CHACHA20-POLY1305 + // minTLSVersion: TLSv1.2 + // + // +optional + // +nullable + Intermediate *IntermediateTLSProfile `json:"intermediate,omitempty"` + // modern is a TLS security profile based on: + // + // https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility + // + // and looks like this (yaml): + // + // ciphers: + // - TLS_AES_128_GCM_SHA256 + // - TLS_AES_256_GCM_SHA384 + // - TLS_CHACHA20_POLY1305_SHA256 + // minTLSVersion: TLSv1.3 
+ // + // +optional + // +nullable + Modern *ModernTLSProfile `json:"modern,omitempty"` + // custom is a user-defined TLS security profile. Be extremely careful using a custom + // profile as invalid configurations can be catastrophic. An example custom profile + // looks like this: + // + // ciphers: + // - ECDHE-ECDSA-CHACHA20-POLY1305 + // - ECDHE-RSA-CHACHA20-POLY1305 + // - ECDHE-RSA-AES128-GCM-SHA256 + // - ECDHE-ECDSA-AES128-GCM-SHA256 + // minTLSVersion: TLSv1.1 + // + // +optional + // +nullable + Custom *CustomTLSProfile `json:"custom,omitempty"` +} + +// OldTLSProfile is a TLS security profile based on: +// https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility +type OldTLSProfile struct{} + +// IntermediateTLSProfile is a TLS security profile based on: +// https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29 +type IntermediateTLSProfile struct{} + +// ModernTLSProfile is a TLS security profile based on: +// https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility +type ModernTLSProfile struct{} + +// CustomTLSProfile is a user-defined TLS security profile. Be extremely careful +// using a custom TLS profile as invalid configurations can be catastrophic. +type CustomTLSProfile struct { + TLSProfileSpec `json:",inline"` +} + +// TLSProfileType defines a TLS security profile type. 
+type TLSProfileType string + +const ( + // Old is a TLS security profile based on: + // https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility + TLSProfileOldType TLSProfileType = "Old" + // Intermediate is a TLS security profile based on: + // https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29 + TLSProfileIntermediateType TLSProfileType = "Intermediate" + // Modern is a TLS security profile based on: + // https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility + TLSProfileModernType TLSProfileType = "Modern" + // Custom is a TLS security profile that allows for user-defined parameters. + TLSProfileCustomType TLSProfileType = "Custom" +) + +// TLSProfileSpec is the desired behavior of a TLSSecurityProfile. +type TLSProfileSpec struct { + // ciphers is used to specify the cipher algorithms that are negotiated + // during the TLS handshake. Operators may remove entries their operands + // do not support. For example, to use DES-CBC3-SHA (yaml): + // + // ciphers: + // - DES-CBC3-SHA + // + Ciphers []string `json:"ciphers"` + // minTLSVersion is used to specify the minimal version of the TLS protocol + // that is negotiated during the TLS handshake. For example, to use TLS + // versions 1.1, 1.2 and 1.3 (yaml): + // + // minTLSVersion: TLSv1.1 + // + MinTLSVersion TLSProtocolVersion `json:"minTLSVersion"` +} + +// TLSProtocolVersion is a way to specify the protocol version used for TLS connections. +// Protocol versions are based on the following most common TLS configurations: +// +// https://ssl-config.mozilla.org/ +// +// Note that SSLv3.0 is not a supported protocol version due to well known +// vulnerabilities such as POODLE: https://en.wikipedia.org/wiki/POODLE +type TLSProtocolVersion string + +const ( + // VersionTLSv10 is version 1.0 of the TLS security protocol. + VersionTLS10 TLSProtocolVersion = "VersionTLSv10" + // VersionTLSv11 is version 1.1 of the TLS security protocol. 
+ VersionTLS11 TLSProtocolVersion = "VersionTLSv11" + // VersionTLSv12 is version 1.2 of the TLS security protocol. + VersionTLS12 TLSProtocolVersion = "VersionTLSv12" + // VersionTLSv13 is version 1.3 of the TLS security protocol. + VersionTLS13 TLSProtocolVersion = "VersionTLSv13" +) + +// TLSProfiles Contains a map of TLSProfileType names to TLSProfileSpec. +// +// NOTE: The caller needs to make sure to check that these constants are valid for their binary. Not all +// entries map to values for all binaries. In the case of ties, the kube-apiserver wins. Do not fail, +// just be sure to whitelist only and everything will be ok. +var TLSProfiles = map[TLSProfileType]*TLSProfileSpec{ + TLSProfileOldType: { + Ciphers: []string{ + "TLS_AES_128_GCM_SHA256", + "TLS_AES_256_GCM_SHA384", + "TLS_CHACHA20_POLY1305_SHA256", + "ECDHE-ECDSA-AES128-GCM-SHA256", + "ECDHE-RSA-AES128-GCM-SHA256", + "ECDHE-ECDSA-AES256-GCM-SHA384", + "ECDHE-RSA-AES256-GCM-SHA384", + "ECDHE-ECDSA-CHACHA20-POLY1305", + "ECDHE-RSA-CHACHA20-POLY1305", + "ECDHE-ECDSA-AES128-SHA256", + "ECDHE-RSA-AES128-SHA256", + "ECDHE-ECDSA-AES128-SHA", + "ECDHE-RSA-AES128-SHA", + "ECDHE-RSA-AES256-SHA384", + "ECDHE-ECDSA-AES256-SHA", + "ECDHE-RSA-AES256-SHA", + "AES128-GCM-SHA256", + "AES256-GCM-SHA384", + "AES128-SHA256", + "AES128-SHA", + "AES256-SHA", + "DES-CBC3-SHA", + }, + MinTLSVersion: VersionTLS10, + }, + TLSProfileIntermediateType: { + Ciphers: []string{ + "TLS_AES_128_GCM_SHA256", + "TLS_AES_256_GCM_SHA384", + "TLS_CHACHA20_POLY1305_SHA256", + "ECDHE-ECDSA-AES128-GCM-SHA256", + "ECDHE-RSA-AES128-GCM-SHA256", + "ECDHE-ECDSA-AES256-GCM-SHA384", + "ECDHE-RSA-AES256-GCM-SHA384", + "ECDHE-ECDSA-CHACHA20-POLY1305", + "ECDHE-RSA-CHACHA20-POLY1305", + }, + MinTLSVersion: VersionTLS12, + }, + TLSProfileModernType: { + Ciphers: []string{ + "TLS_AES_128_GCM_SHA256", + "TLS_AES_256_GCM_SHA384", + "TLS_CHACHA20_POLY1305_SHA256", + }, + MinTLSVersion: VersionTLS13, + }, +} diff --git 
a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go index 571be5a90..37888a939 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go @@ -142,6 +142,11 @@ func (in *APIServerSpec) DeepCopyInto(out *APIServerSpec) { copy(*out, *in) } out.Encryption = in.Encryption + if in.TLSSecurityProfile != nil { + in, out := &in.TLSSecurityProfile, &out.TLSSecurityProfile + *out = new(TLSSecurityProfile) + (*in).DeepCopyInto(*out) + } return } @@ -1039,6 +1044,23 @@ func (in *CustomFeatureGates) DeepCopy() *CustomFeatureGates { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomTLSProfile) DeepCopyInto(out *CustomTLSProfile) { + *out = *in + in.TLSProfileSpec.DeepCopyInto(&out.TLSProfileSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomTLSProfile. +func (in *CustomTLSProfile) DeepCopy() *CustomTLSProfile { + if in == nil { + return nil + } + out := new(CustomTLSProfile) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DNS) DeepCopyInto(out *DNS) { *out = *in @@ -2017,6 +2039,22 @@ func (in *IngressStatus) DeepCopy() *IngressStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IntermediateTLSProfile) DeepCopyInto(out *IntermediateTLSProfile) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IntermediateTLSProfile. 
+func (in *IntermediateTLSProfile) DeepCopy() *IntermediateTLSProfile { + if in == nil { + return nil + } + out := new(IntermediateTLSProfile) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KeystoneIdentityProvider) DeepCopyInto(out *KeystoneIdentityProvider) { *out = *in @@ -2125,6 +2163,22 @@ func (in *LeaderElection) DeepCopy() *LeaderElection { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ModernTLSProfile) DeepCopyInto(out *ModernTLSProfile) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ModernTLSProfile. +func (in *ModernTLSProfile) DeepCopy() *ModernTLSProfile { + if in == nil { + return nil + } + out := new(ModernTLSProfile) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NamedCertificate) DeepCopyInto(out *NamedCertificate) { *out = *in @@ -2421,6 +2475,22 @@ func (in *ObjectReference) DeepCopy() *ObjectReference { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OldTLSProfile) DeepCopyInto(out *OldTLSProfile) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OldTLSProfile. +func (in *OldTLSProfile) DeepCopy() *OldTLSProfile { + if in == nil { + return nil + } + out := new(OldTLSProfile) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *OpenIDClaims) DeepCopyInto(out *OpenIDClaims) { *out = *in @@ -2618,6 +2688,22 @@ func (in *OperatorHubStatus) DeepCopy() *OperatorHubStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OvirtPlatformStatus) DeepCopyInto(out *OvirtPlatformStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OvirtPlatformStatus. +func (in *OvirtPlatformStatus) DeepCopy() *OvirtPlatformStatus { + if in == nil { + return nil + } + out := new(OvirtPlatformStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PlatformStatus) DeepCopyInto(out *PlatformStatus) { *out = *in @@ -2646,6 +2732,11 @@ func (in *PlatformStatus) DeepCopyInto(out *PlatformStatus) { *out = new(OpenStackPlatformStatus) **out = **in } + if in.Ovirt != nil { + in, out := &in.Ovirt, &out.Ovirt + *out = new(OvirtPlatformStatus) + **out = **in + } return } @@ -3130,6 +3221,63 @@ func (in *StringSourceSpec) DeepCopy() *StringSourceSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSProfileSpec) DeepCopyInto(out *TLSProfileSpec) { + *out = *in + if in.Ciphers != nil { + in, out := &in.Ciphers, &out.Ciphers + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSProfileSpec. +func (in *TLSProfileSpec) DeepCopy() *TLSProfileSpec { + if in == nil { + return nil + } + out := new(TLSProfileSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TLSSecurityProfile) DeepCopyInto(out *TLSSecurityProfile) { + *out = *in + if in.Old != nil { + in, out := &in.Old, &out.Old + *out = new(OldTLSProfile) + **out = **in + } + if in.Intermediate != nil { + in, out := &in.Intermediate, &out.Intermediate + *out = new(IntermediateTLSProfile) + **out = **in + } + if in.Modern != nil { + in, out := &in.Modern, &out.Modern + *out = new(ModernTLSProfile) + **out = **in + } + if in.Custom != nil { + in, out := &in.Custom, &out.Custom + *out = new(CustomTLSProfile) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSSecurityProfile. +func (in *TLSSecurityProfile) DeepCopy() *TLSSecurityProfile { + if in == nil { + return nil + } + out := new(TLSSecurityProfile) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TemplateReference) DeepCopyInto(out *TemplateReference) { *out = *in diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go index d48870185..bc7f76db3 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go @@ -282,6 +282,7 @@ var map_APIServerSpec = map[string]string{ "clientCA": "clientCA references a ConfigMap containing a certificate bundle for the signers that will be recognized for incoming client certificates in addition to the operator managed signers. If this is empty, then only operator managed signers are valid. You usually only have to set this if you have your own PKI you wish to honor client certificates from. 
The ConfigMap must exist in the openshift-config namespace and contain the following required fields: - ConfigMap.Data[\"ca-bundle.crt\"] - CA bundle.", "additionalCORSAllowedOrigins": "additionalCORSAllowedOrigins lists additional, user-defined regular expressions describing hosts for which the API server allows access using the CORS headers. This may be needed to access the API and the integrated OAuth server from JavaScript applications. The values are regular expressions that correspond to the Golang regular expression language.", "encryption": "encryption allows the configuration of encryption of resources at the datastore layer.", + "tlsSecurityProfile": "tlsSecurityProfile specifies settings for TLS connections for externally exposed servers.\n\nIf unset, a default (which may change between releases) is chosen.", } func (APIServerSpec) SwaggerDoc() map[string]string { @@ -738,8 +739,9 @@ func (AWSPlatformStatus) SwaggerDoc() map[string]string { } var map_AzurePlatformStatus = map[string]string{ - "": "AzurePlatformStatus holds the current status of the Azure infrastructure provider.", - "resourceGroupName": "resourceGroupName is the Resource Group for new Azure resources created for the cluster.", + "": "AzurePlatformStatus holds the current status of the Azure infrastructure provider.", + "resourceGroupName": "resourceGroupName is the Resource Group for new Azure resources created for the cluster.", + "networkResourceGroupName": "networkResourceGroupName is the Resource Group for network resources like the Virtual Network and Subnets used by the cluster. 
If empty, the value is same as ResourceGroupName.", } func (AzurePlatformStatus) SwaggerDoc() map[string]string { @@ -747,7 +749,7 @@ func (AzurePlatformStatus) SwaggerDoc() map[string]string { } var map_BareMetalPlatformStatus = map[string]string{ - "": "BareMetalPlatformStatus holds the current status of the BareMetal infrastructure provider.", + "": "BareMetalPlatformStatus holds the current status of the BareMetal infrastructure provider. For more information about the network architecture used with the BareMetal platform type, see: https://github.com/openshift/installer/blob/master/docs/design/baremetal/networking-infrastructure.md", "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.", "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.", "nodeDNSIP": "nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for BareMetal deployments. 
In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster.", @@ -822,6 +824,17 @@ func (OpenStackPlatformStatus) SwaggerDoc() map[string]string { return map_OpenStackPlatformStatus } +var map_OvirtPlatformStatus = map[string]string{ + "": "OvirtPlatformStatus holds the current status of the oVirt infrastructure provider.", + "apiServerInternalIP": "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers.", + "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.", + "nodeDNSIP": "nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for oVirt deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster.", +} + +func (OvirtPlatformStatus) SwaggerDoc() map[string]string { + return map_OvirtPlatformStatus +} + var map_PlatformStatus = map[string]string{ "": "PlatformStatus holds the current status specific to the underlying infrastructure provider of the current cluster. Since these are used at status-level for the underlying cluster, it is supposed that only one of the status structs is set.", "type": "type is the underlying infrastructure provider for the cluster. 
This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", and \"None\". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform.", @@ -830,6 +843,7 @@ var map_PlatformStatus = map[string]string{ "gcp": "GCP contains settings specific to the Google Cloud Platform infrastructure provider.", "baremetal": "BareMetal contains settings specific to the BareMetal platform.", "openstack": "OpenStack contains settings specific to the OpenStack infrastructure provider.", + "ovirt": "Ovirt contains settings specific to the oVirt infrastructure provider.", } func (PlatformStatus) SwaggerDoc() map[string]string { @@ -1319,4 +1333,59 @@ func (SchedulerSpec) SwaggerDoc() map[string]string { return map_SchedulerSpec } +var map_CustomTLSProfile = map[string]string{ + "": "CustomTLSProfile is a user-defined TLS security profile. 
Be extremely careful using a custom TLS profile as invalid configurations can be catastrophic.", +} + +func (CustomTLSProfile) SwaggerDoc() map[string]string { + return map_CustomTLSProfile +} + +var map_IntermediateTLSProfile = map[string]string{ + "": "IntermediateTLSProfile is a TLS security profile based on: https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28default.29", +} + +func (IntermediateTLSProfile) SwaggerDoc() map[string]string { + return map_IntermediateTLSProfile +} + +var map_ModernTLSProfile = map[string]string{ + "": "ModernTLSProfile is a TLS security profile based on: https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility", +} + +func (ModernTLSProfile) SwaggerDoc() map[string]string { + return map_ModernTLSProfile +} + +var map_OldTLSProfile = map[string]string{ + "": "OldTLSProfile is a TLS security profile based on: https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility", +} + +func (OldTLSProfile) SwaggerDoc() map[string]string { + return map_OldTLSProfile +} + +var map_TLSProfileSpec = map[string]string{ + "": "TLSProfileSpec is the desired behavior of a TLSSecurityProfile.", + "ciphers": "ciphers is used to specify the cipher algorithms that are negotiated during the TLS handshake. Operators may remove entries their operands do not support. For example, to use DES-CBC3-SHA (yaml):\n\n ciphers:\n - DES-CBC3-SHA", + "minTLSVersion": "minTLSVersion is used to specify the minimal version of the TLS protocol that is negotiated during the TLS handshake. For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml):\n\n minTLSVersion: TLSv1.1", +} + +func (TLSProfileSpec) SwaggerDoc() map[string]string { + return map_TLSProfileSpec +} + +var map_TLSSecurityProfile = map[string]string{ + "": "TLSSecurityProfile defines the schema for a TLS security profile. 
This object is used by operators to apply TLS security settings to operands.", + "type": "type is one of Old, Intermediate, Modern or Custom. Custom provides the ability to specify individual TLS security profile parameters. Old, Intermediate and Modern are TLS security profiles based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations\n\nThe profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers are found to be insecure. Depending on precisely which ciphers are available to a process, the list may be reduced.", + "old": "old is a TLS security profile based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility\n\nand looks like this (yaml):\n\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES256-GCM-SHA384\n - ECDHE-RSA-AES256-GCM-SHA384\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n - ECDHE-ECDSA-AES128-SHA256\n - ECDHE-RSA-AES128-SHA256\n - ECDHE-ECDSA-AES128-SHA\n - ECDHE-RSA-AES128-SHA\n - ECDHE-RSA-AES256-SHA384\n - ECDHE-ECDSA-AES256-SHA\n - ECDHE-RSA-AES256-SHA\n - AES128-GCM-SHA256\n - AES256-GCM-SHA384\n - AES128-SHA256\n - AES128-SHA\n - AES256-SHA\n - DES-CBC3-SHA\n minTLSVersion: TLSv1.0", + "intermediate": "intermediate is a TLS security profile based on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29\n\nand looks like this (yaml):\n\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES256-GCM-SHA384\n - ECDHE-RSA-AES256-GCM-SHA384\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n minTLSVersion: TLSv1.2", + "modern": "modern is a TLS security profile based 
on:\n\nhttps://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility\n\nand looks like this (yaml):\n\n ciphers:\n - TLS_AES_128_GCM_SHA256\n - TLS_AES_256_GCM_SHA384\n - TLS_CHACHA20_POLY1305_SHA256\n minTLSVersion: TLSv1.3", + "custom": "custom is a user-defined TLS security profile. Be extremely careful using a custom profile as invalid configurations can be catastrophic. An example custom profile looks like this:\n\n ciphers:\n - ECDHE-ECDSA-CHACHA20-POLY1305\n - ECDHE-RSA-CHACHA20-POLY1305\n - ECDHE-RSA-AES128-GCM-SHA256\n - ECDHE-ECDSA-AES128-GCM-SHA256\n minTLSVersion: TLSv1.1", +} + +func (TLSSecurityProfile) SwaggerDoc() map[string]string { + return map_TLSSecurityProfile +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/console/v1/0000_10_config-operator_01_consoleclidownload.crd.yaml b/vendor/github.com/openshift/api/console/v1/0000_10_config-operator_01_consoleclidownload.crd.yaml new file mode 100644 index 000000000..5dbe0acd1 --- /dev/null +++ b/vendor/github.com/openshift/api/console/v1/0000_10_config-operator_01_consoleclidownload.crd.yaml @@ -0,0 +1,85 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: consoleclidownloads.console.openshift.io + annotations: + displayName: ConsoleCLIDownload + description: Extension for configuring openshift web console command line interface + (CLI) downloads. 
+spec: + scope: Cluster + group: console.openshift.io + versions: + - name: v1 + served: true + storage: true + names: + plural: consoleclidownloads + singular: consoleclidownload + kind: ConsoleCLIDownload + listKind: ConsoleCLIDownloadList + additionalPrinterColumns: + - name: Display name + type: string + JSONPath: .spec.displayName + - name: Age + type: string + JSONPath: .metadata.creationTimestamp + - name: Description + type: string + JSONPath: .spec.description + subresources: + status: {} + "validation": + "openAPIV3Schema": + description: ConsoleCLIDownload is an extension for configuring openshift web + console command line interface (CLI) downloads. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: Standard object's metadata. + type: object + spec: + description: ConsoleCLIDownloadSpec is the desired cli download configuration. + type: object + required: + - description + - displayName + - links + properties: + description: + description: description is the description of the CLI download (can + include markdown). + type: string + displayName: + description: displayName is the display name of the CLI download. + type: string + links: + description: links is a list of objects that provide CLI download link + details. 
+ type: array + items: + type: object + required: + - href + properties: + href: + description: href is the absolute secure URL for the link (must + use https) + type: string + pattern: ^https://([\w-]+.)+[\w-]+(/[\w- ./?%&=])?$ + text: + description: text is the display text for the link + type: string diff --git a/vendor/github.com/openshift/api/console/v1/0000_10_config-operator_01_consoleexternalloglink.crd.yaml b/vendor/github.com/openshift/api/console/v1/0000_10_config-operator_01_consoleexternalloglink.crd.yaml new file mode 100644 index 000000000..7d7547301 --- /dev/null +++ b/vendor/github.com/openshift/api/console/v1/0000_10_config-operator_01_consoleexternalloglink.crd.yaml @@ -0,0 +1,87 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: consoleexternalloglinks.console.openshift.io + annotations: + displayName: ConsoleExternalLogLinks + description: ConsoleExternalLogLink is an extension for customizing OpenShift + web console log links. +spec: + scope: Cluster + group: console.openshift.io + versions: + - name: v1 + served: true + storage: true + names: + plural: consoleexternalloglinks + singular: consoleexternalloglink + kind: ConsoleExternalLogLink + listKind: ConsoleExternalLogLinkList + additionalPrinterColumns: + - name: Text + type: string + JSONPath: .spec.text + - name: HrefTemplate + type: string + JSONPath: .spec.hrefTemplate + - name: Age + type: date + JSONPath: .metadata.creationTimestamp + subresources: + status: {} + "validation": + "openAPIV3Schema": + description: ConsoleExternalLogLink is an extension for customizing OpenShift + web console log links. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: Standard object's metadata. + type: object + spec: + description: ConsoleExternalLogLinkSpec is the desired log link configuration. + The log link will appear on the logs tab of the pod details page. + type: object + required: + - hrefTemplate + - text + properties: + hrefTemplate: + description: "hrefTemplate is an absolute secure URL (must use https) + for the log link including variables to be replaced. Variables are + specified in the URL with the format ${variableName}, for instance, + ${containerName} and will be replaced with the corresponding values + from the resource. Resource is a pod. Supported variables are: - ${resourceName} + - name of the resource which contains the logs - ${resourceUID} - + UID of the resource which contains the logs - e.g. `11111111-2222-3333-4444-555555555555` + - ${containerName} - name of the resource's container that contains + the logs - ${resourceNamespace} - namespace of the resource that contains + the logs - ${resourceNamespaceUID} - namespace UID of the resource + that contains the logs - ${podLabels} - JSON representation of labels + matching the pod with the logs - e.g.
`{\"key1\":\"value1\",\"key2\":\"value2\"}` + \n e.g., https://example.com/logs?resourceName=${resourceName}&containerName=${containerName}&resourceNamespace=${resourceNamespace}&podLabels=${podLabels}" + type: string + pattern: ^https:// + namespaceFilter: + description: namespaceFilter is a regular expression used to restrict + a log link to a matching set of namespaces (e.g., `^openshift-`). + The string is converted into a regular expression using the JavaScript + RegExp constructor. If not specified, links will be displayed for + all the namespaces. + type: string + text: + description: text is the display text for the link + type: string diff --git a/vendor/github.com/openshift/api/console/v1/0000_10_config-operator_01_consolelink.crd.yaml b/vendor/github.com/openshift/api/console/v1/0000_10_config-operator_01_consolelink.crd.yaml new file mode 100644 index 000000000..970cd3231 --- /dev/null +++ b/vendor/github.com/openshift/api/console/v1/0000_10_config-operator_01_consolelink.crd.yaml @@ -0,0 +1,112 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: consolelinks.console.openshift.io + annotations: + displayName: ConsoleLinks + description: Extension for customizing OpenShift web console links +spec: + scope: Cluster + group: console.openshift.io + versions: + - name: v1 + served: true + storage: true + names: + plural: consolelinks + singular: consolelink + kind: ConsoleLink + listKind: ConsoleLinkList + additionalPrinterColumns: + - name: Text + type: string + JSONPath: .spec.text + - name: URL + type: string + JSONPath: .spec.href + - name: Menu + type: string + JSONPath: .spec.menu + - name: Age + type: date + JSONPath: .metadata.creationTimestamp + subresources: + status: {} + "validation": + "openAPIV3Schema": + description: ConsoleLink is an extension for customizing OpenShift web console + links. 
+ type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: Standard object's metadata. + type: object + spec: + description: ConsoleLinkSpec is the desired console link configuration. + type: object + required: + - href + - location + - text + properties: + applicationMenu: + description: applicationMenu holds information about section and icon + used for the link in the application menu, and it is applicable only + when location is set to ApplicationMenu. + type: object + required: + - section + properties: + imageURL: + description: imageUrl is the URL for the icon used in front of the + link in the application menu. The URL must be an HTTPS URL or + a Data URI. The image should be square and will be shown at 24x24 + pixels. + type: string + section: + description: section is the section of the application menu in which + the link should appear. This can be any text that will appear + as a subheading in the application menu dropdown. A new section + will be created if the text does not match text of an existing + section. 
+ type: string + href: + description: href is the absolute secure URL for the link (must use + https) + type: string + pattern: ^https://([\w-]+.)+[\w-]+(/[\w- ./?%&=])?$ + location: + description: location determines which location in the console the link + will be appended to (ApplicationMenu, HelpMenu, UserMenu, NamespaceDashboard). + type: string + pattern: ^(ApplicationMenu|HelpMenu|UserMenu|NamespaceDashboard)$ + namespaceDashboard: + description: namespaceDashboard holds information about namespaces in + which the dashboard link should appear, and it is applicable only + when location is set to NamespaceDashboard. If not specified, the + link will appear in all namespaces. + type: object + required: + - namespaces + properties: + namespaces: + description: namespaces is an array of namespace names in which + the dashboard link should appear. + type: array + items: + type: string + text: + description: text is the display text for the link + type: string diff --git a/vendor/github.com/openshift/api/console/v1/0000_10_config-operator_01_consolenotification.crd.yaml b/vendor/github.com/openshift/api/console/v1/0000_10_config-operator_01_consolenotification.crd.yaml new file mode 100644 index 000000000..2c81d9347 --- /dev/null +++ b/vendor/github.com/openshift/api/console/v1/0000_10_config-operator_01_consolenotification.crd.yaml @@ -0,0 +1,89 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: consolenotifications.console.openshift.io + annotations: + displayName: ConsoleNotification + description: Extension for configuring openshift web console notifications. 
+spec: + scope: Cluster + group: console.openshift.io + versions: + - name: v1 + served: true + storage: true + names: + plural: consolenotifications + singular: consolenotification + kind: ConsoleNotification + listKind: ConsoleNotificationList + additionalPrinterColumns: + - name: Text + type: string + JSONPath: .spec.text + - name: Location + type: string + JSONPath: .spec.location + - name: Age + type: date + JSONPath: .metadata.creationTimestamp + subresources: + status: {} + "validation": + "openAPIV3Schema": + description: ConsoleNotification is the extension for configuring openshift + web console notifications. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: Standard object's metadata. + type: object + spec: + description: ConsoleNotificationSpec is the desired console notification + configuration. + type: object + required: + - text + properties: + backgroundColor: + description: backgroundColor is the color of the background for the + notification as CSS data type color. + type: string + color: + description: color is the color of the text for the notification as + CSS data type color. + type: string + link: + description: link is an object that holds notification link details. 
+ type: object + required: + - href + - text + properties: + href: + description: href is the absolute secure URL for the link (must + use https) + type: string + pattern: ^https://([\w-]+.)+[\w-]+(/[\w- ./?%&=])?$ + text: + description: text is the display text for the link + type: string + location: + description: location is the location of the notification in the console. + type: string + pattern: ^(BannerTop|BannerBottom|BannerTopBottom)$ + text: + description: text is the visible text of the notification. + type: string diff --git a/vendor/github.com/openshift/api/console/v1/register.go b/vendor/github.com/openshift/api/console/v1/register.go index 98363daa3..87e0c8ed7 100644 --- a/vendor/github.com/openshift/api/console/v1/register.go +++ b/vendor/github.com/openshift/api/console/v1/register.go @@ -39,6 +39,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &ConsoleNotificationList{}, &ConsoleExternalLogLink{}, &ConsoleExternalLogLinkList{}, + &ConsoleYAMLSample{}, + &ConsoleYAMLSampleList{}, ) metav1.AddToGroupVersion(scheme, GroupVersion) return nil diff --git a/vendor/github.com/openshift/api/console/v1/types.go b/vendor/github.com/openshift/api/console/v1/types.go index 3dea7005a..2a350a492 100644 --- a/vendor/github.com/openshift/api/console/v1/types.go +++ b/vendor/github.com/openshift/api/console/v1/types.go @@ -5,5 +5,6 @@ type Link struct { // text is the display text for the link Text string `json:"text"` // href is the absolute secure URL for the link (must use https) + // +kubebuilder:validation:Pattern=^https://([\w-]+.)+[\w-]+(/[\w- ./?%&=])?$ Href string `json:"href"` } diff --git a/vendor/github.com/openshift/api/console/v1/types_console_cli_download.go b/vendor/github.com/openshift/api/console/v1/types_console_cli_download.go index 00844a03c..d7f9367ff 100644 --- a/vendor/github.com/openshift/api/console/v1/types_console_cli_download.go +++ b/vendor/github.com/openshift/api/console/v1/types_console_cli_download.go @@ -21,7 +21,16 @@ 
type ConsoleCLIDownloadSpec struct { // description is the description of the CLI download (can include markdown). Description string `json:"description"` // links is a list of objects that provide CLI download link details. - Links []Link `json:"links"` + Links []CLIDownloadLink `json:"links"` +} + +type CLIDownloadLink struct { + // text is the display text for the link + // +optional + Text string `json:"text"` + // href is the absolute secure URL for the link (must use https) + // +kubebuilder:validation:Pattern=^https://([\w-]+.)+[\w-]+(/[\w- ./?%&=])?$ + Href string `json:"href"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/github.com/openshift/api/console/v1/types_console_external_log_links.go b/vendor/github.com/openshift/api/console/v1/types_console_external_log_links.go index c9c3317d5..9aecac768 100644 --- a/vendor/github.com/openshift/api/console/v1/types_console_external_log_links.go +++ b/vendor/github.com/openshift/api/console/v1/types_console_external_log_links.go @@ -29,10 +29,12 @@ type ConsoleExternalLogLinkSpec struct { // - e.g. `11111111-2222-3333-4444-555555555555` // - ${containerName} - name of the resource's container that contains the logs // - ${resourceNamespace} - namespace of the resource that contains the logs + // - ${resourceNamespaceUID} - namespace UID of the resource that contains the logs // - ${podLabels} - JSON representation of labels matching the pod with the logs // - e.g. `{"key1":"value1","key2":"value2"}` // // e.g., https://example.com/logs?resourceName=${resourceName}&containerName=${containerName}&resourceNamespace=${resourceNamespace}&podLabels=${podLabels} + // +kubebuilder:validation:Pattern=^https:// HrefTemplate string `json:"hrefTemplate"` // namespaceFilter is a regular expression used to restrict a log link to a // matching set of namespaces (e.g., `^openshift-`). 
The string is converted diff --git a/vendor/github.com/openshift/api/console/v1/types_console_link.go b/vendor/github.com/openshift/api/console/v1/types_console_link.go index 9b7c4ba30..e276eb73a 100644 --- a/vendor/github.com/openshift/api/console/v1/types_console_link.go +++ b/vendor/github.com/openshift/api/console/v1/types_console_link.go @@ -17,7 +17,7 @@ type ConsoleLink struct { // ConsoleLinkSpec is the desired console link configuration. type ConsoleLinkSpec struct { Link `json:",inline"` - // location determines which location in the console the link will be appended to. + // location determines which location in the console the link will be appended to (ApplicationMenu, HelpMenu, UserMenu, NamespaceDashboard). Location ConsoleLinkLocation `json:"location"` // applicationMenu holds information about section and icon used for the link in the // application menu, and it is applicable only when location is set to ApplicationMenu. @@ -35,6 +35,8 @@ type ConsoleLinkSpec struct { // ApplicationMenuSpec is the specification of the desired section and icon used for the link in the application menu. type ApplicationMenuSpec struct { // section is the section of the application menu in which the link should appear. + // This can be any text that will appear as a subheading in the application menu dropdown. + // A new section will be created if the text does not match text of an existing section. Section string `json:"section"` // imageUrl is the URL for the icon used in front of the link in the application menu. // The URL must be an HTTPS URL or a Data URI. The image should be square and will be shown at 24x24 pixels. @@ -49,6 +51,7 @@ type NamespaceDashboardSpec struct { } // ConsoleLinkLocationSelector is a set of possible menu targets to which a link may be appended. 
+// +kubebuilder:validation:Pattern=^(ApplicationMenu|HelpMenu|UserMenu|NamespaceDashboard)$ type ConsoleLinkLocation string const ( diff --git a/vendor/github.com/openshift/api/console/v1/types_console_notification.go b/vendor/github.com/openshift/api/console/v1/types_console_notification.go index b4347fe3a..8f716036b 100644 --- a/vendor/github.com/openshift/api/console/v1/types_console_notification.go +++ b/vendor/github.com/openshift/api/console/v1/types_console_notification.go @@ -34,6 +34,7 @@ type ConsoleNotificationSpec struct { // ConsoleNotificationLocationSelector is a set of possible notification targets // to which a notification may be appended. +// +kubebuilder:validation:Pattern=^(BannerTop|BannerBottom|BannerTopBottom)$ type ConsoleNotificationLocation string const ( diff --git a/vendor/github.com/openshift/api/console/v1/types_console_yaml_sample.go b/vendor/github.com/openshift/api/console/v1/types_console_yaml_sample.go new file mode 100644 index 000000000..c352abb17 --- /dev/null +++ b/vendor/github.com/openshift/api/console/v1/types_console_yaml_sample.go @@ -0,0 +1,51 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ConsoleYAMLSample is an extension for customizing OpenShift web console YAML samples. +type ConsoleYAMLSample struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + metav1.ObjectMeta `json:"metadata"` + Spec ConsoleYAMLSampleSpec `json:"spec"` +} + +// ConsoleYAMLSampleSpec is the desired YAML sample configuration. +// Samples will appear with their descriptions in a samples sidebar +// when creating a resources in the web console. +type ConsoleYAMLSampleSpec struct { + // targetResource contains apiVersion and kind of the resource + // YAML sample is representating. + TargetResource metav1.TypeMeta `json:",targetResource"` + // title of the YAML sample. 
+ Title ConsoleYAMLSampleTitle `json:"title"` + // description of the YAML sample. + Description ConsoleYAMLSampleDescription `json:"description"` + // yaml is the YAML sample to display. + YAML ConsoleYAMLSampleYAML `json:"yaml"` +} + +// ConsoleYAMLSampleTitle of the YAML sample. +// +kubebuilder:validation:Pattern=^(.|\s)*\S(.|\s)*$ +type ConsoleYAMLSampleTitle string + +// ConsoleYAMLSampleDescription of the YAML sample. +// +kubebuilder:validation:Pattern=^(.|\s)*\S(.|\s)*$ +type ConsoleYAMLSampleDescription string + +// ConsoleYAMLSampleYAML is the YAML sample to display. +// +kubebuilder:validation:Pattern=^(.|\s)*\S(.|\s)*$ +type ConsoleYAMLSampleYAML string + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type ConsoleYAMLSampleList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + metav1.ListMeta `json:"metadata"` + Items []ConsoleYAMLSample `json:"items"` +} diff --git a/vendor/github.com/openshift/api/console/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/console/v1/zz_generated.deepcopy.go index 9c32f2d82..d1231420b 100644 --- a/vendor/github.com/openshift/api/console/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/console/v1/zz_generated.deepcopy.go @@ -24,6 +24,22 @@ func (in *ApplicationMenuSpec) DeepCopy() *ApplicationMenuSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CLIDownloadLink) DeepCopyInto(out *CLIDownloadLink) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CLIDownloadLink. +func (in *CLIDownloadLink) DeepCopy() *CLIDownloadLink { + if in == nil { + return nil + } + out := new(CLIDownloadLink) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ConsoleCLIDownload) DeepCopyInto(out *ConsoleCLIDownload) { *out = *in @@ -89,7 +105,7 @@ func (in *ConsoleCLIDownloadSpec) DeepCopyInto(out *ConsoleCLIDownloadSpec) { *out = *in if in.Links != nil { in, out := &in.Links, &out.Links - *out = make([]Link, len(*in)) + *out = make([]CLIDownloadLink, len(*in)) copy(*out, *in) } return @@ -349,6 +365,83 @@ func (in *ConsoleNotificationSpec) DeepCopy() *ConsoleNotificationSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleYAMLSample) DeepCopyInto(out *ConsoleYAMLSample) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleYAMLSample. +func (in *ConsoleYAMLSample) DeepCopy() *ConsoleYAMLSample { + if in == nil { + return nil + } + out := new(ConsoleYAMLSample) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConsoleYAMLSample) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleYAMLSampleList) DeepCopyInto(out *ConsoleYAMLSampleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ConsoleYAMLSample, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleYAMLSampleList. 
+func (in *ConsoleYAMLSampleList) DeepCopy() *ConsoleYAMLSampleList { + if in == nil { + return nil + } + out := new(ConsoleYAMLSampleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConsoleYAMLSampleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleYAMLSampleSpec) DeepCopyInto(out *ConsoleYAMLSampleSpec) { + *out = *in + out.TargetResource = in.TargetResource + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleYAMLSampleSpec. +func (in *ConsoleYAMLSampleSpec) DeepCopy() *ConsoleYAMLSampleSpec { + if in == nil { + return nil + } + out := new(ConsoleYAMLSampleSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Link) DeepCopyInto(out *Link) { *out = *in diff --git a/vendor/github.com/openshift/api/console/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/console/v1/zz_generated.swagger_doc_generated.go index 5d9729ed0..9d85f7957 100644 --- a/vendor/github.com/openshift/api/console/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/console/v1/zz_generated.swagger_doc_generated.go @@ -21,6 +21,15 @@ func (Link) SwaggerDoc() map[string]string { return map_Link } +var map_CLIDownloadLink = map[string]string{ + "text": "text is the display text for the link", + "href": "href is the absolute secure URL for the link (must use https)", +} + +func (CLIDownloadLink) SwaggerDoc() map[string]string { + return map_CLIDownloadLink +} + var map_ConsoleCLIDownload = map[string]string{ "": "ConsoleCLIDownload is an extension for configuring openshift web console command line interface (CLI) downloads.", "metadata": "Standard object's metadata.", @@ -69,7 +78,7 @@ func (ConsoleExternalLogLinkList) SwaggerDoc() map[string]string { var map_ConsoleExternalLogLinkSpec = map[string]string{ "": "ConsoleExternalLogLinkSpec is the desired log link configuration. The log link will appear on the logs tab of the pod details page.", "text": "text is the display text for the link", - "hrefTemplate": "hrefTemplate is an absolute secure URL (must use https) for the log link including variables to be replaced. Variables are specified in the URL with the format ${variableName}, for instance, ${containerName} and will be replaced with the corresponding values from the resource. Resource is a pod. Supported variables are: - ${resourceName} - name of the resource which containes the logs - ${resourceUID} - UID of the resource which contains the logs\n - e.g. 
`11111111-2222-3333-4444-555555555555`\n- ${containerName} - name of the resource's container that contains the logs - ${resourceNamespace} - namespace of the resource that contains the logs - ${podLabels} - JSON representation of labels matching the pod with the logs\n - e.g. `{\"key1\":\"value1\",\"key2\":\"value2\"}`\n\ne.g., https://example.com/logs?resourceName=${resourceName}&containerName=${containerName}&resourceNamespace=${resourceNamespace}&podLabels=${podLabels}", + "hrefTemplate": "hrefTemplate is an absolute secure URL (must use https) for the log link including variables to be replaced. Variables are specified in the URL with the format ${variableName}, for instance, ${containerName} and will be replaced with the corresponding values from the resource. Resource is a pod. Supported variables are: - ${resourceName} - name of the resource which containes the logs - ${resourceUID} - UID of the resource which contains the logs\n - e.g. `11111111-2222-3333-4444-555555555555`\n- ${containerName} - name of the resource's container that contains the logs - ${resourceNamespace} - namespace of the resource that contains the logs - ${resourceNamespaceUID} - namespace UID of the resource that contains the logs - ${podLabels} - JSON representation of labels matching the pod with the logs\n - e.g. `{\"key1\":\"value1\",\"key2\":\"value2\"}`\n\ne.g., https://example.com/logs?resourceName=${resourceName}&containerName=${containerName}&resourceNamespace=${resourceNamespace}&podLabels=${podLabels}", "namespaceFilter": "namespaceFilter is a regular expression used to restrict a log link to a matching set of namespaces (e.g., `^openshift-`). The string is converted into a regular expression using the JavaScript RegExp constructor. 
If not specified, links will be displayed for all the namespaces.", } @@ -79,7 +88,7 @@ func (ConsoleExternalLogLinkSpec) SwaggerDoc() map[string]string { var map_ApplicationMenuSpec = map[string]string{ "": "ApplicationMenuSpec is the specification of the desired section and icon used for the link in the application menu.", - "section": "section is the section of the application menu in which the link should appear.", + "section": "section is the section of the application menu in which the link should appear. This can be any text that will appear as a subheading in the application menu dropdown. A new section will be created if the text does not match text of an existing section.", "imageURL": "imageUrl is the URL for the icon used in front of the link in the application menu. The URL must be an HTTPS URL or a Data URI. The image should be square and will be shown at 24x24 pixels.", } @@ -106,7 +115,7 @@ func (ConsoleLinkList) SwaggerDoc() map[string]string { var map_ConsoleLinkSpec = map[string]string{ "": "ConsoleLinkSpec is the desired console link configuration.", - "location": "location determines which location in the console the link will be appended to.", + "location": "location determines which location in the console the link will be appended to (ApplicationMenu, HelpMenu, UserMenu, NamespaceDashboard).", "applicationMenu": "applicationMenu holds information about section and icon used for the link in the application menu, and it is applicable only when location is set to ApplicationMenu.", "namespaceDashboard": "namespaceDashboard holds information about namespaces in which the dashboard link should appear, and it is applicable only when location is set to NamespaceDashboard. 
If not specified, the link will appear in all namespaces.", } @@ -154,4 +163,33 @@ func (ConsoleNotificationSpec) SwaggerDoc() map[string]string { return map_ConsoleNotificationSpec } +var map_ConsoleYAMLSample = map[string]string{ + "": "ConsoleYAMLSample is an extension for customizing OpenShift web console YAML samples.", + "metadata": "Standard object's metadata.", +} + +func (ConsoleYAMLSample) SwaggerDoc() map[string]string { + return map_ConsoleYAMLSample +} + +var map_ConsoleYAMLSampleList = map[string]string{ + "metadata": "Standard object's metadata.", +} + +func (ConsoleYAMLSampleList) SwaggerDoc() map[string]string { + return map_ConsoleYAMLSampleList +} + +var map_ConsoleYAMLSampleSpec = map[string]string{ + "": "ConsoleYAMLSampleSpec is the desired YAML sample configuration. Samples will appear with their descriptions in a samples sidebar when creating a resources in the web console.", + "TargetResource": "targetResource contains apiVersion and kind of the resource YAML sample is representating.", + "title": "title of the YAML sample.", + "description": "description of the YAML sample.", + "yaml": "yaml is the YAML sample to display.", +} + +func (ConsoleYAMLSampleSpec) SwaggerDoc() map[string]string { + return map_ConsoleYAMLSampleSpec +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/hack/alpha-build-machinery/Makefile b/vendor/github.com/openshift/api/hack/alpha-build-machinery/Makefile new file mode 100644 index 000000000..b44d020e4 --- /dev/null +++ b/vendor/github.com/openshift/api/hack/alpha-build-machinery/Makefile @@ -0,0 +1,61 @@ +SHELL :=/bin/bash +all: verify +.PHONY: all + +makefiles :=$(wildcard ./make/*.example.mk) +examples :=$(wildcard ./make/examples/*/Makefile.test) + +# $1 - makefile name relative to ./make/ folder +# $2 - target +# $3 - output folder +# We need to change dir to the final makefile directory or relative paths won't match. 
+# Dynamic values are replaced with "" so we can do diff against checkout versions. +# Avoid comparing local paths by stripping the prefix. +# Delete lines referencing temporary files and directories +# Unify make error output between versions +# Ignore old cp errors on centos7 +# Ignore different make output with `-k` option +define update-makefile-log +mkdir -p "$(3)" +set -o pipefail; $(MAKE) -j 1 -C "$(dir $(1))" -f "$(notdir $(1))" --no-print-directory --warn-undefined-variables $(2) 2>&1 | \ + sed 's/\.\(buildDate\|versionFromGit\|commitFromGit\|gitTreeState\)="[^"]*" /.\1="" /g' | \ + sed -E 's~/.*/(github.com/openshift/library-go/alpha-build-machinery/.*)~/\1~g' | \ + sed '/\/tmp\/tmp./d' | \ + sed '/git checkout -b/d' | \ + sed -E 's~^[<> ]*((\+\+\+|\-\-\-) \./(testing/)?manifests/.*.yaml).*~\1~' | \ + sed -E 's/^(make\[2\]: \*\*\* \[).*: (.*\] Error 1)/\1\2/' | \ + grep -v 'are the same file' | \ + grep -E -v -e '^make\[2\]: Target `.*'"'"' not remade because of errors\.$$' | \ + tee "$(3)"/"$(notdir $(1))"$(subst ..,.,.$(2).log) + +endef + + +# $1 - makefile name relative to ./make/ folder +# $2 - target +# $3 - output folder +define check-makefile-log +$(call update-makefile-log,$(1),$(2),$(3)) +diff -N "$(1)$(subst ..,.,.$(2).log)" "$(3)/$(notdir $(1))$(subst ..,.,.$(2).log)" + +endef + +update-makefiles: + $(foreach f,$(makefiles),$(call check-makefile-log,$(f),help,$(dir $(f)))) + $(foreach f,$(examples),$(call check-makefile-log,$(f),,$(dir $(f)))) +.PHONY: update-makefiles + +verify-makefiles: tmp_dir:=$(shell mktemp -d) +verify-makefiles: + $(foreach f,$(makefiles),$(call check-makefile-log,$(f),help,$(tmp_dir)/$(dir $(f)))) + $(foreach f,$(examples),$(call check-makefile-log,$(f),,$(tmp_dir)/$(dir $(f)))) +.PHONY: verify-makefiles + +verify: verify-makefiles +.PHONY: verify + +update: update-makefiles +.PHONY: update + + +include ./make/targets/help.mk diff --git a/vendor/github.com/openshift/api/hack/alpha-build-machinery/OWNERS 
b/vendor/github.com/openshift/api/hack/alpha-build-machinery/OWNERS new file mode 100644 index 000000000..ff2b6a24c --- /dev/null +++ b/vendor/github.com/openshift/api/hack/alpha-build-machinery/OWNERS @@ -0,0 +1,4 @@ +reviewers: + - tnozicka +approvers: + - tnozicka diff --git a/vendor/github.com/openshift/api/hack/alpha-build-machinery/README.md b/vendor/github.com/openshift/api/hack/alpha-build-machinery/README.md new file mode 100644 index 000000000..294a5834a --- /dev/null +++ b/vendor/github.com/openshift/api/hack/alpha-build-machinery/README.md @@ -0,0 +1,37 @@ +# library-go/alpha-build-machinery +These are the building blocks for this and many of our other repositories to share code for Makefiles, helper scripts and other build related machinery. + +## Makefiles +`make/` directory contains several predefined makefiles `(*.mk)` to choose from and include one of them as a base in your final `Makefile`. These are the predefined flows providing you with e.g. `build`, `test` or `verify` targets. To start with it is recommended you base Makefile on the corresponding `*.example.mk` using copy&paste. + +As some advanced targets are generated, every Makefile contains `make help` target listing all the available ones. All of the "example" makefiles have a corresponding `.help` file listing all the targets available there. + +Also for advanced use, if none of the predefined flows fits your needs, you can compose the flow from modules in a similar way to how the predefined flows do. + +### Golang +Standard makefile for building pure Golang projects. + - [make/golang.mk](make/golang.mk) + - [make/golang.example.mk](make/golang.example.mk) + - [make/golang.example.mk.help](make/golang.example.mk.help) + +### Default +Standard makefile for OpenShift Golang projects. + +Extends [#Golang]().
+ + - [make/default.mk](make/default.mk) + - [make/default.example.mk](make/default.example.mk) + - [make/default.example.mk.help](make/default.example.mk.help) + +### Operator +Standard makefile for OpenShift Golang projects. + +Extends [#Default](). + + - [make/operator.mk](make/operator.mk) + - [make/operator.example.mk](make/operator.example.mk) + - [make/operator.example.mk.help](make/operator.example.mk.help) + + +## Scripts +`scripts` contains more complicated logic that is used in some make targets. diff --git a/vendor/github.com/openshift/api/hack/alpha-build-machinery/doc.go b/vendor/github.com/openshift/api/hack/alpha-build-machinery/doc.go new file mode 100644 index 000000000..a093b4bd1 --- /dev/null +++ b/vendor/github.com/openshift/api/hack/alpha-build-machinery/doc.go @@ -0,0 +1,14 @@ +// required for gomod to pull in packages. + +package alpha_build_machinery + +// this is a dependency magnet to make it easier to pull in the build-machinery. We want a single import to pull all of it in.
+import ( + _ "github.com/openshift/library-go/alpha-build-machinery/make" + _ "github.com/openshift/library-go/alpha-build-machinery/make/lib" + _ "github.com/openshift/library-go/alpha-build-machinery/make/targets" + _ "github.com/openshift/library-go/alpha-build-machinery/make/targets/golang" + _ "github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift" + _ "github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/operator" + _ "github.com/openshift/library-go/alpha-build-machinery/scripts" +) diff --git a/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/default.example.mk b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/default.example.mk new file mode 100644 index 000000000..fffc5b3a3 --- /dev/null +++ b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/default.example.mk @@ -0,0 +1,40 @@ +all: build +.PHONY: all + +# You can customize go tools depending on the directory layout. +# example: +GO_BUILD_PACKAGES :=./pkg/... +# You can list all the golang related variables by: +# $ make -n --print-data-base | grep ^GO + +# Include the library makefile +include ./default.mk +# All the available targets are listed in .help +# or you can list it live by using `make help` + +# Codegen module needs setting these required variables +CODEGEN_OUTPUT_PACKAGE :=github.com/openshift/cluster-openshift-apiserver-operator/pkg/generated +CODEGEN_API_PACKAGE :=github.com/openshift/cluster-openshift-apiserver-operator/pkg/apis +CODEGEN_GROUPS_VERSION :=openshiftapiserver:v1alpha1 +# You can list all codegen related variables by: +# $ make -n --print-data-base | grep ^CODEGEN + +# This will call a macro called "build-image" which will generate image specific targets based on the parameters: +# $1 - target name +# $2 - image ref +# $3 - Dockerfile path +# $4 - context +# It will generate target "image-$(1)" for building the image and binding it as a prerequisite to target "images".
+$(call build-image,ocp-cli,registry.svc.ci.openshift.org/ocp/4.2:cli,./images/cli/Dockerfile.rhel,.) + +# This will call a macro called "add-bindata" which will generate bindata specific targets based on the parameters: +# $0 - macro name +# $1 - target suffix +# $2 - input dirs +# $3 - prefix +# $4 - pkg +# $5 - output +# It will generate targets {update,verify}-bindata-$(1) logically grouping them in unsuffixed versions of these targets +# and also hooked into {update,verify}-generated for broader integration. +$(call add-bindata,v3.11.0,./bindata/v3.11.0/...,bindata,v311_00_assets,pkg/operator/v311_00_assets/bindata.go) + diff --git a/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/default.example.mk.help.log b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/default.example.mk.help.log new file mode 100644 index 000000000..92aa6acdb --- /dev/null +++ b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/default.example.mk.help.log @@ -0,0 +1,25 @@ +The following make targets are available: +all +build +clean +clean-binaries +help +image-ocp-cli +images +test +test-unit +update +update-bindata +update-codegen +update-deps +update-deps-overrides +update-generated +update-gofmt +verify +verify-bindata +verify-codegen +verify-deps +verify-generated +verify-gofmt +verify-golint +verify-govet diff --git a/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/default.mk b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/default.mk new file mode 100644 index 000000000..564fc1229 --- /dev/null +++ b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/default.mk @@ -0,0 +1,23 @@ +self_dir := $(dir $(lastword $(MAKEFILE_LIST))) + +# We extend the default verify/update for Golang + +verify: verify-codegen +verify: verify-bindata +.PHONY: verify + +update: update-codegen +update: update-bindata +.PHONY: update + + +# We need to be careful to expand all the paths before any include is done +# or 
self_dir could be modified for the next include by the included file. +# Also doing this at the end of the file allows us to use self_dir before it could be modified. +include $(addprefix $(self_dir), \ + targets/openshift/deps.mk \ + targets/openshift/images.mk \ + targets/openshift/bindata.mk \ + targets/openshift/codegen.mk \ + golang.mk \ +) diff --git a/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/doc.go b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/doc.go new file mode 100644 index 000000000..66ba5512e --- /dev/null +++ b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/doc.go @@ -0,0 +1,3 @@ +// required for gomod to pull in packages. + +package alpha_build_machinery diff --git a/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/golang.example.mk b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/golang.example.mk new file mode 100644 index 000000000..aba2c4890 --- /dev/null +++ b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/golang.example.mk @@ -0,0 +1,14 @@ +all: build +.PHONY: all + + +# You can customize go tools depending on the directory layout. +# example: +GO_BUILD_PACKAGES :=./pkg/...
+# You can list all the golang related variables by: +# $ make -n --print-data-base | grep ^GO + +# Include the library makefile +include ./golang.mk +# All the available targets are listed in .help +# or you can list it live by using `make help` diff --git a/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/golang.example.mk.help.log b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/golang.example.mk.help.log new file mode 100644 index 000000000..a5cc906dd --- /dev/null +++ b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/golang.example.mk.help.log @@ -0,0 +1,14 @@ +The following make targets are available: +all +build +clean +clean-binaries +help +test +test-unit +update +update-gofmt +verify +verify-gofmt +verify-golint +verify-govet diff --git a/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/golang.mk b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/golang.mk new file mode 100644 index 000000000..15a0b49bc --- /dev/null +++ b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/golang.mk @@ -0,0 +1,28 @@ +all: build +.PHONY: all + +self_dir := $(dir $(lastword $(MAKEFILE_LIST))) + + +verify: verify-gofmt +verify: verify-govet +.PHONY: verify + +update: update-gofmt +.PHONY: update + + +test: test-unit +.PHONY: test + +clean: clean-binaries +.PHONY: clean + + +# We need to be careful to expand all the paths before any include is done +# or self_dir could be modified for the next include by the included file. +# Also doing this at the end of the file allows us to use self_dir before it could be modified. 
+include $(addprefix $(self_dir), \ + targets/help.mk \ + targets/golang/*.mk \ +) diff --git a/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/lib/doc.go b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/lib/doc.go new file mode 100644 index 000000000..66ba5512e --- /dev/null +++ b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/lib/doc.go @@ -0,0 +1,3 @@ +// required for gomod to pull in packages. + +package alpha_build_machinery diff --git a/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/lib/golang.mk b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/lib/golang.mk new file mode 100644 index 000000000..89d457ef0 --- /dev/null +++ b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/lib/golang.mk @@ -0,0 +1,39 @@ +GO ?=go +GOPATH ?=$(shell $(GO) env GOPATH) +GO_PACKAGE ?=$(shell $(GO) list -e -f '{{ .ImportPath }}' . || echo 'no_package_detected') + +GOOS ?=$(shell $(GO) env GOOS) +GOHOSTOS ?=$(shell $(GO) env GOHOSTOS) +GOARCH ?=$(shell $(GO) env GOARCH) +GOHOSTARCH ?=$(shell $(GO) env GOHOSTARCH) +GOEXE ?=$(shell $(GO) env GOEXE) + +GOFMT ?=gofmt +GOFMT_FLAGS ?=-s -l +GOLINT ?=golint + +GO_FILES ?=$(shell find . -name '*.go' -not -path '*/vendor/*' -not -path '*/_output/*' -print) +GO_PACKAGES ?=./... +GO_TEST_PACKAGES ?=$(GO_PACKAGES) + +GO_BUILD_PACKAGES ?=./cmd/... +GO_BUILD_PACKAGES_EXPANDED ?=$(shell $(GO) list $(GO_BUILD_PACKAGES)) +go_build_binaries =$(notdir $(GO_BUILD_PACKAGES_EXPANDED)) +GO_BUILD_FLAGS ?= +GO_BUILD_BINDIR ?= + +GO_TEST_FLAGS ?=-race + +GO_LD_EXTRAFLAGS ?= + +SOURCE_GIT_TAG ?=$(shell git describe --long --tags --abbrev=7 --match 'v[0-9]*' || echo 'v0.0.0-unknown') +SOURCE_GIT_COMMIT ?=$(shell git rev-parse --short "HEAD^{commit}" 2>/dev/null) +SOURCE_GIT_TREE_STATE ?=$(shell ( ( [ ! 
-d ".git/" ] || git diff --quiet ) && echo 'clean' ) || echo 'dirty') + +define version-ldflags +-X $(1).versionFromGit="$(SOURCE_GIT_TAG)" \ +-X $(1).commitFromGit="$(SOURCE_GIT_COMMIT)" \ +-X $(1).gitTreeState="$(SOURCE_GIT_TREE_STATE)" \ +-X $(1).buildDate="$(shell date -u +'%Y-%m-%dT%H:%M:%SZ')" +endef +GO_LD_FLAGS ?=-ldflags "-s -w $(call version-ldflags,$(GO_PACKAGE)/pkg/version) $(GO_LD_EXTRAFLAGS)" diff --git a/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/lib/tmp.mk b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/lib/tmp.mk new file mode 100644 index 000000000..a0fb65535 --- /dev/null +++ b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/lib/tmp.mk @@ -0,0 +1,2 @@ +PERMANENT_TMP :=_output +PERMANENT_TMP_GOPATH :=$(PERMANENT_TMP)/tools diff --git a/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/operator.example.mk b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/operator.example.mk new file mode 100644 index 000000000..7e6ff98d5 --- /dev/null +++ b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/operator.example.mk @@ -0,0 +1,42 @@ +all: build +.PHONY: all + + +# You can customize go tools depending on the directory layout. +# example: +GO_BUILD_PACKAGES :=./pkg/... 
+# You can list all the golang related variables by: +# $ make -n --print-data-base | grep ^GO + +# Include the library makefile +include ./operator.mk +# All the available targets are listed in .help +# or you can list it live by using `make help` + + +# Codegen module needs setting these required variables +CODEGEN_OUTPUT_PACKAGE :=github.com/openshift/cluster-openshift-apiserver-operator/pkg/generated +CODEGEN_API_PACKAGE :=github.com/openshift/cluster-openshift-apiserver-operator/pkg/apis +CODEGEN_GROUPS_VERSION :=openshiftapiserver:v1alpha1 +# You can list all codegen related variables by: +# $ make -n --print-data-base | grep ^CODEGEN + +# This will call a macro called "build-image" which will generate image specific targets based on the parameters: +# $1 - target name +# $2 - image ref +# $3 - Dockerfile path +# $4 - context +# It will generate target "image-$(1)" for building the image and binding it as a prerequisite to target "images". +$(call build-image,ocp-openshift-apiserver-operator,registry.svc.ci.openshift.org/ocp/4.2:openshift-apiserver-operator,./Dockerfile.rhel,.) + +# This will call a macro called "add-bindata" which will generate bindata specific targets based on the parameters: +# $0 - macro name +# $1 - target suffix +# $2 - input dirs +# $3 - prefix +# $4 - pkg +# $5 - output +# It will generate targets {update,verify}-bindata-$(1) logically grouping them in unsuffixed versions of these targets +# and also hooked into {update,verify}-generated for broader integration. 
+$(call add-bindata,v3.11.0,./bindata/v3.11.0/...,bindata,v311_00_assets,pkg/operator/v311_00_assets/bindata.go) + diff --git a/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/operator.example.mk.help.log b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/operator.example.mk.help.log new file mode 100644 index 000000000..a1489d212 --- /dev/null +++ b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/operator.example.mk.help.log @@ -0,0 +1,25 @@ +The following make targets are available: +all +build +clean +clean-binaries +help +image-ocp-openshift-apiserver-operator +images +test +test-unit +update +update-bindata +update-codegen +update-deps +update-deps-overrides +update-generated +update-gofmt +verify +verify-bindata +verify-codegen +verify-deps +verify-generated +verify-gofmt +verify-golint +verify-govet diff --git a/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/operator.mk b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/operator.mk new file mode 100644 index 000000000..d763df461 --- /dev/null +++ b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/operator.mk @@ -0,0 +1,11 @@ +self_dir := $(dir $(lastword $(MAKEFILE_LIST))) + + +# We need to be careful to expand all the paths before any include is done +# or self_dir could be modified for the next include by the included file. +# Also doing this at the end of the file allows us to use self_dir before it could be modified. +include $(addprefix $(self_dir), \ + default.mk \ + targets/openshift/operator/*.mk \ +) + diff --git a/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/doc.go b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/doc.go new file mode 100644 index 000000000..66ba5512e --- /dev/null +++ b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/doc.go @@ -0,0 +1,3 @@ +// required for gomod to pull in packages. 
+ +package alpha_build_machinery diff --git a/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/golang/build.mk b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/golang/build.mk new file mode 100644 index 000000000..9a71cb793 --- /dev/null +++ b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/golang/build.mk @@ -0,0 +1,28 @@ +self_dir :=$(dir $(lastword $(MAKEFILE_LIST))) + +define build-package + $(if $(GO_BUILD_BINDIR),mkdir -p '$(GO_BUILD_BINDIR)',) + $(strip $(GO) build $(GO_BUILD_FLAGS) $(GO_LD_FLAGS) \ + $(if $(GO_BUILD_BINDIR),-o '$(GO_BUILD_BINDIR)/$(notdir $(1))$(GOEXE)',) \ + $(1)) + +endef + +# We need to build each package separately so go build creates appropriate binaries +build: + $(foreach package,$(GO_BUILD_PACKAGES_EXPANDED),$(call build-package,$(package))) +.PHONY: build + +clean-binaries: + $(RM) $(go_build_binaries) +.PHONY: clean-binaries + +clean: clean-binaries +.PHONY: clean + +# We need to be careful to expand all the paths before any include is done +# or self_dir could be modified for the next include by the included file. +# Also doing this at the end of the file allows us to user self_dir before it could be modified. +include $(addprefix $(self_dir), \ + ../../lib/golang.mk \ +) diff --git a/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/golang/doc.go b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/golang/doc.go new file mode 100644 index 000000000..66ba5512e --- /dev/null +++ b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/golang/doc.go @@ -0,0 +1,3 @@ +// required for gomod to pull in packages. 
+ +package alpha_build_machinery diff --git a/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/golang/test-unit.mk b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/golang/test-unit.mk new file mode 100644 index 000000000..f96c8ccd7 --- /dev/null +++ b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/golang/test-unit.mk @@ -0,0 +1,19 @@ +self_dir :=$(dir $(lastword $(MAKEFILE_LIST))) + +test-unit: +ifndef JUNITFILE + $(GO) test $(GO_TEST_FLAGS) $(GO_TEST_PACKAGES) +else +ifeq (, $(shell which gotest2junit 2>/dev/null)) + $(error gotest2junit not found! Get it by `go get -u github.com/openshift/release/tools/gotest2junit`.) +endif + set -o pipefail; $(GO) test $(GO_TEST_FLAGS) -json $(GO_TEST_PACKAGES) | gotest2junit > $(JUNITFILE) +endif +.PHONY: test-unit + +# We need to be careful to expand all the paths before any include is done +# or self_dir could be modified for the next include by the included file. +# Also doing this at the end of the file allows us to user self_dir before it could be modified. +include $(addprefix $(self_dir), \ + ../../lib/golang.mk \ +) diff --git a/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/golang/verify-update.mk b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/golang/verify-update.mk new file mode 100644 index 000000000..2034cd10e --- /dev/null +++ b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/golang/verify-update.mk @@ -0,0 +1,34 @@ +self_dir :=$(dir $(lastword $(MAKEFILE_LIST))) + +go_files_count :=$(words $(GO_FILES)) + +verify-gofmt: + $(info Running `$(GOFMT) $(GOFMT_FLAGS)` on $(go_files_count) file(s).) 
+ @TMP=$$( mktemp ); \ + $(GOFMT) $(GOFMT_FLAGS) $(GO_FILES) | tee $${TMP}; \ + if [ -s $${TMP} ]; then \ + echo "$@ failed - please run \`make update-gofmt\`"; \ + exit 1; \ + fi; +.PHONY: verify-gofmt + +update-gofmt: + $(info Running `$(GOFMT) $(GOFMT_FLAGS) -w` on $(go_files_count) file(s).) + @$(GOFMT) $(GOFMT_FLAGS) -w $(GO_FILES) +.PHONY: update-gofmt + + +verify-govet: + $(GO) vet $(GO_PACKAGES) +.PHONY: verify-govet + +verify-golint: + $(GOLINT) $(GO_PACKAGES) +.PHONY: verify-govet + +# We need to be careful to expand all the paths before any include is done +# or self_dir could be modified for the next include by the included file. +# Also doing this at the end of the file allows us to user self_dir before it could be modified. +include $(addprefix $(self_dir), \ + ../../lib/golang.mk \ +) diff --git a/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/help.mk b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/help.mk new file mode 100644 index 000000000..55bfbac09 --- /dev/null +++ b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/help.mk @@ -0,0 +1,6 @@ +help: + $(info The following make targets are available:) + @$(MAKE) -f $(firstword $(MAKEFILE_LIST)) --print-data-base --question no-such-target 2>&1 | grep -v 'no-such-target' | \ + grep -v -e '^no-such-target' -e '^makefile' | \ + awk '/^[^.%][-A-Za-z0-9_]*:/ { print substr($$1, 1, length($$1)-1) }' | sort -u +.PHONY: help diff --git a/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/openshift/bindata.mk b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/openshift/bindata.mk new file mode 100644 index 000000000..0e78cb927 --- /dev/null +++ b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/openshift/bindata.mk @@ -0,0 +1,65 @@ +TMP_GOPATH :=$(shell mktemp -d) + + +.ensure-go-bindata: + ln -s $(abspath ./vendor) "$(TMP_GOPATH)/src" + export GOPATH=$(TMP_GOPATH) && 
export GOBIN=$(TMP_GOPATH)/bin && go install "./vendor/github.com/jteeuwen/go-bindata/..." + +# $1 - input dirs +# $2 - prefix +# $3 - pkg +# $4 - output +# $5 - output prefix +define run-bindata + $(TMP_GOPATH)/bin/go-bindata -nocompress -nometadata \ + -prefix "$(2)" \ + -pkg "$(3)" \ + -o "$(5)$(4)" \ + -ignore "OWNERS" \ + $(1) && \ + gofmt -s -w "$(5)$(4)" +endef + +# $1 - name +# $2 - input dirs +# $3 - prefix +# $4 - pkg +# $5 - output +define add-bindata-internal +update-bindata-$(1): .ensure-go-bindata + $(call run-bindata,$(2),$(3),$(4),$(5),) +.PHONY: update-bindata-$(1) + +update-bindata: update-bindata-$(1) +.PHONY: update-bindata + + +verify-bindata-$(1): .ensure-go-bindata +verify-bindata-$(1): TMP_DIR := $$(shell mktemp -d) +verify-bindata-$(1): + $(call run-bindata,$(2),$(3),$(4),$(5),$$(TMP_DIR)/) && \ + diff -Naup {.,$$(TMP_DIR)}/$(5) +.PHONY: verify-bindata-$(1) + +verify-bindata: verify-bindata-$(1) +.PHONY: verify-bindata +endef + + +update-generated: update-bindata +.PHONY: update-bindata + +update: update-generated +.PHONY: update + + +verify-generated: verify-bindata +.PHONY: verify-bindata + +verify: verify-generated +.PHONY: verify + + +define add-bindata +$(eval $(call add-bindata-internal,$(1),$(2),$(3),$(4),$(5))) +endef diff --git a/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/openshift/codegen.mk b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/openshift/codegen.mk new file mode 100644 index 000000000..247de9417 --- /dev/null +++ b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/openshift/codegen.mk @@ -0,0 +1,41 @@ +CODEGEN_PKG ?=./vendor/k8s.io/code-generator/ +CODEGEN_GENERATORS ?=all +CODEGEN_OUTPUT_BASE ?=../../.. 
+CODEGEN_GO_HEADER_FILE ?=/dev/null + +CODEGEN_API_PACKAGE ?=$(error CODEGEN_API_PACKAGE is required) +CODEGEN_GROUPS_VERSION ?=$(error CODEGEN_GROUPS_VERSION is required) +CODEGEN_OUTPUT_PACKAGE ?=$(error CODEGEN_OUTPUT_PACKAGE is required) + +define run-codegen +$(CODEGEN_PKG)/generate-groups.sh \ + "$(CODEGEN_GENERATORS)" \ + "$(CODEGEN_OUTPUT_PACKAGE)" \ + "$(CODEGEN_API_PACKAGE)" \ + "$(CODEGEN_GROUPS_VERSION)" \ + --output-base $(CODEGEN_OUTPUT_BASE) \ + --go-header-file $(CODEGEN_GO_HEADER_FILE) \ + $1 +endef + + +verify-codegen: + $(call run-codegen,--verify-only) +.PHONY: verify-codegen + +verify-generated: verify-codegen +.PHONY: verify-generated + +verify: verify-generated +.PHONY: verify + + +update-codegen: + $(call run-codegen) +.PHONY: update-codegen + +update-generated: update-codegen +.PHONY: update-generated + +update: update-generated +.PHONY: update diff --git a/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/openshift/controller-gen.mk b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/openshift/controller-gen.mk new file mode 100644 index 000000000..fd0ff401e --- /dev/null +++ b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/openshift/controller-gen.mk @@ -0,0 +1,38 @@ +self_dir :=$(dir $(lastword $(MAKEFILE_LIST))) + +CONTROLLER_GEN_VERSION ?=v0.2.1 +CONTROLLER_GEN_TEMP ?=$(PERMANENT_TMP_GOPATH)/src/sigs.k8s.io/controller-tools +controller_gen_gopath =$(shell realpath -m $(CONTROLLER_GEN_TEMP)/../..) +CONTROLLER_GEN ?=$(CONTROLLER_GEN_TEMP)/controller-gen + +ensure-controller-gen: +ifeq "" "$(wildcard $(CONTROLLER_GEN))" + $(info Installing controller-gen into "$(CONTROLLER_GEN)") + mkdir -p '$(CONTROLLER_GEN_TEMP)' + git clone -b '$(CONTROLLER_GEN_VERSION)' --single-branch --depth=1 https://github.com/kubernetes-sigs/controller-tools.git '$(CONTROLLER_GEN_TEMP)' + @echo '$(CONTROLLER_GEN_TEMP)/../..' 
+ cd '$(CONTROLLER_GEN_TEMP)' && export GO111MODULE=on GOPATH='$(controller_gen_gopath)' && $(GO) mod vendor 2>/dev/null && $(GO) build -mod=vendor ./cmd/controller-gen +else + $(info Using existing controller-gen from "$(CONTROLLER_GEN)") +endif +.PHONY: ensure-controller-gen + +clean-controller-gen: + if [ -d '$(controller_gen_gopath)/pkg/mod' ]; then chmod +w -R '$(controller_gen_gopath)/pkg/mod'; fi + $(RM) -r '$(CONTROLLER_GEN_TEMP)' '$(controller_gen_gopath)/pkg/mod' + @mkdir -p '$(CONTROLLER_GEN_TEMP)' # to make sure we can do the next step and to avoid using '/*' wildcard on the line above which could go crazy on wrong substitution + if [ -d '$(CONTROLLER_GEN_TEMP)' ]; then rmdir --ignore-fail-on-non-empty -p '$(CONTROLLER_GEN_TEMP)'; fi + @mkdir -p '$(controller_gen_gopath)/pkg/mod' # to make sure we can do the next step and to avoid using '/*' wildcard on the line above which could go crazy on wrong substitution + if [ -d '$(controller_gen_gopath)/pkg/mod' ]; then rmdir --ignore-fail-on-non-empty -p '$(controller_gen_gopath)/pkg/mod'; fi +.PHONY: clean-controller-gen + +clean: clean-controller-gen + + +# We need to be careful to expand all the paths before any include is done +# or self_dir could be modified for the next include by the included file. +# Also doing this at the end of the file allows us to use self_dir before it could be modified. 
+include $(addprefix $(self_dir), \ + ../../lib/golang.mk \ + ../../lib/tmp.mk \ +) diff --git a/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/openshift/crd-schema-gen.mk b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/openshift/crd-schema-gen.mk new file mode 100644 index 000000000..44963e96d --- /dev/null +++ b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/openshift/crd-schema-gen.mk @@ -0,0 +1,80 @@ +self_dir :=$(dir $(lastword $(MAKEFILE_LIST))) + +# $1 - crd file +# $2 - patch file +define patch-crd + $(YQ) m -i -x '$(1)' '$(2)' + +endef + +empty := + +define diff-file + diff -Naup '$(1)' '$(2)' + +endef + +# $1 - apis +# $2 - manifests +# $3 - output +define run-crd-gen + '$(CONTROLLER_GEN)' \ + schemapatch:manifests="$(2)" \ + paths="$(subst $(empty) ,;,$(1))" \ + output:dir="$(3)" + $$(foreach p,$$(wildcard $(2)/*.crd.yaml-merge-patch),$$(call patch-crd,$$(subst $(2),$(3),$$(basename $$(p))).yaml,$$(p))) +endef + + +# $1 - target name +# $2 - apis +# $3 - manifests +# $4 - output +define add-crd-gen-internal + +update-codegen-crds-$(1): ensure-controller-gen ensure-yq + $(call run-crd-gen,$(2),$(3),$(4)) +.PHONY: update-codegen-crds-$(1) + +update-codegen-crds: update-codegen-crds-$(1) +.PHONY: update-codegen-crds + +verify-codegen-crds-$(1): VERIFY_CODEGEN_CRD_TMP_DIR:=$(shell mktemp -d) +verify-codegen-crds-$(1): ensure-controller-gen ensure-yq + $(call run-crd-gen,$(2),$(3),$$(VERIFY_CODEGEN_CRD_TMP_DIR)) + $$(foreach p,$$(wildcard $(3)/*.crd.yaml),$$(call diff-file,$$(p),$$(subst $(3),$$(VERIFY_CODEGEN_CRD_TMP_DIR),$$(p)))) +.PHONY: verify-codegen-crds-$(1) + +verify-codegen-crds: verify-codegen-crds-$(1) +.PHONY: verify-codegen-crds + +endef + + +update-generated: update-codegen-crds +.PHONY: update-generated + +update: update-generated +.PHONY: update + +verify-generated: verify-codegen-crds +.PHONY: verify-generated + +verify: verify-generated +.PHONY: verify + + +define 
add-crd-gen +$(eval $(call add-crd-gen-internal,$(1),$(2),$(3),$(4))) +endef + + +# We need to be careful to expand all the paths before any include is done +# or self_dir could be modified for the next include by the included file. +# Also doing this at the end of the file allows us to use self_dir before it could be modified. +include $(addprefix $(self_dir), \ + ../../lib/golang.mk \ + ../../lib/tmp.mk \ + ../../targets/openshift/controller-gen.mk \ + ../../targets/openshift/yq.mk \ +) diff --git a/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/openshift/deps.mk b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/openshift/deps.mk new file mode 100644 index 000000000..fafa8f9da --- /dev/null +++ b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/openshift/deps.mk @@ -0,0 +1,35 @@ +self_dir :=$(dir $(lastword $(MAKEFILE_LIST))) +scripts_dir :=$(self_dir)/../../../scripts + +# We need to force locale so different envs sort files the same way for recursive traversals +deps_diff :=LC_COLLATE=C diff --no-dereference -N + +update-deps: + $(scripts_dir)/$@.sh +.PHONY: update-deps + +# $1 - temporary directory to restore vendor dependencies from glide.lock +define restore-deps + ln -s $(abspath ./) "$(1)"/current + cp -R -H ./ "$(1)"/updated + $(RM) -r "$(1)"/updated/vendor + cd "$(1)"/updated && glide install --strip-vendor && find ./vendor -name '.hg_archival.txt' -delete + cd "$(1)" && $(deps_diff) -r {current,updated}/vendor/ > updated/glide.diff || true +endef + +verify-deps: tmp_dir:=$(shell mktemp -d) +verify-deps: + $(call restore-deps,$(tmp_dir)) + @echo $(deps_diff) '$(tmp_dir)'/{current,updated}/glide.diff + @ $(deps_diff) '$(tmp_dir)'/{current,updated}/glide.diff || ( \ + echo "ERROR: Content of 'vendor/' directory doesn't match 'glide.lock' and the overrides in 'glide.diff'!" 
&& \ + echo "If this is an intentional change (a carry patch) please update the 'glide.diff' using 'make update-deps-overrides'." && \ + exit 1 \ + ) +.PHONY: verify-deps + +update-deps-overrides: tmp_dir:=$(shell mktemp -d) +update-deps-overrides: + $(call restore-deps,$(tmp_dir)) + cp "$(tmp_dir)"/{updated,current}/glide.diff +.PHONY: update-deps-overrides diff --git a/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/openshift/doc.go b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/openshift/doc.go new file mode 100644 index 000000000..66ba5512e --- /dev/null +++ b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/openshift/doc.go @@ -0,0 +1,3 @@ +// required for gomod to pull in packages. + +package alpha_build_machinery diff --git a/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/openshift/images.mk b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/openshift/images.mk new file mode 100644 index 000000000..00e76ac26 --- /dev/null +++ b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/openshift/images.mk @@ -0,0 +1,29 @@ +# IMAGE_BUILD_EXTRA_FLAGS lets you add extra flags for imagebuilder +# e.g. 
to mount secrets and repo information into base image like: +# make images IMAGE_BUILD_EXTRA_FLAGS='-mount ~/projects/origin-repos/4.2/:/etc/yum.repos.d/' +IMAGE_BUILD_DEFAULT_FLAGS ?=--allow-pull +IMAGE_BUILD_EXTRA_FLAGS ?= + +# $1 - target name +# $2 - image ref +# $3 - Dockerfile path +# $4 - context +define build-image-internal +image-$(1): + $(strip \ + imagebuilder \ + $(IMAGE_BUILD_DEFAULT_FLAGS) \ + -t $(2) \ + -f $(3) \ + $(IMAGE_BUILD_EXTRA_FLAGS) \ + $(4) \ + ) +.PHONY: image-$(1) + +images: image-$(1) +.PHONY: images +endef + +define build-image +$(eval $(call build-image-internal,$(1),$(2),$(3),$(4))) +endef diff --git a/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/openshift/operator/doc.go b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/openshift/operator/doc.go new file mode 100644 index 000000000..66ba5512e --- /dev/null +++ b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/openshift/operator/doc.go @@ -0,0 +1,3 @@ +// required for gomod to pull in packages. + +package alpha_build_machinery diff --git a/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/openshift/operator/release.mk b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/openshift/operator/release.mk new file mode 100644 index 000000000..07fc5605a --- /dev/null +++ b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/openshift/operator/release.mk @@ -0,0 +1,7 @@ +# If we need unified behaviour specific to operators, this folder is the place. + +# It seems that our previous origin-release jq based replacement is supposed to be done +# with `oc adm release new` so it might drop this target. +#origin-release: +# $(error Not implemented.) 
+#.PHONY: origin-release diff --git a/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/openshift/rpm.mk b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/openshift/rpm.mk new file mode 100644 index 000000000..b235197c7 --- /dev/null +++ b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/openshift/rpm.mk @@ -0,0 +1,41 @@ +RPM_OUTPUT_DIR ?=_output +RPM_TOPDIR ?=$(abspath ./) +RPM_BUILDDIR ?=$(RPM_TOPDIR) +RPM_BUILDROOT ?=$(RPM_TOPDIR) +RPM_SOURCEDIR ?=$(RPM_TOPDIR) +RPM_SPECDIR ?=$(RPM_TOPDIR) +RPM_RPMDIR ?=$(RPM_TOPDIR)/$(RPM_OUTPUT_DIR)/rpms +RPM_SRCRPMDIR ?=$(RPM_TOPDIR)/$(RPM_OUTPUT_DIR)/srpms + +RPM_SPECFILES ?=$(wildcard *.spec) +RPM_BUILDFLAGS ?=-ba +RPM_EXTRAFLAGS ?= + +rpm-build: + $(strip \ + rpmbuild $(RPM_BUILDFLAGS) \ + --define "_topdir $(RPM_TOPDIR)" \ + --define "_builddir $(RPM_BUILDDIR)" \ + --define "_buildrootdir $(RPM_BUILDROOT)" \ + --define "_rpmdir $(RPM_RPMDIR)" \ + --define "_srcrpmdir $(RPM_SRCRPMDIR)" \ + --define "_specdir $(RPM_SPECDIR)" \ + --define "_sourcedir $(RPM_SOURCEDIR)" \ + --define "go_package $(GO_PACKAGE)" \ + $(RPM_EXTRAFLAGS) \ + $(RPM_SPECFILES) \ + ) + +clean-rpms: + $(RM) -r '$(RPM_RPMDIR)' '$(RPM_SRCRPMDIR)' + if [ -d '$(RPM_OUTPUT_DIR)' ]; then rmdir --ignore-fail-on-non-empty '$(RPM_OUTPUT_DIR)'; fi +.PHONY: clean-rpms + +clean: clean-rpms + +# We need to be careful to expand all the paths before any include is done +# or self_dir could be modified for the next include by the included file. +# Also doing this at the end of the file allows us to user self_dir before it could be modified. 
+include $(addprefix $(self_dir), \ + ../../lib/golang.mk \ +) diff --git a/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/openshift/yq.mk b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/openshift/yq.mk new file mode 100644 index 000000000..7dd556d40 --- /dev/null +++ b/vendor/github.com/openshift/api/hack/alpha-build-machinery/make/targets/openshift/yq.mk @@ -0,0 +1,32 @@ +self_dir :=$(dir $(lastword $(MAKEFILE_LIST))) + +YQ ?=$(PERMANENT_TMP_GOPATH)/bin/yq +yq_dir :=$(dir $(YQ)) + + +ensure-yq: +ifeq "" "$(wildcard $(YQ))" + $(info Installing yq into '$(YQ)') + mkdir -p '$(yq_dir)' + curl -s -f -L https://github.com/mikefarah/yq/releases/download/2.4.0/yq_$(GOHOSTOS)_$(GOHOSTARCH) -o '$(YQ)' + chmod +x '$(YQ)'; +else + $(info Using existing yq from "$(YQ)") +endif +.PHONY: ensure-yq + +clean-yq: + $(RM) '$(YQ)' + if [ -d '$(yq_dir)' ]; then rmdir --ignore-fail-on-non-empty -p '$(yq_dir)'; fi +.PHONY: clean-yq + +clean: clean-yq + + +# We need to be careful to expand all the paths before any include is done +# or self_dir could be modified for the next include by the included file. +# Also doing this at the end of the file allows us to user self_dir before it could be modified. +include $(addprefix $(self_dir), \ + ../../lib/golang.mk \ + ../../lib/tmp.mk \ +) diff --git a/vendor/github.com/openshift/api/hack/alpha-build-machinery/scripts/doc.go b/vendor/github.com/openshift/api/hack/alpha-build-machinery/scripts/doc.go new file mode 100644 index 000000000..66ba5512e --- /dev/null +++ b/vendor/github.com/openshift/api/hack/alpha-build-machinery/scripts/doc.go @@ -0,0 +1,3 @@ +// required for gomod to pull in packages. 
+ +package alpha_build_machinery diff --git a/vendor/github.com/openshift/api/hack/alpha-build-machinery/scripts/update-deps.sh b/vendor/github.com/openshift/api/hack/alpha-build-machinery/scripts/update-deps.sh new file mode 100755 index 000000000..46812e939 --- /dev/null +++ b/vendor/github.com/openshift/api/hack/alpha-build-machinery/scripts/update-deps.sh @@ -0,0 +1,27 @@ +#!/bin/bash -e + +readonly GLIDE_MINOR_VERSION="13" +readonly REQUIRED_GLIDE_VERSION="0.$GLIDE_MINOR_VERSION" + +function verify_glide_version() { + if ! command -v glide &> /dev/null; then + echo "[FATAL] Glide was not found in \$PATH. Please install version ${REQUIRED_GLIDE_VERSION} or newer." + exit 1 + fi + + local glide_version + glide_version=($(glide --version)) + if ! echo "${glide_version[2]#v}" | awk -F. -v min=$GLIDE_MINOR_VERSION '{ exit $2 < min }'; then + echo "Detected glide version: ${glide_version[*]}." + echo "Please install Glide version ${REQUIRED_GLIDE_VERSION} or newer." + exit 1 + fi +} + +verify_glide_version + +glide update --strip-vendor + +# glide doesn't handle mercurial properly and leaves internal files (equivalent of .git/) laying around +# Given those files differ by mercurial version it was cloned with, verify-deps would break +find ./vendor -name '.hg_archival.txt' -delete diff --git a/vendor/github.com/openshift/api/hack/lib/init.sh b/vendor/github.com/openshift/api/hack/lib/init.sh index 9b2c2e7d6..c270cb492 100644 --- a/vendor/github.com/openshift/api/hack/lib/init.sh +++ b/vendor/github.com/openshift/api/hack/lib/init.sh @@ -30,7 +30,6 @@ security/v1 \ servicecertsigner/v1alpha1 \ template/v1 \ user/v1 \ -webconsole/v1 \ " API_PACKAGES="\ github.com/openshift/api/apps/v1,\ diff --git a/vendor/github.com/openshift/api/hack/update-deepcopy.sh b/vendor/github.com/openshift/api/hack/update-deepcopy.sh index b9a2a21ba..fead8a958 100755 --- a/vendor/github.com/openshift/api/hack/update-deepcopy.sh +++ b/vendor/github.com/openshift/api/hack/update-deepcopy.sh @@ 
-10,7 +10,7 @@ verify="${VERIFY:-}" ${CODEGEN_PKG}/generate-groups.sh "deepcopy" \ github.com/openshift/api/generated \ github.com/openshift/api \ - "apps:v1 authorization:v1 build:v1 config:v1 console:v1 image:v1,docker10,dockerpre012 kubecontrolplane:v1 legacyconfig:v1 network:v1 oauth:v1 openshiftcontrolplane:v1 operator:v1 operator:v1alpha1 osin:v1 project:v1 quota:v1 route:v1 security:v1 servicecertsigner:v1alpha1 template:v1 user:v1 webconsole:v1" \ + "apps:v1 authorization:v1 build:v1 config:v1 console:v1 image:v1,docker10,dockerpre012 kubecontrolplane:v1 legacyconfig:v1 network:v1 oauth:v1 openshiftcontrolplane:v1 operator:v1 operator:v1alpha1 osin:v1 project:v1 quota:v1 route:v1 security:v1 servicecertsigner:v1alpha1 template:v1 user:v1" \ --go-header-file ${SCRIPT_ROOT}/hack/empty.txt \ ${verify} diff --git a/vendor/github.com/openshift/api/install.go b/vendor/github.com/openshift/api/install.go index 3fc1729a8..3dee1efbf 100644 --- a/vendor/github.com/openshift/api/install.go +++ b/vendor/github.com/openshift/api/install.go @@ -51,7 +51,6 @@ import ( "github.com/openshift/api/servicecertsigner" "github.com/openshift/api/template" "github.com/openshift/api/user" - "github.com/openshift/api/webconsole" // just make sure this compiles. 
Don't add it to a scheme _ "github.com/openshift/api/legacyconfig/v1" @@ -77,7 +76,6 @@ var ( servicecertsigner.Install, template.Install, user.Install, - webconsole.Install, ) // Install is a function which adds every version of every openshift group to a scheme Install = schemeBuilder.AddToScheme diff --git a/vendor/github.com/openshift/api/operator/v1/types_dns.go b/vendor/github.com/openshift/api/operator/v1/types_dns.go index 467696bb5..5172dbe77 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_dns.go +++ b/vendor/github.com/openshift/api/operator/v1/types_dns.go @@ -49,6 +49,9 @@ type DNSStatus struct { // Example: dig foo.com @ // // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + // + // +kubebuilder:validation:Required + // +required ClusterIP string `json:"clusterIP"` // clusterDomain is the local cluster DNS domain suffix for DNS services. @@ -57,6 +60,9 @@ type DNSStatus struct { // Example: "cluster.local" // // More info: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service + // + // +kubebuilder:validation:Required + // +required ClusterDomain string `json:"clusterDomain"` // conditions provide information about the state of the DNS on the cluster. diff --git a/vendor/github.com/openshift/api/operator/v1/types_ingress.go b/vendor/github.com/openshift/api/operator/v1/types_ingress.go index 1d119b5b4..e1e095b52 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_ingress.go +++ b/vendor/github.com/openshift/api/operator/v1/types_ingress.go @@ -4,6 +4,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" corev1 "k8s.io/api/core/v1" + + configv1 "github.com/openshift/api/config/v1" ) // +genclient @@ -125,6 +127,19 @@ type IngressControllerSpec struct { // // +optional NodePlacement *NodePlacement `json:"nodePlacement,omitempty"` + + // tlsSecurityProfile specifies settings for TLS connections for ingresscontrollers. 
+ // + // If unset, the default is based on the apiservers.config.openshift.io/cluster resource. + // + // Note that when using the Old, Intermediate, and Modern profile types, the effective + // profile configuration is subject to change between releases. For example, given + // a specification to use the Intermediate profile deployed on release X.Y.Z, an upgrade + // to release X.Y.Z+1 may cause a new profile configuration to be applied to the ingress + // controller, resulting in a rollout. + // + // +optional + TLSSecurityProfile *configv1.TLSSecurityProfile `json:"tlsSecurityProfile,omitempty"` } // NodePlacement describes node scheduling configuration for an ingress @@ -217,7 +232,7 @@ type EndpointPublishingStrategy struct { // In this configuration, the ingress controller deployment uses container // networking. A LoadBalancer Service is created to publish the deployment. // - // See: https://kubernetes.io/docs/concepts/services-networking/#loadbalancer + // See: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer // // If domain is set, a wildcard DNS record will be managed to point at the // LoadBalancer Service's external name. DNS records are managed only in DNS @@ -332,6 +347,10 @@ type IngressControllerStatus struct { // * DNS records have been successfully created. // - False if any of those conditions are unsatisfied. Conditions []OperatorCondition `json:"conditions,omitempty"` + + // tlsProfile is the TLS connection configuration that is in effect. 
+ // +optional + TLSProfile *configv1.TLSProfileSpec `json:"tlsProfile,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/github.com/openshift/api/operator/v1/types_network.go b/vendor/github.com/openshift/api/operator/v1/types_network.go index d939df2bf..101e7aaf9 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_network.go +++ b/vendor/github.com/openshift/api/operator/v1/types_network.go @@ -75,7 +75,7 @@ type NetworkSpec struct { // HostPrefix (in CIDR notation) will be allocated when nodes join the cluster. // Not all network providers support multiple ClusterNetworks type ClusterNetworkEntry struct { - CIDR string `json:"cidr"` + CIDR string `json:"cidr"` // +kubebuilder:validation:Minimum=0 HostPrefix uint32 `json:"hostPrefix"` } @@ -258,7 +258,7 @@ type KuryrConfig struct { OpenStackServiceNetwork string `json:"openStackServiceNetwork,omitempty"` } -// ovnKubernetesConfig is the proposed configuration parameters for networks +// ovnKubernetesConfig contains the configuration parameters for networks // using the ovn-kubernetes network project type OVNKubernetesConfig struct { // mtu is the MTU to use for the tunnel interface. This must be 100 @@ -267,6 +267,11 @@ type OVNKubernetesConfig struct { // +kubebuilder:validation:Minimum=0 // +optional MTU *uint32 `json:"mtu,omitempty"` + // geneve port is the UDP port to be used by geneve encapulation. 
+ // Default is 6081 + // +kubebuilder:validation:Minimum=1 + // +optional + GenevePort *uint32 `json:"genevePort,omitempty"` } // NetworkType describes the network plugin type to configure diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go index 5ed29b74a..ebecfb874 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go @@ -5,6 +5,7 @@ package v1 import ( + configv1 "github.com/openshift/api/config/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -681,6 +682,11 @@ func (in *IngressControllerSpec) DeepCopyInto(out *IngressControllerSpec) { *out = new(NodePlacement) (*in).DeepCopyInto(*out) } + if in.TLSSecurityProfile != nil { + in, out := &in.TLSSecurityProfile, &out.TLSSecurityProfile + *out = new(configv1.TLSSecurityProfile) + (*in).DeepCopyInto(*out) + } return } @@ -709,6 +715,11 @@ func (in *IngressControllerStatus) DeepCopyInto(out *IngressControllerStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.TLSProfile != nil { + in, out := &in.TLSProfile, &out.TLSProfile + *out = new(configv1.TLSProfileSpec) + (*in).DeepCopyInto(*out) + } return } @@ -1286,6 +1297,11 @@ func (in *OVNKubernetesConfig) DeepCopyInto(out *OVNKubernetesConfig) { *out = new(uint32) **out = **in } + if in.GenevePort != nil { + in, out := &in.GenevePort, &out.GenevePort + *out = new(uint32) + **out = **in + } return } diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go index 57e40e8b1..af643c007 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go @@ -230,7 
+230,7 @@ func (EtcdList) SwaggerDoc() map[string]string { var map_EndpointPublishingStrategy = map[string]string{ "": "EndpointPublishingStrategy is a way to publish the endpoints of an IngressController, and represents the type and any additional configuration for a specific type.", - "type": "type is the publishing strategy to use. Valid values are:\n\n* LoadBalancerService\n\nPublishes the ingress controller using a Kubernetes LoadBalancer Service.\n\nIn this configuration, the ingress controller deployment uses container networking. A LoadBalancer Service is created to publish the deployment.\n\nSee: https://kubernetes.io/docs/concepts/services-networking/#loadbalancer\n\nIf domain is set, a wildcard DNS record will be managed to point at the LoadBalancer Service's external name. DNS records are managed only in DNS zones defined by dns.config.openshift.io/cluster .spec.publicZone and .spec.privateZone.\n\nWildcard DNS management is currently supported only on the AWS, Azure, and GCP platforms.\n\n* HostNetwork\n\nPublishes the ingress controller on node ports where the ingress controller is deployed.\n\nIn this configuration, the ingress controller deployment uses host networking, bound to node ports 80 and 443. The user is responsible for configuring an external load balancer to publish the ingress controller via the node ports.\n\n* Private\n\nDoes not publish the ingress controller.\n\nIn this configuration, the ingress controller deployment uses container networking, and is not explicitly published. The user must manually publish the ingress controller.", + "type": "type is the publishing strategy to use. Valid values are:\n\n* LoadBalancerService\n\nPublishes the ingress controller using a Kubernetes LoadBalancer Service.\n\nIn this configuration, the ingress controller deployment uses container networking. 
A LoadBalancer Service is created to publish the deployment.\n\nSee: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer\n\nIf domain is set, a wildcard DNS record will be managed to point at the LoadBalancer Service's external name. DNS records are managed only in DNS zones defined by dns.config.openshift.io/cluster .spec.publicZone and .spec.privateZone.\n\nWildcard DNS management is currently supported only on the AWS, Azure, and GCP platforms.\n\n* HostNetwork\n\nPublishes the ingress controller on node ports where the ingress controller is deployed.\n\nIn this configuration, the ingress controller deployment uses host networking, bound to node ports 80 and 443. The user is responsible for configuring an external load balancer to publish the ingress controller via the node ports.\n\n* Private\n\nDoes not publish the ingress controller.\n\nIn this configuration, the ingress controller deployment uses container networking, and is not explicitly published. The user must manually publish the ingress controller.", "loadBalancer": "loadBalancer holds parameters for the load balancer. Present only if type is LoadBalancerService.", "hostNetwork": "hostNetwork holds parameters for the HostNetwork endpoint publishing strategy. Present only if type is HostNetwork.", "private": "private holds parameters for the Private endpoint publishing strategy. Present only if type is Private.", @@ -275,6 +275,7 @@ var map_IngressControllerSpec = map[string]string{ "namespaceSelector": "namespaceSelector is used to filter the set of namespaces serviced by the ingress controller. This is useful for implementing shards.\n\nIf unset, the default is no filtering.", "routeSelector": "routeSelector is used to filter the set of Routes serviced by the ingress controller. 
This is useful for implementing shards.\n\nIf unset, the default is no filtering.", "nodePlacement": "nodePlacement enables explicit control over the scheduling of the ingress controller.\n\nIf unset, defaults are used. See NodePlacement for more details.", + "tlsSecurityProfile": "tlsSecurityProfile specifies settings for TLS connections for ingresscontrollers.\n\nIf unset, the default is based on the apiservers.config.openshift.io/cluster resource.\n\nNote that when using the Old, Intermediate, and Modern profile types, the effective profile configuration is subject to change between releases. For example, given a specification to use the Intermediate profile deployed on release X.Y.Z, an upgrade to release X.Y.Z+1 may cause a new profile configuration to be applied to the ingress controller, resulting in a rollout.", } func (IngressControllerSpec) SwaggerDoc() map[string]string { @@ -288,6 +289,7 @@ var map_IngressControllerStatus = map[string]string{ "domain": "domain is the actual domain in use.", "endpointPublishingStrategy": "endpointPublishingStrategy is the actual strategy in use.", "conditions": "conditions is a list of conditions and their status.\n\nAvailable means the ingress controller deployment is available and servicing route and ingress resources (i.e, .status.availableReplicas equals .spec.replicas)\n\nThere are additional conditions which indicate the status of other ingress controller features and capabilities.\n\n * LoadBalancerManaged\n - True if the following conditions are met:\n * The endpoint publishing strategy requires a service load balancer.\n - False if any of those conditions are unsatisfied.\n\n * LoadBalancerReady\n - True if the following conditions are met:\n * A load balancer is managed.\n * The load balancer is ready.\n - False if any of those conditions are unsatisfied.\n\n * DNSManaged\n - True if the following conditions are met:\n * The endpoint publishing strategy and platform support DNS.\n * The ingress controller 
domain is set.\n * dns.config.openshift.io/cluster configures DNS zones.\n - False if any of those conditions are unsatisfied.\n\n * DNSReady\n - True if the following conditions are met:\n * DNS is managed.\n * DNS records have been successfully created.\n - False if any of those conditions are unsatisfied.", + "tlsProfile": "tlsProfile is the TLS connection configuration that is in effect.", } func (IngressControllerStatus) SwaggerDoc() map[string]string { @@ -451,8 +453,9 @@ func (NetworkStatus) SwaggerDoc() map[string]string { } var map_OVNKubernetesConfig = map[string]string{ - "": "ovnKubernetesConfig is the proposed configuration parameters for networks using the ovn-kubernetes network project", - "mtu": "mtu is the MTU to use for the tunnel interface. This must be 100 bytes smaller than the uplink mtu. Default is 1400", + "": "ovnKubernetesConfig contains the configuration parameters for networks using the ovn-kubernetes network project", + "mtu": "mtu is the MTU to use for the tunnel interface. This must be 100 bytes smaller than the uplink mtu. Default is 1400", + "genevePort": "geneve port is the UDP port to be used by geneve encapulation. 
Default is 6081", } func (OVNKubernetesConfig) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/0000_10_config-operator_01_imagecontentsourcepolicy.crd.yaml b/vendor/github.com/openshift/api/operator/v1alpha1/0000_10_config-operator_01_imagecontentsourcepolicy.crd.yaml new file mode 100644 index 000000000..d43fddef3 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1alpha1/0000_10_config-operator_01_imagecontentsourcepolicy.crd.yaml @@ -0,0 +1,88 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: imagecontentsourcepolicies.operator.openshift.io +spec: + group: operator.openshift.io + scope: Cluster + names: + kind: ImageContentSourcePolicy + singular: imagecontentsourcepolicy + plural: imagecontentsourcepolicies + listKind: ImageContentSourcePolicyList + versions: + - name: v1alpha1 + served: true + storage: true + subresources: + status: {} + "validation": + "openAPIV3Schema": + description: ImageContentSourcePolicy holds cluster-wide information about how + to handle registry mirror rules. When multiple policies are defined, the outcome + of the behavior is defined on each field. + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: Standard object's metadata. 
+ type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + repositoryDigestMirrors: + description: "repositoryDigestMirrors allows images referenced by image + digests in pods to be pulled from alternative mirrored repository + locations. The image pull specification provided to the pod will be + compared to the source locations described in RepositoryDigestMirrors + and the image may be pulled down from any of the mirrors in the list + instead of the specified repository allowing administrators to choose + a potentially faster mirror. Only image pull specifications that have + an image disgest will have this behavior applied to them - tags will + continue to be pulled from the specified repository in the pull spec. + \n Each “source” repository is treated independently; configurations + for different “source” repositories don’t interact. \n When multiple + policies are defined for the same “source” repository, the sets of + defined mirrors will be merged together, preserving the relative order + of the mirrors, if possible. For example, if policy A has mirrors + `a, b, c` and policy B has mirrors `c, d, e`, the mirrors will be + used in the order `a, b, c, d, e`. If the orders of mirror entries + conflict (e.g. `a, b` vs. `b, a`) the configuration is not rejected + but the resulting order is unspecified." + type: array + items: + description: 'RepositoryDigestMirrors holds cluster-wide information + about how to handle mirros in the registries config. Note: the mirrors + only work when pulling the images that are referenced by their digests.' + type: object + required: + - source + properties: + mirrors: + description: mirrors is one or more repositories that may also + contain the same images. The order of mirrors in this list is + treated as the user's desired priority, while source is by default + considered lower priority than all mirrors. 
Other cluster configuration, + including (but not limited to) other repositoryDigestMirrors + objects, may impact the exact order mirrors are contacted in, + or some mirrors may be contacted in parallel, so this should + be considered a preference rather than a guarantee of ordering. + type: array + items: + type: string + source: + description: source is the repository that users refer to, e.g. + in image pull specifications. + type: string diff --git a/vendor/github.com/openshift/api/quota/v1/0000_03_quota-openshift_01_clusterresourcequota.crd.yaml b/vendor/github.com/openshift/api/quota/v1/0000_03_quota-openshift_01_clusterresourcequota.crd.yaml new file mode 100644 index 000000000..b2c56a140 --- /dev/null +++ b/vendor/github.com/openshift/api/quota/v1/0000_03_quota-openshift_01_clusterresourcequota.crd.yaml @@ -0,0 +1,220 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: clusterresourcequotas.quota.openshift.io +spec: + group: quota.openshift.io + names: + kind: ClusterResourceQuota + listKind: ClusterResourceQuotaList + plural: clusterresourcequotas + singular: clusterresourcequota + scope: Cluster + subresources: + status: {} + versions: + - name: v1 + served: true + storage: true + "validation": + "openAPIV3Schema": + description: ClusterResourceQuota mirrors ResourceQuota at a cluster scope. This + object is easily convertible to synthetic ResourceQuota object to allow quota + evaluation re-use. + type: object + required: + - metadata + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. 
Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: Standard object's metadata. + type: object + spec: + description: Spec defines the desired quota + type: object + required: + - quota + - selector + properties: + quota: + description: Quota defines the desired quota + type: object + properties: + hard: + description: 'hard is the set of desired hard limits for each named + resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/' + type: object + additionalProperties: + type: string + scopeSelector: + description: scopeSelector is also a collection of filters like + scopes that must match each object tracked by a quota but expressed + using ScopeSelectorOperator in combination with possible values. + For a resource to match, both scopes AND scopeSelector (if specified + in spec), must be matched. + type: object + properties: + matchExpressions: + description: A list of scope selector requirements by scope + of the resources. + type: array + items: + description: A scoped-resource selector requirement is a selector + that contains values, a scope name, and an operator that + relates the scope name and values. + type: object + required: + - operator + - scopeName + properties: + operator: + description: Represents a scope's relationship to a set + of values. Valid operators are In, NotIn, Exists, DoesNotExist. + type: string + scopeName: + description: The name of the scope that the selector applies + to. + type: string + values: + description: An array of string values. If the operator + is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a + strategic merge patch. 
+ type: array + items: + type: string + scopes: + description: A collection of filters that must match each object + tracked by a quota. If not specified, the quota matches all objects. + type: array + items: + description: A ResourceQuotaScope defines a filter that must match + each object tracked by a quota + type: string + selector: + description: Selector is the selector used to match projects. It should + only select active projects on the scale of dozens (though it can + select many more less active projects). These projects will contend + on object creation through this resource. + type: object + properties: + annotations: + description: AnnotationSelector is used to select projects by annotation. + type: object + additionalProperties: + type: string + nullable: true + labels: + description: LabelSelector is used to select projects by label. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the + key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship + to a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a + strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. 
A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + additionalProperties: + type: string + nullable: true + status: + description: Status defines the actual enforced quota and its current usage + type: object + required: + - total + properties: + namespaces: + description: Namespaces slices the usage by project. This division + allows for quick resolution of deletion reconciliation inside of a + single project without requiring a recalculation across all projects. This + can be used to pull the deltas for a given project. + type: array + items: + description: ResourceQuotaStatusByNamespace gives status for a particular + project + type: object + required: + - namespace + - status + properties: + namespace: + description: Namespace the project this status applies to + type: string + status: + description: Status indicates how many resources have been consumed + by this project + type: object + properties: + hard: + description: 'Hard is the set of enforced hard limits for + each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/' + type: object + additionalProperties: + type: string + used: + description: Used is the current observed total usage of the + resource in the namespace. + type: object + additionalProperties: + type: string + nullable: true + total: + description: Total defines the actual enforced quota and its current + usage across all projects + type: object + properties: + hard: + description: 'Hard is the set of enforced hard limits for each named + resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/' + type: object + additionalProperties: + type: string + used: + description: Used is the current observed total usage of the resource + in the namespace. 
+ type: object + additionalProperties: + type: string diff --git a/vendor/github.com/openshift/api/security/v1/0000_03_security-openshift_01_scc.crd.yaml b/vendor/github.com/openshift/api/security/v1/0000_03_security-openshift_01_scc.crd.yaml new file mode 100644 index 000000000..57a383945 --- /dev/null +++ b/vendor/github.com/openshift/api/security/v1/0000_03_security-openshift_01_scc.crd.yaml @@ -0,0 +1,315 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: securitycontextconstraints.security.openshift.io +spec: + group: security.openshift.io + names: + kind: SecurityContextConstraints + listKind: SecurityContextConstraintsList + plural: securitycontextconstraints + singular: securitycontextconstraints + scope: Cluster + versions: + - name: v1 + served: true + storage: true + "validation": + "openAPIV3Schema": + description: SecurityContextConstraints governs the ability to make requests + that affect the SecurityContext that will be applied to a container. For historical + reasons SCC was exposed under the core Kubernetes API group. That exposure + is deprecated and will be removed in a future release - users should instead + use the security.openshift.io group to manage SecurityContextConstraints. + type: object + required: + - allowHostDirVolumePlugin + - allowHostIPC + - allowHostNetwork + - allowHostPID + - allowHostPorts + - allowPrivilegedContainer + - allowedCapabilities + - defaultAddCapabilities + - priority + - readOnlyRootFilesystem + - requiredDropCapabilities + - volumes + properties: + allowHostDirVolumePlugin: + description: AllowHostDirVolumePlugin determines if the policy allow containers + to use the HostDir volume plugin + type: boolean + allowHostIPC: + description: AllowHostIPC determines if the policy allows host ipc in the + containers. + type: boolean + allowHostNetwork: + description: AllowHostNetwork determines if the policy allows the use of + HostNetwork in the pod spec. 
+ type: boolean + allowHostPID: + description: AllowHostPID determines if the policy allows host pid in the + containers. + type: boolean + allowHostPorts: + description: AllowHostPorts determines if the policy allows host ports in + the containers. + type: boolean + allowPrivilegeEscalation: + description: AllowPrivilegeEscalation determines if a pod can request to + allow privilege escalation. If unspecified, defaults to true. + type: boolean + nullable: true + allowPrivilegedContainer: + description: AllowPrivilegedContainer determines if a container can request + to be run as privileged. + type: boolean + allowedCapabilities: + description: AllowedCapabilities is a list of capabilities that can be requested + to add to the container. Capabilities in this field maybe added at the + pod author's discretion. You must not list a capability in both AllowedCapabilities + and RequiredDropCapabilities. To allow all capabilities you may use '*'. + type: array + items: + description: Capability represent POSIX capabilities type + type: string + nullable: true + allowedFlexVolumes: + description: AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty + or nil indicates that all Flexvolumes may be used. This parameter is + effective only when the usage of the Flexvolumes is allowed in the "Volumes" + field. + type: array + items: + description: AllowedFlexVolume represents a single Flexvolume that is + allowed to be used. + type: object + required: + - driver + properties: + driver: + description: Driver is the name of the Flexvolume driver. + type: string + nullable: true + allowedUnsafeSysctls: + description: "AllowedUnsafeSysctls is a list of explicitly allowed unsafe + sysctls, defaults to none. Each entry is either a plain sysctl name or + ends in \"*\" in which case it is considered as a prefix of allowed sysctls. + Single * means all unsafe sysctls are allowed. Kubelet has to whitelist + all allowed unsafe sysctls explicitly to avoid rejection. 
\n Examples: + e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows + \"foo.bar\", \"foo.baz\", etc." + type: array + items: + type: string + nullable: true + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + defaultAddCapabilities: + description: DefaultAddCapabilities is the default set of capabilities that + will be added to the container unless the pod spec specifically drops + the capability. You may not list a capabiility in both DefaultAddCapabilities + and RequiredDropCapabilities. + type: array + items: + description: Capability represent POSIX capabilities type + type: string + nullable: true + defaultAllowPrivilegeEscalation: + description: DefaultAllowPrivilegeEscalation controls the default setting + for whether a process can gain more privileges than its parent process. + type: boolean + nullable: true + forbiddenSysctls: + description: "ForbiddenSysctls is a list of explicitly forbidden sysctls, + defaults to none. Each entry is either a plain sysctl name or ends in + \"*\" in which case it is considered as a prefix of forbidden sysctls. + Single * means all sysctls are forbidden. \n Examples: e.g. \"foo/*\" + forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", + \"foo.baz\", etc." + type: array + items: + type: string + nullable: true + fsGroup: + description: FSGroup is the strategy that will dictate what fs group is + used by the SecurityContext. + type: object + properties: + ranges: + description: Ranges are the allowed ranges of fs groups. If you would + like to force a single fs group then supply a single range with the + same start and end. 
+ type: array + items: + description: 'IDRange provides a min/max of an allowed range of IDs. + TODO: this could be reused for UIDs.' + type: object + properties: + max: + description: Max is the end of the range, inclusive. + type: integer + format: int64 + min: + description: Min is the start of the range, inclusive. + type: integer + format: int64 + type: + description: Type is the strategy that will dictate what FSGroup is + used in the SecurityContext. + type: string + nullable: true + groups: + description: The groups that have permission to use this security context + constraints + type: array + items: + type: string + nullable: true + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + description: 'Standard object''s metadata. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata' + type: object + priority: + description: Priority influences the sort order of SCCs when evaluating + which SCCs to try first for a given pod request based on access in the + Users and Groups fields. The higher the int, the higher priority. An + unset value is considered a 0 priority. If scores for multiple SCCs are + equal they will be sorted from most restrictive to least restrictive. + If both priorities and restrictions are equal the SCCs will be sorted + by name. + type: integer + format: int32 + nullable: true + readOnlyRootFilesystem: + description: ReadOnlyRootFilesystem when set to true will force containers + to run with a read only root file system. If the container specifically + requests to run with a non-read only root file system the SCC should deny + the pod. 
If set to false the container may run with a read only root file + system if it wishes but it will not be forced to. + type: boolean + requiredDropCapabilities: + description: RequiredDropCapabilities are the capabilities that will be + dropped from the container. These are required to be dropped and cannot + be added. + type: array + items: + description: Capability represent POSIX capabilities type + type: string + nullable: true + runAsUser: + description: RunAsUser is the strategy that will dictate what RunAsUser + is used in the SecurityContext. + type: object + properties: + type: + description: Type is the strategy that will dictate what RunAsUser is + used in the SecurityContext. + type: string + uid: + description: UID is the user id that containers must run as. Required + for the MustRunAs strategy if not using namespace/service account + allocated uids. + type: integer + format: int64 + uidRangeMax: + description: UIDRangeMax defines the max value for a strategy that allocates + by range. + type: integer + format: int64 + uidRangeMin: + description: UIDRangeMin defines the min value for a strategy that allocates + by range. + type: integer + format: int64 + nullable: true + seLinuxContext: + description: SELinuxContext is the strategy that will dictate what labels + will be set in the SecurityContext. + type: object + properties: + seLinuxOptions: + description: seLinuxOptions required to run as; required for MustRunAs + type: object + properties: + level: + description: Level is SELinux level label that applies to the container. + type: string + role: + description: Role is a SELinux role label that applies to the container. + type: string + type: + description: Type is a SELinux type label that applies to the container. + type: string + user: + description: User is a SELinux user label that applies to the container. + type: string + type: + description: Type is the strategy that will dictate what SELinux context + is used in the SecurityContext. 
+ type: string + nullable: true + seccompProfiles: + description: "SeccompProfiles lists the allowed profiles that may be set + for the pod or container's seccomp annotations. An unset (nil) or empty + value means that no profiles may be specifid by the pod or container.\tThe + wildcard '*' may be used to allow all profiles. When used to generate + a value for a pod the first non-wildcard profile will be used as the default." + type: array + items: + type: string + nullable: true + supplementalGroups: + description: SupplementalGroups is the strategy that will dictate what supplemental + groups are used by the SecurityContext. + type: object + properties: + ranges: + description: Ranges are the allowed ranges of supplemental groups. If + you would like to force a single supplemental group then supply a + single range with the same start and end. + type: array + items: + description: 'IDRange provides a min/max of an allowed range of IDs. + TODO: this could be reused for UIDs.' + type: object + properties: + max: + description: Max is the end of the range, inclusive. + type: integer + format: int64 + min: + description: Min is the start of the range, inclusive. + type: integer + format: int64 + type: + description: Type is the strategy that will dictate what supplemental + groups is used in the SecurityContext. + type: string + nullable: true + users: + description: The users who have permissions to use this security context + constraints + type: array + items: + type: string + nullable: true + volumes: + description: Volumes is a white list of allowed volume plugins. FSType + corresponds directly with the field names of a VolumeSource (azureFile, + configMap, emptyDir). To allow all volumes you may use "*". To allow + no volumes, set to ["none"]. + type: array + items: + description: FS Type gives strong typing to different file systems that + are used by volumes. 
+ type: string + nullable: true diff --git a/vendor/github.com/openshift/api/webconsole/install.go b/vendor/github.com/openshift/api/webconsole/install.go deleted file mode 100644 index 645e5275e..000000000 --- a/vendor/github.com/openshift/api/webconsole/install.go +++ /dev/null @@ -1,26 +0,0 @@ -package webconsole - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - - webconsolev1 "github.com/openshift/api/webconsole/v1" -) - -const ( - GroupName = "webconsole.openshift.io" -) - -var ( - schemeBuilder = runtime.NewSchemeBuilder(webconsolev1.Install) - // Install is a function which adds every version of this group to a scheme - Install = schemeBuilder.AddToScheme -) - -func Resource(resource string) schema.GroupResource { - return schema.GroupResource{Group: GroupName, Resource: resource} -} - -func Kind(kind string) schema.GroupKind { - return schema.GroupKind{Group: GroupName, Kind: kind} -} diff --git a/vendor/github.com/openshift/api/webconsole/v1/doc.go b/vendor/github.com/openshift/api/webconsole/v1/doc.go deleted file mode 100644 index 1e2bcbd0c..000000000 --- a/vendor/github.com/openshift/api/webconsole/v1/doc.go +++ /dev/null @@ -1,8 +0,0 @@ -// +k8s:deepcopy-gen=package,register -// +k8s:conversion-gen=github.com/openshift/origin-web-console-server/pkg/apis/webconsole -// +k8s:defaulter-gen=TypeMeta -// +k8s:openapi-gen=true - -// +groupName=webconsole.config.openshift.io -// Package v1 is the v1 version of the API. 
-package v1 diff --git a/vendor/github.com/openshift/api/webconsole/v1/register.go b/vendor/github.com/openshift/api/webconsole/v1/register.go deleted file mode 100644 index de85ae111..000000000 --- a/vendor/github.com/openshift/api/webconsole/v1/register.go +++ /dev/null @@ -1,38 +0,0 @@ -package v1 - -import ( - configv1 "github.com/openshift/api/config/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -var ( - GroupName = "webconsole.config.openshift.io" - GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} - schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, configv1.Install) - // Install is a function which adds this version to a scheme - Install = schemeBuilder.AddToScheme - - // SchemeGroupVersion generated code relies on this name - // Deprecated - SchemeGroupVersion = GroupVersion - // AddToScheme exists solely to keep the old generators creating valid code - // DEPRECATED - AddToScheme = schemeBuilder.AddToScheme -) - -// Resource generated code relies on this being here, but it logically belongs to the group -// DEPRECATED -func Resource(resource string) schema.GroupResource { - return schema.GroupResource{Group: GroupName, Resource: resource} -} - -// Adds the list of known types to api.Scheme. 
-func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(GroupVersion, - &WebConsoleConfiguration{}, - ) - metav1.AddToGroupVersion(scheme, GroupVersion) - return nil -} diff --git a/vendor/github.com/openshift/api/webconsole/v1/types.go b/vendor/github.com/openshift/api/webconsole/v1/types.go deleted file mode 100644 index 27a62e77f..000000000 --- a/vendor/github.com/openshift/api/webconsole/v1/types.go +++ /dev/null @@ -1,75 +0,0 @@ -package v1 - -import ( - configv1 "github.com/openshift/api/config/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// WebConsoleConfiguration holds the necessary configuration options for serving the web console -type WebConsoleConfiguration struct { - metav1.TypeMeta `json:",inline"` - - // ServingInfo is the HTTP serving information for these assets - ServingInfo configv1.HTTPServingInfo `json:"servingInfo"` - - // ClusterInfo holds information the web console needs to talk to the cluster such as master public URL - // and metrics public URL - ClusterInfo ClusterInfo `json:"clusterInfo"` - - // Features define various feature gates for the web console - Features FeaturesConfiguration `json:"features"` - - // Extensions define custom scripts, stylesheets, and properties used for web console customization - Extensions ExtensionsConfiguration `json:"extensions"` -} - -// ClusterInfo holds information the web console needs to talk to the cluster such as master public URL and -// metrics public URL -type ClusterInfo struct { - // ConsolePublicURL is where you can find the web console server (TODO do we really need this?) 
- ConsolePublicURL string `json:"consolePublicURL"` - - // MasterPublicURL is how the web console can access the OpenShift v1 server - MasterPublicURL string `json:"masterPublicURL"` - - // LoggingPublicURL is the public endpoint for logging (optional) - LoggingPublicURL string `json:"loggingPublicURL"` - - // MetricsPublicURL is the public endpoint for metrics (optional) - MetricsPublicURL string `json:"metricsPublicURL"` - - // LogoutPublicURL is an optional, absolute URL to redirect web browsers to after logging out of the web - // console. If not specified, the built-in logout page is shown. - LogoutPublicURL string `json:"logoutPublicURL"` - - // AdminConsolePublicURL is an optional, public URL of the OpenShift admin console. If specified, the web - // console will add a link to the admin console in a context selector in its masthead. - AdminConsolePublicURL string `json:"adminConsolePublicURL"` -} - -// FeaturesConfiguration defines various feature gates for the web console -type FeaturesConfiguration struct { - // InactivityTimeoutMinutes is the number of minutes of inactivity before you are automatically logged out of - // the web console (optional). If set to 0, inactivity timeout is disabled. - InactivityTimeoutMinutes int64 `json:"inactivityTimeoutMinutes"` - - // ClusterResourceOverridesEnabled indicates that the cluster is configured for overcommit. When set to - // true, the web console will hide the CPU request, CPU limit, and memory request fields in its editors - // and skip validation on those fields. The memory limit field will still be displayed. - ClusterResourceOverridesEnabled bool `json:"clusterResourceOverridesEnabled"` -} - -// ExtensionsConfiguration holds custom script, stylesheets, and properties used for web console customization -type ExtensionsConfiguration struct { - // ScriptURLs are URLs to load as scripts when the Web Console loads. The URLs must be accessible from - // the browser. 
- ScriptURLs []string `json:"scriptURLs"` - // StylesheetURLs are URLs to load as stylesheets when the Web Console loads. The URLs must be accessible - // from the browser. - StylesheetURLs []string `json:"stylesheetURLs"` - // Properties are key(string) and value(string) pairs that will be injected into the console under the - // global variable OPENSHIFT_EXTENSION_PROPERTIES - Properties map[string]string `json:"properties"` -} diff --git a/vendor/github.com/openshift/api/webconsole/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/webconsole/v1/zz_generated.deepcopy.go deleted file mode 100644 index c21658740..000000000 --- a/vendor/github.com/openshift/api/webconsole/v1/zz_generated.deepcopy.go +++ /dev/null @@ -1,103 +0,0 @@ -// +build !ignore_autogenerated - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterInfo) DeepCopyInto(out *ClusterInfo) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterInfo. -func (in *ClusterInfo) DeepCopy() *ClusterInfo { - if in == nil { - return nil - } - out := new(ClusterInfo) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ExtensionsConfiguration) DeepCopyInto(out *ExtensionsConfiguration) { - *out = *in - if in.ScriptURLs != nil { - in, out := &in.ScriptURLs, &out.ScriptURLs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.StylesheetURLs != nil { - in, out := &in.StylesheetURLs, &out.StylesheetURLs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Properties != nil { - in, out := &in.Properties, &out.Properties - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtensionsConfiguration. -func (in *ExtensionsConfiguration) DeepCopy() *ExtensionsConfiguration { - if in == nil { - return nil - } - out := new(ExtensionsConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FeaturesConfiguration) DeepCopyInto(out *FeaturesConfiguration) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeaturesConfiguration. -func (in *FeaturesConfiguration) DeepCopy() *FeaturesConfiguration { - if in == nil { - return nil - } - out := new(FeaturesConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WebConsoleConfiguration) DeepCopyInto(out *WebConsoleConfiguration) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ServingInfo.DeepCopyInto(&out.ServingInfo) - out.ClusterInfo = in.ClusterInfo - out.Features = in.Features - in.Extensions.DeepCopyInto(&out.Extensions) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebConsoleConfiguration. 
-func (in *WebConsoleConfiguration) DeepCopy() *WebConsoleConfiguration { - if in == nil { - return nil - } - out := new(WebConsoleConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *WebConsoleConfiguration) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} diff --git a/vendor/github.com/openshift/api/webconsole/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/webconsole/v1/zz_generated.swagger_doc_generated.go deleted file mode 100644 index 9e96d0774..000000000 --- a/vendor/github.com/openshift/api/webconsole/v1/zz_generated.swagger_doc_generated.go +++ /dev/null @@ -1,61 +0,0 @@ -package v1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. 
-// -// Those methods can be generated by using hack/update-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE -var map_ClusterInfo = map[string]string{ - "": "ClusterInfo holds information the web console needs to talk to the cluster such as master public URL and metrics public URL", - "consolePublicURL": "ConsolePublicURL is where you can find the web console server (TODO do we really need this?)", - "masterPublicURL": "MasterPublicURL is how the web console can access the OpenShift v1 server", - "loggingPublicURL": "LoggingPublicURL is the public endpoint for logging (optional)", - "metricsPublicURL": "MetricsPublicURL is the public endpoint for metrics (optional)", - "logoutPublicURL": "LogoutPublicURL is an optional, absolute URL to redirect web browsers to after logging out of the web console. If not specified, the built-in logout page is shown.", - "adminConsolePublicURL": "AdminConsolePublicURL is an optional, public URL of the OpenShift admin console. If specified, the web console will add a link to the admin console in a context selector in its masthead.", -} - -func (ClusterInfo) SwaggerDoc() map[string]string { - return map_ClusterInfo -} - -var map_ExtensionsConfiguration = map[string]string{ - "": "ExtensionsConfiguration holds custom script, stylesheets, and properties used for web console customization", - "scriptURLs": "ScriptURLs are URLs to load as scripts when the Web Console loads. The URLs must be accessible from the browser.", - "stylesheetURLs": "StylesheetURLs are URLs to load as stylesheets when the Web Console loads. 
The URLs must be accessible from the browser.", - "properties": "Properties are key(string) and value(string) pairs that will be injected into the console under the global variable OPENSHIFT_EXTENSION_PROPERTIES", -} - -func (ExtensionsConfiguration) SwaggerDoc() map[string]string { - return map_ExtensionsConfiguration -} - -var map_FeaturesConfiguration = map[string]string{ - "": "FeaturesConfiguration defines various feature gates for the web console", - "inactivityTimeoutMinutes": "InactivityTimeoutMinutes is the number of minutes of inactivity before you are automatically logged out of the web console (optional). If set to 0, inactivity timeout is disabled.", - "clusterResourceOverridesEnabled": "ClusterResourceOverridesEnabled indicates that the cluster is configured for overcommit. When set to true, the web console will hide the CPU request, CPU limit, and memory request fields in its editors and skip validation on those fields. The memory limit field will still be displayed.", -} - -func (FeaturesConfiguration) SwaggerDoc() map[string]string { - return map_FeaturesConfiguration -} - -var map_WebConsoleConfiguration = map[string]string{ - "": "WebConsoleConfiguration holds the necessary configuration options for serving the web console", - "servingInfo": "ServingInfo is the HTTP serving information for these assets", - "clusterInfo": "ClusterInfo holds information the web console needs to talk to the cluster such as master public URL and metrics public URL", - "features": "Features define various feature gates for the web console", - "extensions": "Extensions define custom scripts, stylesheets, and properties used for web console customization", -} - -func (WebConsoleConfiguration) SwaggerDoc() map[string]string { - return map_WebConsoleConfiguration -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/Makefile b/vendor/github.com/openshift/library-go/alpha-build-machinery/Makefile index 
2ec73b087..b44d020e4 100644 --- a/vendor/github.com/openshift/library-go/alpha-build-machinery/Makefile +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/Makefile @@ -14,6 +14,7 @@ examples :=$(wildcard ./make/examples/*/Makefile.test) # Delete lines referencing temporary files and directories # Unify make error output between versions # Ignore old cp errors on centos7 +# Ignore different make output with `-k` option define update-makefile-log mkdir -p "$(3)" set -o pipefail; $(MAKE) -j 1 -C "$(dir $(1))" -f "$(notdir $(1))" --no-print-directory --warn-undefined-variables $(2) 2>&1 | \ @@ -24,6 +25,7 @@ set -o pipefail; $(MAKE) -j 1 -C "$(dir $(1))" -f "$(notdir $(1))" --no-print-di sed -E 's~^[<> ]*((\+\+\+|\-\-\-) \./(testing/)?manifests/.*.yaml).*~\1~' | \ sed -E 's/^(make\[2\]: \*\*\* \[).*: (.*\] Error 1)/\1\2/' | \ grep -v 'are the same file' | \ + grep -E -v -e '^make\[2\]: Target `.*'"'"' not remade because of errors\.$$' | \ tee "$(3)"/"$(notdir $(1))"$(subst ..,.,.$(2).log) endef diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/default.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/default.mk index 564fc1229..30806edb1 100644 --- a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/default.mk +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/default.mk @@ -13,7 +13,7 @@ update: update-bindata # We need to be careful to expand all the paths before any include is done # or self_dir could be modified for the next include by the included file. -# Also doing this at the end of the file allows us to user self_dir before it could be modified. +# Also doing this at the end of the file allows us to use self_dir before it could be modified. 
include $(addprefix $(self_dir), \ targets/openshift/deps.mk \ targets/openshift/images.mk \ diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile index 3ede7702e..6f7e2c812 100644 --- a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile @@ -2,11 +2,12 @@ include $(addprefix ../../, \ golang.mk \ targets/openshift/rpm.mk \ targets/openshift/crd-schema-gen.mk \ + targets/openshift/deps.mk \ ) # Set crd-schema-gen variables -CRD_SCHEMA_GEN_APIS :=$(addprefix ./pkg/apis/,v1 v1beta1) -CRD_SCHEMA_GEN_VERSION :=v0.2.1 +CONTROLLER_GEN_VERSION :=v0.2.1 +CRD_APIS :=$(addprefix ./pkg/apis/,v1 v1beta1) # rpm wants build-id set GO_LD_EXTRAFLAGS +=-B 0x$$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \n') @@ -15,6 +16,12 @@ OUTPUT_DIR :=_output CROSS_BUILD_BINDIR :=$(OUTPUT_DIR)/bin RPM_EXTRAFLAGS :=--quiet --define 'version 2.42.0' --define 'dist .el7' --define 'release 6' +# $1 - target name +# $2 - apis +# $3 - manifests +# $4 - output +$(call add-crd-gen,manifests,$(CRD_APIS),./manifests,./manifests) + cross-build-darwin-amd64: +@GOOS=darwin GOARCH=amd64 $(MAKE) --no-print-directory build GO_BUILD_BINDIR:=$(CROSS_BUILD_BINDIR)/darwin_amd64 .PHONY: cross-build-darwin-amd64 diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile.test b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile.test index 4d4754a63..65700ea62 100644 --- a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile.test +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile.test @@ 
-27,7 +27,7 @@ test-build: test-cross-build: [[ ! -d ./_output/ ]] || (ls -l ./_output/ && false) - $(MAKE) cross-build + $(MAKE) cross-build SOURCE_GIT_TAG=v42.43.44 SOURCE_GIT_COMMIT=aaa SOURCE_GIT_TREE_STATE=clean [[ ! -f ./openshift ]] [[ ! -f ./oc ]] [[ -f ./_output/bin/darwin_amd64/openshift ]] @@ -43,7 +43,7 @@ test-cross-build: test-rpm: [[ ! -d ./_output/ ]] || (ls -l ./_output/ && false) - $(MAKE) rpm-build + $(MAKE) rpm-build SOURCE_GIT_TAG=v42.43.44 SOURCE_GIT_COMMIT=aaa SOURCE_GIT_TREE_STATE=clean [[ -f ./_output/rpms/x86_64/openshift-2.42.0-6.el7.x86_64.rpm ]] [[ -f ./_output/srpms/openshift-2.42.0-6.el7.src.rpm ]] @@ -59,8 +59,9 @@ test-codegen: $(MAKE) update-codegen-crds $(MAKE) verify-codegen-crds - ! diff -Naup ./testing/manifests/initial/ ./manifests/ 2>/dev/null - diff -Naup ./testing/manifests/updated/ ./manifests/ 2>/dev/null + cp -r ./testing/manifests/initial/*.crd.yaml-merge-patch ./manifests/ + ! diff -Naup ./testing/manifests/initial/ ./manifests/ 2>/dev/null 1>&2 + diff -Naup ./testing/manifests/updated/ ./manifests/ $(MAKE) clean [[ ! -d ./_output/ ]] || (ls -l ./_output/ && false) diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile.test.log b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile.test.log index c88256081..eb03961a2 100644 --- a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile.test.log +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/Makefile.test.log @@ -40,15 +40,11 @@ if [ -d '_output/tools/bin/' ]; then rmdir --ignore-fail-on-non-empty -p '_outpu rm -f -r '_output/bin' if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi [[ ! -d ./_output/ ]] || (ls -l ./_output/ && false) -make cross-build -fatal: No names found, cannot describe anything. 
-fatal: No names found, cannot describe anything. +make cross-build SOURCE_GIT_TAG=v42.43.44 SOURCE_GIT_COMMIT=aaa SOURCE_GIT_TREE_STATE=clean mkdir -p '_output/bin/darwin_amd64' go build -ldflags "-s -w -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.versionFromGit="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.commitFromGit="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.gitTreeState="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.buildDate="" -B 0x$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \n')" -o '_output/bin/darwin_amd64/oc' github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/cmd/oc mkdir -p '_output/bin/darwin_amd64' go build -ldflags "-s -w -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.versionFromGit="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.commitFromGit="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.gitTreeState="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.buildDate="" -B 0x$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \n')" -o '_output/bin/darwin_amd64/openshift' github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/cmd/openshift -fatal: No names found, cannot describe anything. -fatal: No names found, cannot describe anything. 
mkdir -p '_output/bin/windows_amd64' go build -ldflags "-s -w -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.versionFromGit="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.commitFromGit="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.gitTreeState="" -X github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/version.buildDate="" -B 0x$(head -c20 /dev/urandom|od -An -tx1|tr -d ' \n')" -o '_output/bin/windows_amd64/oc.exe' github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/cmd/oc mkdir -p '_output/bin/windows_amd64' @@ -85,10 +81,8 @@ if [ -d '_output/tools/bin/' ]; then rmdir --ignore-fail-on-non-empty -p '_outpu rm -f -r '_output/bin' if [ -d '_output' ]; then rmdir --ignore-fail-on-non-empty '_output'; fi [[ ! -d ./_output/ ]] || (ls -l ./_output/ && false) -make rpm-build +make rpm-build SOURCE_GIT_TAG=v42.43.44 SOURCE_GIT_COMMIT=aaa SOURCE_GIT_TREE_STATE=clean rpmbuild -ba --define "_topdir /github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries" --define "go_package github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries" --quiet --define 'version 2.42.0' --define 'dist .el7' --define 'release 6' ocp.spec -fatal: No names found, cannot describe anything. -fatal: No names found, cannot describe anything. 
[[ -f ./_output/rpms/x86_64/openshift-2.42.0-6.el7.x86_64.rpm ]] [[ -f ./_output/srpms/openshift-2.42.0-6.el7.src.rpm ]] make clean @@ -139,27 +133,14 @@ Installing yq into '_output/tools/bin/yq' mkdir -p '_output/tools/bin/' curl -s -f -L https://github.com/mikefarah/yq/releases/download/2.4.0/yq_linux_amd64 -o '_output/tools/bin/yq' chmod +x '_output/tools/bin/yq'; -'_output/tools/src/sigs.k8s.io/controller-tools/controller-gen' \ - schemapatch:manifests="./manifests" \ - paths="./pkg/apis/v1;./pkg/apis/v1beta1" \ --- ./manifests/operator.openshift.io_myotheroperatorresources.crd.yaml -@@ -11,9 +11,39 @@ spec: +@@ -9,6 +9,40 @@ spec: + kind: MyOtherOperatorResource + plural: myotheroperatorresources scope: "" - version: v1beta1 - versions: -- - name: v1beta1 -- served: true -- storage: true -+ - name: v1beta1 -+ served: true -+ storage: true -+ "validation": -+ "openAPIV3Schema": ++ validation: ++ openAPIV3Schema: + description: MyOtherOperatorResource is an example operator configuration type -+ type: object -+ required: -+ - metadata -+ - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation @@ -174,97 +155,39 @@ chmod +x '_output/tools/bin/yq'; + metadata: + type: object + spec: -+ type: object -+ required: -+ - deprecatedField -+ - name + properties: + deprecatedField: + type: string + name: + type: string - status: - acceptedNames: - kind: "" -make[2]: *** [verify-codegen-crds] Error 1 ++ overwritePattern: ++ pattern: ^(Managed|Unmanaged)$ ++ type: string ++ required: ++ - deprecatedField ++ - name ++ - overwritePattern ++ type: object ++ required: ++ - metadata ++ - spec ++ type: object + version: v1beta1 + versions: + - name: v1beta1 +make[2]: *** [verify-codegen-crds-manifests] Error 1 make update-codegen-crds Using existing controller-gen from "_output/tools/src/sigs.k8s.io/controller-tools/controller-gen" Using existing yq from "_output/tools/bin/yq" 
-'_output/tools/src/sigs.k8s.io/controller-tools/controller-gen' \ - schemapatch:manifests="./manifests" \ - paths="./pkg/apis/v1;./pkg/apis/v1beta1" \ - output:dir="./manifests" -cp -n ./manifests/operator.openshift.io_myoperatorresources.crd.yaml-merge-patch './manifests/' || true # FIXME: centos -_output/tools/bin/yq m -i './manifests/operator.openshift.io_myoperatorresources.crd.yaml' './manifests/operator.openshift.io_myoperatorresources.crd.yaml-merge-patch' +'_output/tools/src/sigs.k8s.io/controller-tools/controller-gen' schemapatch:manifests="./manifests" paths="./pkg/apis/v1;./pkg/apis/v1beta1" output:dir="./manifests" +_output/tools/bin/yq m -i -x './manifests/operator.openshift.io_myotheroperatorresources.crd.yaml' './manifests/operator.openshift.io_myotheroperatorresources.crd.yaml-merge-patch' +_output/tools/bin/yq m -i -x './manifests/operator.openshift.io_myoperatorresources.crd.yaml' './manifests/operator.openshift.io_myoperatorresources.crd.yaml-merge-patch' make verify-codegen-crds Using existing controller-gen from "_output/tools/src/sigs.k8s.io/controller-tools/controller-gen" Using existing yq from "_output/tools/bin/yq" -'_output/tools/src/sigs.k8s.io/controller-tools/controller-gen' \ - schemapatch:manifests="./manifests" \ - paths="./pkg/apis/v1;./pkg/apis/v1beta1" \ -! 
diff -Naup ./testing/manifests/initial/ ./manifests/ 2>/dev/null -diff -Naup ./testing/manifests/initial/operator.openshift.io_myoperatorresources.crd.yaml ./manifests/operator.openshift.io_myoperatorresources.crd.yaml ---- ./testing/manifests/initial/operator.openshift.io_myoperatorresources.crd.yaml -+++ ./manifests/operator.openshift.io_myoperatorresources.crd.yaml -@@ -9,6 +9,11 @@ spec: - kind: MyOperatorResource - plural: myoperatorresources - scope: "" -+ validation: -+ openAPIV3Schema: -+ properties: -+ apiVersion: -+ pattern: ^(test|TEST)$ - status: - acceptedNames: - kind: "" -diff -Naup ./testing/manifests/initial/operator.openshift.io_myotheroperatorresources.crd.yaml ./manifests/operator.openshift.io_myotheroperatorresources.crd.yaml ---- ./testing/manifests/initial/operator.openshift.io_myotheroperatorresources.crd.yaml -+++ ./manifests/operator.openshift.io_myotheroperatorresources.crd.yaml -@@ -11,9 +11,39 @@ spec: - scope: "" - version: v1beta1 - versions: -- - name: v1beta1 -- served: true -- storage: true -+ - name: v1beta1 -+ served: true -+ storage: true -+ "validation": -+ "openAPIV3Schema": -+ description: MyOtherOperatorResource is an example operator configuration type -+ type: object -+ required: -+ - metadata -+ - spec -+ properties: -+ apiVersion: -+ description: 'APIVersion defines the versioned schema of this representation -+ of an object. Servers should convert recognized schemas to the latest -+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' -+ type: string -+ kind: -+ description: 'Kind is a string value representing the REST resource this -+ object represents. Servers may infer this from the endpoint the client -+ submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' -+ type: string -+ metadata: -+ type: object -+ spec: -+ type: object -+ required: -+ - deprecatedField -+ - name -+ properties: -+ deprecatedField: -+ type: string -+ name: -+ type: string - status: - acceptedNames: - kind: "" -diff -Naup ./testing/manifests/updated/ ./manifests/ 2>/dev/null +cp -r ./testing/manifests/initial/*.crd.yaml-merge-patch ./manifests/ +! diff -Naup ./testing/manifests/initial/ ./manifests/ 2>/dev/null 1>&2 +diff -Naup ./testing/manifests/updated/ ./manifests/ make clean rm -f oc openshift rm -f -r '/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/_output/srpms' diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/glide.lock b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/glide.lock new file mode 100644 index 000000000..339b04e1a --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/glide.lock @@ -0,0 +1,61 @@ +hash: 1aaf998ca41bbf291600272fff83e55b5e9536f817ab15aedb2fa9459b15336f +updated: 2019-10-07T15:28:28.103413125+02:00 +imports: +- name: github.com/gogo/protobuf + version: 65acae22fc9d1fe290b33faa2bd64cdc20a463a0 + subpackages: + - proto + - sortkeys +- name: github.com/google/gofuzz + version: f140a6486e521aad38f5917de355cbf147cc0496 +- name: github.com/openshift/api + version: d92789481b059132f1febccd9bb47cb27220f248 + subpackages: + - config/v1 +- name: golang.org/x/net + version: cdfb69ac37fc6fa907650654115ebebb3aae2087 + subpackages: + - http/httpguts + - http2 + - http2/hpack + - idna +- name: golang.org/x/text + version: e6919f6577db79269a6443b9dc46d18f2238fb5d + subpackages: + - secure/bidirule + - transform + - unicode/bidi + - unicode/norm +- name: gopkg.in/inf.v0 + version: 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4 +- 
name: k8s.io/api + version: 95b840bb6a1f5f0462af804c8589396d294d4914 + subpackages: + - core/v1 +- name: k8s.io/apimachinery + version: 27d36303b6556f377b4f34e64705fa9024a12b0c + subpackages: + - pkg/api/resource + - pkg/apis/meta/v1 + - pkg/conversion + - pkg/conversion/queryparams + - pkg/fields + - pkg/labels + - pkg/runtime + - pkg/runtime/schema + - pkg/selection + - pkg/types + - pkg/util/errors + - pkg/util/intstr + - pkg/util/json + - pkg/util/naming + - pkg/util/net + - pkg/util/runtime + - pkg/util/sets + - pkg/util/validation + - pkg/util/validation/field + - pkg/watch + - third_party/forked/golang/reflect +- name: k8s.io/klog + version: 3ca30a56d8a775276f9cdae009ba326fdc05af7f +testImports: [] diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/glide.yaml b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/glide.yaml new file mode 100644 index 000000000..1c5768bcd --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/glide.yaml @@ -0,0 +1,4 @@ +package: github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries +import: +- package: github.com/openshift/api +- package: k8s.io/apimachinery diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/manifests/operator.openshift.io_myotheroperatorresources.crd.yaml b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/manifests/operator.openshift.io_myotheroperatorresources.crd.yaml index 9ceac7634..05363164d 100644 --- a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/manifests/operator.openshift.io_myotheroperatorresources.crd.yaml +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/manifests/operator.openshift.io_myotheroperatorresources.crd.yaml @@ 
-9,18 +9,9 @@ spec: kind: MyOtherOperatorResource plural: myotheroperatorresources scope: "" - version: v1beta1 - versions: - - name: v1beta1 - served: true - storage: true - "validation": - "openAPIV3Schema": + validation: + openAPIV3Schema: description: MyOtherOperatorResource is an example operator configuration type - type: object - required: - - metadata - - spec properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation @@ -35,15 +26,28 @@ spec: metadata: type: object spec: - type: object - required: - - deprecatedField - - name properties: deprecatedField: type: string name: type: string + overwritePattern: + pattern: ^(Managed|Unmanaged)$ + type: string + required: + - deprecatedField + - name + - overwritePattern + type: object + required: + - metadata + - spec + type: object + version: v1beta1 + versions: + - name: v1beta1 + served: true + storage: true status: acceptedNames: kind: "" diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/manifests/operator.openshift.io_myotheroperatorresources.crd.yaml-merge-patch b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/manifests/operator.openshift.io_myotheroperatorresources.crd.yaml-merge-patch new file mode 100644 index 000000000..1eebf507e --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/manifests/operator.openshift.io_myotheroperatorresources.crd.yaml-merge-patch @@ -0,0 +1,8 @@ +spec: + validation: + openAPIV3Schema: + properties: + spec: + properties: + overwritePattern: + pattern: ^(Managed|Unmanaged)$ diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/apis/v1beta1/types.go b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/apis/v1beta1/types.go index 5c1787309..9169f90a6 100644 --- 
a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/apis/v1beta1/types.go +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/pkg/apis/v1beta1/types.go @@ -22,4 +22,7 @@ type MyOtherOperatorResource struct { type MyOtherOperatorResourceSpec struct { Name string `json:"name"` DeprecatedField string `json:"deprecatedField"` + + // +kubebuilder:validation:Pattern=^(Managed|Unmanaged)$ + OverwritePattern string `json:"overwritePattern"` } diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/initial/operator.openshift.io_myotheroperatorresources.crd.yaml b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/initial/operator.openshift.io_myotheroperatorresources.crd.yaml index 622a5279a..fe529245e 100644 --- a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/initial/operator.openshift.io_myotheroperatorresources.crd.yaml +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/initial/operator.openshift.io_myotheroperatorresources.crd.yaml @@ -11,9 +11,9 @@ spec: scope: "" version: v1beta1 versions: - - name: v1beta1 - served: true - storage: true + - name: v1beta1 + served: true + storage: true status: acceptedNames: kind: "" diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/initial/operator.openshift.io_myotheroperatorresources.crd.yaml-merge-patch b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/initial/operator.openshift.io_myotheroperatorresources.crd.yaml-merge-patch new file mode 100644 index 000000000..1eebf507e --- /dev/null +++ 
b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/initial/operator.openshift.io_myotheroperatorresources.crd.yaml-merge-patch @@ -0,0 +1,8 @@ +spec: + validation: + openAPIV3Schema: + properties: + spec: + properties: + overwritePattern: + pattern: ^(Managed|Unmanaged)$ diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/updated/operator.openshift.io_myotheroperatorresources.crd.yaml b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/updated/operator.openshift.io_myotheroperatorresources.crd.yaml index 9ceac7634..05363164d 100644 --- a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/updated/operator.openshift.io_myotheroperatorresources.crd.yaml +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/updated/operator.openshift.io_myotheroperatorresources.crd.yaml @@ -9,18 +9,9 @@ spec: kind: MyOtherOperatorResource plural: myotheroperatorresources scope: "" - version: v1beta1 - versions: - - name: v1beta1 - served: true - storage: true - "validation": - "openAPIV3Schema": + validation: + openAPIV3Schema: description: MyOtherOperatorResource is an example operator configuration type - type: object - required: - - metadata - - spec properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation @@ -35,15 +26,28 @@ spec: metadata: type: object spec: - type: object - required: - - deprecatedField - - name properties: deprecatedField: type: string name: type: string + overwritePattern: + pattern: ^(Managed|Unmanaged)$ + type: string + required: + - deprecatedField + - name + - overwritePattern + type: object + required: + - metadata + - spec + type: object + version: v1beta1 + versions: + - name: v1beta1 + served: true 
+ storage: true status: acceptedNames: kind: "" diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/updated/operator.openshift.io_myotheroperatorresources.crd.yaml-merge-patch b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/updated/operator.openshift.io_myotheroperatorresources.crd.yaml-merge-patch new file mode 100644 index 000000000..1eebf507e --- /dev/null +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/examples/multiple-binaries/testing/manifests/updated/operator.openshift.io_myotheroperatorresources.crd.yaml-merge-patch @@ -0,0 +1,8 @@ +spec: + validation: + openAPIV3Schema: + properties: + spec: + properties: + overwritePattern: + pattern: ^(Managed|Unmanaged)$ diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/lib/golang.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/lib/golang.mk index 2184f1748..89d457ef0 100644 --- a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/lib/golang.mk +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/lib/golang.mk @@ -12,7 +12,7 @@ GOFMT ?=gofmt GOFMT_FLAGS ?=-s -l GOLINT ?=golint -GO_FILES ?=$(shell find . -name '*.go' -not -path './vendor/*' -print) +GO_FILES ?=$(shell find . -name '*.go' -not -path '*/vendor/*' -not -path '*/_output/*' -print) GO_PACKAGES ?=./... 
GO_TEST_PACKAGES ?=$(GO_PACKAGES) diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/build.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/build.mk index 9a71cb793..07c15faa4 100644 --- a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/build.mk +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/build.mk @@ -22,7 +22,7 @@ clean: clean-binaries # We need to be careful to expand all the paths before any include is done # or self_dir could be modified for the next include by the included file. -# Also doing this at the end of the file allows us to user self_dir before it could be modified. +# Also doing this at the end of the file allows us to use self_dir before it could be modified. include $(addprefix $(self_dir), \ ../../lib/golang.mk \ ) diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/test-unit.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/test-unit.mk index f96c8ccd7..908783aac 100644 --- a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/test-unit.mk +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/test-unit.mk @@ -13,7 +13,7 @@ endif # We need to be careful to expand all the paths before any include is done # or self_dir could be modified for the next include by the included file. -# Also doing this at the end of the file allows us to user self_dir before it could be modified. +# Also doing this at the end of the file allows us to use self_dir before it could be modified. 
include $(addprefix $(self_dir), \ ../../lib/golang.mk \ ) diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/verify-update.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/verify-update.mk index 2034cd10e..78b2783ba 100644 --- a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/verify-update.mk +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/golang/verify-update.mk @@ -28,7 +28,7 @@ verify-golint: # We need to be careful to expand all the paths before any include is done # or self_dir could be modified for the next include by the included file. -# Also doing this at the end of the file allows us to user self_dir before it could be modified. +# Also doing this at the end of the file allows us to use self_dir before it could be modified. include $(addprefix $(self_dir), \ ../../lib/golang.mk \ ) diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/bindata.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/bindata.mk index 0e78cb927..97b7bd36d 100644 --- a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/bindata.mk +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/bindata.mk @@ -3,7 +3,7 @@ TMP_GOPATH :=$(shell mktemp -d) .ensure-go-bindata: ln -s $(abspath ./vendor) "$(TMP_GOPATH)/src" - export GOPATH=$(TMP_GOPATH) && export GOBIN=$(TMP_GOPATH)/bin && go install "./vendor/github.com/jteeuwen/go-bindata/..." + export GO111MODULE=off && export GOPATH=$(TMP_GOPATH) && export GOBIN=$(TMP_GOPATH)/bin && go install "./vendor/github.com/jteeuwen/go-bindata/..." 
# $1 - input dirs # $2 - prefix diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/controller-gen.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/controller-gen.mk index fd0ff401e..a14926e54 100644 --- a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/controller-gen.mk +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/controller-gen.mk @@ -31,7 +31,7 @@ clean: clean-controller-gen # We need to be careful to expand all the paths before any include is done # or self_dir could be modified for the next include by the included file. -# Also doing this at the end of the file allows us to user self_dir before it could be modified. +# Also doing this at the end of the file allows us to use self_dir before it could be modified. include $(addprefix $(self_dir), \ ../../lib/golang.mk \ ../../lib/tmp.mk \ diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/crd-schema-gen.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/crd-schema-gen.mk index ac8038b83..2e152fd65 100644 --- a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/crd-schema-gen.mk +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/crd-schema-gen.mk @@ -1,46 +1,62 @@ self_dir :=$(dir $(lastword $(MAKEFILE_LIST))) -CRD_SCHEMA_GEN_APIS ?=$(error CRD_SCHEMA_GEN_APIS is required) -CRD_SCHEMA_GEN_MANIFESTS ?=./manifests -CRD_SCHEMA_GEN_OUTPUT ?=./manifests - -crd_patches =$(subst $(CRD_SCHEMA_GEN_MANIFESTS),$(CRD_SCHEMA_GEN_OUTPUT),$(wildcard $(CRD_SCHEMA_GEN_MANIFESTS)/*.crd.yaml-merge-patch)) - # $1 - crd file # $2 - patch file define patch-crd - cp -n $(CRD_SCHEMA_GEN_MANIFESTS)/$(notdir $2) '$(CRD_SCHEMA_GEN_OUTPUT)/' || true # FIXME: centos - $(YQ) m -i '$(1)' '$(2)' + $(YQ) m -i -x '$(1)' '$(2)' 
endef empty := -update-codegen-crds: ensure-controller-gen ensure-yq + +define diff-file + diff -Naup '$(1)' '$(2)' + +endef + +# $1 - apis +# $2 - manifests +# $3 - output +define run-crd-gen '$(CONTROLLER_GEN)' \ - schemapatch:manifests="$(CRD_SCHEMA_GEN_MANIFESTS)" \ - paths="$(subst $(empty) ,;,$(CRD_SCHEMA_GEN_APIS))" \ - output:dir="$(CRD_SCHEMA_GEN_OUTPUT)" - $(foreach p,$(crd_patches),$(call patch-crd,$(basename $(p)).yaml,$(p))) + schemapatch:manifests="$(2)" \ + paths="$(subst $(empty) ,;,$(1))" \ + output:dir="$(3)" + $$(foreach p,$$(wildcard $(2)/*.crd.yaml-merge-patch),$$(call patch-crd,$$(subst $(2),$(3),$$(basename $$(p))).yaml,$$(p))) +endef + + +# $1 - target name +# $2 - apis +# $3 - manifests +# $4 - output +define add-crd-gen-internal + +update-codegen-crds-$(1): ensure-controller-gen ensure-yq + $(call run-crd-gen,$(2),$(3),$(4)) +.PHONY: update-codegen-crds-$(1) + +update-codegen-crds: update-codegen-crds-$(1) .PHONY: update-codegen-crds +verify-codegen-crds-$(1): VERIFY_CODEGEN_CRD_TMP_DIR:=$(shell mktemp -d) +verify-codegen-crds-$(1): ensure-controller-gen ensure-yq + $(call run-crd-gen,$(2),$(3),$$(VERIFY_CODEGEN_CRD_TMP_DIR)) + $$(foreach p,$$(wildcard $(3)/*.crd.yaml),$$(call diff-file,$$(p),$$(subst $(3),$$(VERIFY_CODEGEN_CRD_TMP_DIR),$$(p)))) +.PHONY: verify-codegen-crds-$(1) + +verify-codegen-crds: verify-codegen-crds-$(1) +.PHONY: verify-codegen-crds + +endef + + update-generated: update-codegen-crds .PHONY: update-generated update: update-generated .PHONY: update -# $1 - manifest (actual) crd -# $2 - temp crd -define diff-crd - diff -Naup $(1) $(2) - -endef - -verify-codegen-crds: CRD_SCHEMA_GEN_OUTPUT :=$(shell mktemp -d) -verify-codegen-crds: update-codegen-crds - $(foreach p,$(wildcard $(CRD_SCHEMA_GEN_MANIFESTS)/*.crd.yaml),$(call diff-crd,$(p),$(subst $(CRD_SCHEMA_GEN_MANIFESTS),$(CRD_SCHEMA_GEN_OUTPUT),$(p)))) -.PHONY: verify-codegen-crds - verify-generated: verify-codegen-crds .PHONY: verify-generated @@ -48,9 +64,14 @@ 
verify: verify-generated .PHONY: verify +define add-crd-gen +$(eval $(call add-crd-gen-internal,$(1),$(2),$(3),$(4))) +endef + + # We need to be careful to expand all the paths before any include is done # or self_dir could be modified for the next include by the included file. -# Also doing this at the end of the file allows us to user self_dir before it could be modified. +# Also doing this at the end of the file allows us to use self_dir before it could be modified. include $(addprefix $(self_dir), \ ../../lib/golang.mk \ ../../lib/tmp.mk \ diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/rpm.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/rpm.mk index b235197c7..3f20bb1ca 100644 --- a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/rpm.mk +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/rpm.mk @@ -35,7 +35,7 @@ clean: clean-rpms # We need to be careful to expand all the paths before any include is done # or self_dir could be modified for the next include by the included file. -# Also doing this at the end of the file allows us to user self_dir before it could be modified. +# Also doing this at the end of the file allows us to use self_dir before it could be modified. 
include $(addprefix $(self_dir), \ ../../lib/golang.mk \ ) diff --git a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/yq.mk b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/yq.mk index 7dd556d40..2679e3add 100644 --- a/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/yq.mk +++ b/vendor/github.com/openshift/library-go/alpha-build-machinery/make/targets/openshift/yq.mk @@ -25,7 +25,7 @@ clean: clean-yq # We need to be careful to expand all the paths before any include is done # or self_dir could be modified for the next include by the included file. -# Also doing this at the end of the file allows us to user self_dir before it could be modified. +# Also doing this at the end of the file allows us to use self_dir before it could be modified. include $(addprefix $(self_dir), \ ../../lib/golang.mk \ ../../lib/tmp.mk \ diff --git a/vendor/github.com/openshift/library-go/pkg/assets/create/creater.go b/vendor/github.com/openshift/library-go/pkg/assets/create/creater.go index cd5f6a749..bf2bdf1a8 100644 --- a/vendor/github.com/openshift/library-go/pkg/assets/create/creater.go +++ b/vendor/github.com/openshift/library-go/pkg/assets/create/creater.go @@ -263,7 +263,7 @@ func load(assetsDir string, options CreateOptions) (map[string]*unstructured.Uns } manifestUnstructured, ok := manifestObj.(*unstructured.Unstructured) if !ok { - errs[manifestPath] = fmt.Errorf("unable to convert asset %q to unstructed", manifestPath) + errs[manifestPath] = fmt.Errorf("unable to convert asset %q to unstructured", manifestPath) continue } manifests[manifestPath] = manifestUnstructured diff --git a/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go b/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go index 5a593d2b1..3bb305ad2 100644 --- a/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go +++ b/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go @@ 
-542,27 +542,36 @@ func MakeSelfSignedCA(certFile, keyFile, serialFile, name string, expireDays int } func MakeSelfSignedCAConfig(name string, expireDays int) (*TLSCertificateConfig, error) { + subject := pkix.Name{CommonName: name} + return MakeSelfSignedCAConfigForSubject(subject, expireDays) +} + +func MakeSelfSignedCAConfigForSubject(subject pkix.Name, expireDays int) (*TLSCertificateConfig, error) { var caLifetimeInDays = DefaultCACertificateLifetimeInDays if expireDays > 0 { caLifetimeInDays = expireDays } if caLifetimeInDays > DefaultCACertificateLifetimeInDays { - warnAboutCertificateLifeTime(name, DefaultCACertificateLifetimeInDays) + warnAboutCertificateLifeTime(subject.CommonName, DefaultCACertificateLifetimeInDays) } caLifetime := time.Duration(caLifetimeInDays) * 24 * time.Hour - - return MakeSelfSignedCAConfigForDuration(name, caLifetime) + return makeSelfSignedCAConfigForSubjectAndDuration(subject, caLifetime) } func MakeSelfSignedCAConfigForDuration(name string, caLifetime time.Duration) (*TLSCertificateConfig, error) { + subject := pkix.Name{CommonName: name} + return makeSelfSignedCAConfigForSubjectAndDuration(subject, caLifetime) +} + +func makeSelfSignedCAConfigForSubjectAndDuration(subject pkix.Name, caLifetime time.Duration) (*TLSCertificateConfig, error) { // Create CA cert rootcaPublicKey, rootcaPrivateKey, err := NewKeyPair() if err != nil { return nil, err } - rootcaTemplate := newSigningCertificateTemplateForDuration(pkix.Name{CommonName: name}, caLifetime, time.Now) + rootcaTemplate := newSigningCertificateTemplateForDuration(subject, caLifetime, time.Now) rootcaCert, err := signCertificate(rootcaTemplate, rootcaPublicKey, rootcaTemplate, rootcaPrivateKey) if err != nil { return nil, err diff --git a/vendor/github.com/openshift/library-go/pkg/image/registryclient/client.go b/vendor/github.com/openshift/library-go/pkg/image/registryclient/client.go index 0596a0dcf..042904565 100644 --- 
a/vendor/github.com/openshift/library-go/pkg/image/registryclient/client.go +++ b/vendor/github.com/openshift/library-go/pkg/image/registryclient/client.go @@ -20,6 +20,7 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/manifest/schema1" "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" registryclient "github.com/docker/distribution/registry/client" "github.com/docker/distribution/registry/client/auth" "github.com/docker/distribution/registry/client/auth/challenge" @@ -369,8 +370,20 @@ func isTemporaryHTTPError(err error) (time.Duration, bool) { switch t := err.(type) { case net.Error: return time.Second, t.Temporary() || t.Timeout() + case errcode.ErrorCoder: + // note: we explicitly do not check errcode.ErrorCodeUnknown because that is used in + // a wide range of scenarios to convey "generic error", not "retryable error" + switch t.ErrorCode() { + case errcode.ErrorCodeUnavailable: + return 5 * time.Second, true + case errcode.ErrorCodeTooManyRequests: + return 2 * time.Second, true + } case *registryclient.UnexpectedHTTPResponseError: - if t.StatusCode == http.StatusTooManyRequests { + switch t.StatusCode { + case http.StatusInternalServerError, http.StatusGatewayTimeout, http.StatusServiceUnavailable, http.StatusBadGateway: + return 5 * time.Second, true + case http.StatusTooManyRequests: return 2 * time.Second, true } } diff --git a/vendor/github.com/openshift/library-go/pkg/image/registryclient/client_test.go b/vendor/github.com/openshift/library-go/pkg/image/registryclient/client_test.go index 9fb65fb7a..db1791a02 100644 --- a/vendor/github.com/openshift/library-go/pkg/image/registryclient/client_test.go +++ b/vendor/github.com/openshift/library-go/pkg/image/registryclient/client_test.go @@ -18,6 +18,7 @@ import ( "github.com/docker/distribution" "github.com/docker/distribution/reference" "github.com/docker/distribution/registry/api/errcode" + registryclient 
"github.com/docker/distribution/registry/client" "github.com/opencontainers/go-digest" "golang.org/x/net/context" ) @@ -320,13 +321,14 @@ func TestRetryFailure(t *testing.T) { t.Fatalf("unexpected: %v %v %#v", m, err, r) } - // retry four times + // verify docker known errors repo = &mockRepository{ getErr: temporaryError{}, blobs: &mockBlobStore{ - serveErr: temporaryError{}, - statErr: temporaryError{}, - openErr: temporaryError{}, + serveErr: errcode.ErrorCodeTooManyRequests.WithDetail(struct{}{}), + statErr: errcode.ErrorCodeUnavailable.WithDetail(struct{}{}), + // not retriable + openErr: errcode.ErrorCodeUnknown.WithDetail(struct{}{}), }, } r = NewLimitedRetryRepository(repo, 4, unlimited).(*retryRepository) @@ -335,7 +337,7 @@ func TestRetryFailure(t *testing.T) { if m, err = r.Manifests(ctx); err != nil { t.Fatal(err) } - r.retries = 2 + r.retries = 1 if _, err := m.Get(ctx, digest.Digest("foo")); err != repo.getErr { t.Fatalf("unexpected: %v %#v", err, r) } @@ -343,11 +345,12 @@ func TestRetryFailure(t *testing.T) { if m, err := m.Exists(ctx, "foo"); m || err != repo.getErr { t.Fatalf("unexpected: %v %v %#v", m, err, r) } - if sleeps != 4 { + if sleeps != 3 { t.Fatal(sleeps) } - r.retries = 2 + sleeps = 0 + r.retries = 1 b := r.Blobs(ctx) if err != nil { t.Fatal(err) @@ -359,10 +362,139 @@ func TestRetryFailure(t *testing.T) { if err := b.ServeBlob(ctx, nil, nil, digest.Digest("foo")); err != repo.blobs.serveErr { t.Fatalf("unexpected: %v %#v", err, r) } + r.retries = 4 + if _, err := b.Open(ctx, digest.Digest("foo")); err != repo.blobs.openErr { + t.Fatalf("unexpected: %v %#v", err, r) + } + // Open did not retry + if sleeps != 3 { + t.Fatal(sleeps) + } + + // verify unknown client errors + repo = &mockRepository{ + getErr: temporaryError{}, + blobs: &mockBlobStore{ + serveErr: ®istryclient.UnexpectedHTTPResponseError{StatusCode: http.StatusTooManyRequests}, + statErr: ®istryclient.UnexpectedHTTPResponseError{StatusCode: http.StatusServiceUnavailable}, + 
openErr: ®istryclient.UnexpectedHTTPResponseError{StatusCode: http.StatusInternalServerError}, + }, + } + r = NewLimitedRetryRepository(repo, 4, unlimited).(*retryRepository) + sleeps = 0 + r.sleepFn = sleepFn + if m, err = r.Manifests(ctx); err != nil { + t.Fatal(err) + } + r.retries = 1 + if _, err := m.Get(ctx, digest.Digest("foo")); err != repo.getErr { + t.Fatalf("unexpected: %v %#v", err, r) + } + r.retries = 2 + if m, err := m.Exists(ctx, "foo"); m || err != repo.getErr { + t.Fatalf("unexpected: %v %v %#v", m, err, r) + } + if sleeps != 3 { + t.Fatal(sleeps) + } + + sleeps = 0 + r.retries = 1 + b = r.Blobs(ctx) + if err != nil { + t.Fatal(err) + } + if _, err := b.Stat(ctx, digest.Digest("x")); err != repo.blobs.statErr { + t.Fatalf("unexpected: %v %#v", err, r) + } r.retries = 2 + if err := b.ServeBlob(ctx, nil, nil, digest.Digest("foo")); err != repo.blobs.serveErr { + t.Fatalf("unexpected: %v %#v", err, r) + } + r.retries = 4 if _, err := b.Open(ctx, digest.Digest("foo")); err != repo.blobs.openErr { t.Fatalf("unexpected: %v %#v", err, r) } + // Open did not retry + if sleeps != 7 { + t.Fatal(sleeps) + } + + // verify more unknown client errors + repo = &mockRepository{ + getErr: temporaryError{}, + blobs: &mockBlobStore{ + serveErr: ®istryclient.UnexpectedHTTPResponseError{StatusCode: http.StatusBadGateway}, + statErr: ®istryclient.UnexpectedHTTPResponseError{StatusCode: http.StatusGatewayTimeout}, + openErr: ®istryclient.UnexpectedHTTPResponseError{StatusCode: http.StatusInternalServerError}, + }, + } + r = NewLimitedRetryRepository(repo, 4, unlimited).(*retryRepository) + sleeps = 0 + r.sleepFn = sleepFn + if m, err = r.Manifests(ctx); err != nil { + t.Fatal(err) + } + r.retries = 1 + if _, err := m.Get(ctx, digest.Digest("foo")); err != repo.getErr { + t.Fatalf("unexpected: %v %#v", err, r) + } + r.retries = 2 + if m, err := m.Exists(ctx, "foo"); m || err != repo.getErr { + t.Fatalf("unexpected: %v %v %#v", m, err, r) + } + if sleeps != 3 { + 
t.Fatal(sleeps) + } + + sleeps = 0 + r.retries = 1 + b = r.Blobs(ctx) + if err != nil { + t.Fatal(err) + } + if _, err := b.Stat(ctx, digest.Digest("x")); err != repo.blobs.statErr { + t.Fatalf("unexpected: %v %#v", err, r) + } + r.retries = 2 + if err := b.ServeBlob(ctx, nil, nil, digest.Digest("foo")); err != repo.blobs.serveErr { + t.Fatalf("unexpected: %v %#v", err, r) + } + r.retries = 4 + if _, err := b.Open(ctx, digest.Digest("foo")); err != repo.blobs.openErr { + t.Fatalf("unexpected: %v %#v", err, r) + } + // Open did not retry + if sleeps != 7 { + t.Fatal(sleeps) + } + + // retry with temporary errors + repo = &mockRepository{ + getErr: temporaryError{}, + blobs: &mockBlobStore{ + serveErr: temporaryError{}, + statErr: temporaryError{}, + openErr: temporaryError{}, + }, + } + r = NewLimitedRetryRepository(repo, 4, unlimited).(*retryRepository) + sleeps = 0 + r.sleepFn = sleepFn + if m, err = r.Manifests(ctx); err != nil { + t.Fatal(err) + } + r.retries = 1 + if _, err := m.Get(ctx, digest.Digest("foo")); err != repo.getErr { + t.Fatalf("unexpected: %v %#v", err, r) + } + r.retries = 2 + if m, err := m.Exists(ctx, "foo"); m || err != repo.getErr { + t.Fatalf("unexpected: %v %v %#v", m, err, r) + } + if sleeps != 3 { + t.Fatal(sleeps) + } } func Test_verifyManifest_Get(t *testing.T) { diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core.go index b4f11db96..da1a1cfa3 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core.go @@ -234,23 +234,7 @@ func ApplySecret(client coreclientv1.SecretsGetter, recorder events.Recorder, re existingCopy.Data = required.Data if klog.V(4) { - safeRequired := required.DeepCopy() - safeExisting := existing.DeepCopy() - - for s := range safeExisting.Data { - safeExisting.Data[s] = 
[]byte("OLD") - } - for s := range safeRequired.Data { - if _, preexisting := existing.Data[s]; !preexisting { - safeRequired.Data[s] = []byte("NEW") - } else if !equality.Semantic.DeepEqual(existing.Data[s], safeRequired.Data[s]) { - safeRequired.Data[s] = []byte("MODIFIED") - } else { - safeRequired.Data[s] = []byte("OLD") - } - } - - klog.Infof("Secret %q changes: %v", required.Namespace+"/"+required.Name, JSONPatch(safeExisting, safeRequired)) + klog.Infof("Secret %s/%s changes: %v", required.Namespace, required.Name, JSONPatchSecret(existing, required)) } actual, err := client.Secrets(required.Namespace).Update(existingCopy) diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/json_patch_helpers.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/json_patch_helpers.go index c5077f48e..c053be7ab 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/json_patch_helpers.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/json_patch_helpers.go @@ -4,6 +4,9 @@ import ( "fmt" patch "github.com/evanphx/json-patch" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" ) @@ -31,3 +34,25 @@ func JSONPatch(original, modified runtime.Object) string { } return string(patchBytes) } + +// JSONPatchSecret generates a JSON patch between original and modified secrets, hiding its data, +// and return the JSON as a string. In case of error, the returned string will contain the error messages. 
+func JSONPatchSecret(original, modified *corev1.Secret) string { + safeModified := modified.DeepCopy() + safeOriginal := original.DeepCopy() + + for s := range safeOriginal.Data { + safeOriginal.Data[s] = []byte("OLD") + } + for s := range safeModified.Data { + if _, preoriginal := original.Data[s]; !preoriginal { + safeModified.Data[s] = []byte("NEW") + } else if !equality.Semantic.DeepEqual(original.Data[s], safeModified.Data[s]) { + safeModified.Data[s] = []byte("MODIFIED") + } else { + safeModified.Data[s] = []byte("OLD") + } + } + + return JSONPatch(safeOriginal, safeModified) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/certsyncpod/certsync_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/certsyncpod/certsync_controller.go index e581a4418..f2d8b5dc0 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/certsyncpod/certsync_controller.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/certsyncpod/certsync_controller.go @@ -79,6 +79,7 @@ func getSecretDir(targetDir, secretName string) string { func (c *CertSyncController) sync() error { errors := []error{} + klog.Infof("Syncing configmaps: %v", c.configMaps) for _, cm := range c.configMaps { configMap, err := c.configMapLister.ConfigMaps(c.namespace).Get(cm.Name) switch { @@ -101,11 +102,14 @@ func (c *CertSyncController) sync() error { // remove missing content if err := os.RemoveAll(getConfigMapDir(c.destinationDir, cm.Name)); err != nil { + c.eventRecorder.Warningf("CertificateUpdateFailed", "Failed removing file for configmap: %s/%s: %v", configMap.Namespace, configMap.Name, err) errors = append(errors, err) } + c.eventRecorder.Eventf("CertificateRemoved", "Removed file for configmap: %s/%s", configMap.Namespace, configMap.Name) continue case err != nil: + c.eventRecorder.Warningf("CertificateUpdateFailed", "Failed getting configmap: %s/%s: %v", configMap.Namespace, configMap.Name, err) errors = 
append(errors, err) continue } @@ -138,6 +142,7 @@ func (c *CertSyncController) sync() error { configMap, err = c.configmapGetter.Get(configMap.Name, metav1.GetOptions{}) if err != nil { // Even if the error is not exists we will act on it when caches catch up + c.eventRecorder.Warningf("CertificateUpdateFailed", "Failed getting configmap: %s/%s: %v", configMap.Namespace, configMap.Name, err) errors = append(errors, err) continue } @@ -150,6 +155,7 @@ func (c *CertSyncController) sync() error { klog.Infof("Creating directory %q ...", contentDir) if err := os.MkdirAll(contentDir, 0755); err != nil && !os.IsExist(err) { + c.eventRecorder.Warningf("CertificateUpdateFailed", "Failed creating directory for configmap: %s/%s: %v", configMap.Namespace, configMap.Name, err) errors = append(errors, err) continue } @@ -162,12 +168,15 @@ func (c *CertSyncController) sync() error { klog.Infof("Writing configmap manifest %q ...", fullFilename) if err := ioutil.WriteFile(fullFilename, []byte(content), 0644); err != nil { + c.eventRecorder.Warningf("CertificateUpdateFailed", "Failed writing file for configmap: %s/%s: %v", configMap.Namespace, configMap.Name, err) errors = append(errors, err) continue } } + c.eventRecorder.Eventf("CertificateUpdated", "Wrote updated configmap: %s/%s", configMap.Namespace, configMap.Name) } + klog.Infof("Syncing secrets: %v", c.secrets) for _, s := range c.secrets { secret, err := c.secretLister.Secrets(c.namespace).Get(s.Name) switch { @@ -190,11 +199,14 @@ func (c *CertSyncController) sync() error { // remove missing content if err := os.RemoveAll(getSecretDir(c.destinationDir, s.Name)); err != nil { + c.eventRecorder.Warningf("CertificateUpdateFailed", "Failed removing file for secret: %s/%s: %v", secret.Namespace, secret.Name, err) errors = append(errors, err) } + c.eventRecorder.Warningf("CertificateRemoved", "Removed file for secret: %s/%s", secret.Namespace, secret.Name) continue case err != nil: +
c.eventRecorder.Warningf("CertificateUpdateFailed", "Failed getting secret: %s/%s: %v", secret.Namespace, secret.Name, err) errors = append(errors, err) continue } @@ -227,6 +239,7 @@ func (c *CertSyncController) sync() error { secret, err = c.secretGetter.Get(secret.Name, metav1.GetOptions{}) if err != nil { // Even if the error is not exists we will act on it when caches catch up + c.eventRecorder.Warningf("CertificateUpdateFailed", "Failed getting secret: %s/%s: %v", secret.Namespace, secret.Name, err) errors = append(errors, err) continue } @@ -239,6 +252,7 @@ func (c *CertSyncController) sync() error { klog.Infof("Creating directory %q ...", contentDir) if err := os.MkdirAll(contentDir, 0755); err != nil && !os.IsExist(err) { + c.eventRecorder.Warningf("CertificateUpdateFailed", "Failed creating directory for secret: %s/%s: %v", secret.Namespace, secret.Name, err) errors = append(errors, err) continue } @@ -252,10 +266,12 @@ func (c *CertSyncController) sync() error { klog.Infof("Writing secret manifest %q ...", fullFilename) if err := ioutil.WriteFile(fullFilename, content, 0644); err != nil { + c.eventRecorder.Warningf("CertificateUpdateFailed", "Failed writing file for secret: %s/%s: %v", secret.Namespace, secret.Name, err) errors = append(errors, err) continue } } + c.eventRecorder.Eventf("CertificateUpdated", "Wrote updated secret: %s/%s", secret.Namespace, secret.Name) } return utilerrors.NewAggregate(errors) diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installerstate/installer_state_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installerstate/installer_state_controller.go new file mode 100644 index 000000000..2bcc5f710 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installerstate/installer_state_controller.go @@ -0,0 +1,254 @@ +package installerstate + +import ( + "fmt" + "strings" + "time" + + v1 "k8s.io/api/core/v1" + 
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/informers" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog" + + operatorv1 "github.com/openshift/api/operator/v1" + + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +const installerStateControllerWorkQueueKey = "key" + +// maxToleratedPodPendingDuration is the maximum time we tolerate installer pod in pending state +var maxToleratedPodPendingDuration = 5 * time.Minute + +type InstallerStateController struct { + podsGetter corev1client.PodsGetter + eventsGetter corev1client.EventsGetter + queue workqueue.RateLimitingInterface + cachesToSync []cache.InformerSynced + targetNamespace string + operatorClient v1helpers.StaticPodOperatorClient + eventRecorder events.Recorder + + timeNowFn func() time.Time +} + +func NewInstallerStateController(kubeInformersForTargetNamespace informers.SharedInformerFactory, + podsGetter corev1client.PodsGetter, + eventsGetter corev1client.EventsGetter, + operatorClient v1helpers.StaticPodOperatorClient, + targetNamespace string, + recorder events.Recorder, +) *InstallerStateController { + c := &InstallerStateController{ + podsGetter: podsGetter, + eventsGetter: eventsGetter, + targetNamespace: targetNamespace, + operatorClient: operatorClient, + eventRecorder: recorder.WithComponentSuffix("installer-state-controller"), + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "InstallerStateController"), + timeNowFn: time.Now, + } + + c.cachesToSync = append(c.cachesToSync, kubeInformersForTargetNamespace.Core().V1().Pods().Informer().HasSynced) + kubeInformersForTargetNamespace.Core().V1().Pods().Informer().AddEventHandler(c.eventHandler()) + + return c +} + 
+func (c *InstallerStateController) eventHandler() cache.ResourceEventHandler { + return cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { c.queue.Add(installerStateControllerWorkQueueKey) }, + UpdateFunc: func(old, new interface{}) { c.queue.Add(installerStateControllerWorkQueueKey) }, + DeleteFunc: func(obj interface{}) { c.queue.Add(installerStateControllerWorkQueueKey) }, + } +} + +// degradedConditionNames lists all supported condition types. +var degradedConditionNames = []string{ + "InstallerPodPendingDegraded", + "InstallerPodContainerWaitingDegraded", + "InstallerPodNetworkingDegraded", +} + +func (c *InstallerStateController) sync() error { + pods, err := c.podsGetter.Pods(c.targetNamespace).List(metav1.ListOptions{ + LabelSelector: labels.SelectorFromSet(labels.Set{"app": "installer"}).String(), + }) + if err != nil { + return err + } + + // collect all startingObjects that are in pending state for longer than maxToleratedPodPendingDuration + pendingPods := []*v1.Pod{} + for _, pod := range pods.Items { + if pod.Status.Phase != v1.PodPending || pod.Status.StartTime == nil { + continue + } + if c.timeNowFn().Sub(pod.Status.StartTime.Time) >= maxToleratedPodPendingDuration { + pendingPods = append(pendingPods, pod.DeepCopy()) + } + } + + // in theory, there should never be two installer startingObjects pending as we don't roll new installer pod + // until the previous/existing pod has finished its job. + foundConditions := []operatorv1.OperatorCondition{} + foundConditions = append(foundConditions, c.handlePendingInstallerPods(pendingPods)...) + + // handle networking conditions that are based on events + networkConditions, err := c.handlePendingInstallerPodsNetworkEvents(pendingPods) + if err != nil { + return err + } + foundConditions = append(foundConditions, networkConditions...) 
+ + updateConditionFuncs := []v1helpers.UpdateStaticPodStatusFunc{} + + // check the supported degraded foundConditions and check if any pending pod matching them. + for _, degradedConditionName := range degradedConditionNames { + // clean up existing foundConditions + updatedCondition := operatorv1.OperatorCondition{ + Type: degradedConditionName, + Status: operatorv1.ConditionFalse, + } + if condition := v1helpers.FindOperatorCondition(foundConditions, degradedConditionName); condition != nil { + updatedCondition = *condition + } + updateConditionFuncs = append(updateConditionFuncs, v1helpers.UpdateStaticPodConditionFn(updatedCondition)) + } + + if _, _, err := v1helpers.UpdateStaticPodStatus(c.operatorClient, updateConditionFuncs...); err != nil { + return err + } + + return nil +} + +func (c *InstallerStateController) handlePendingInstallerPodsNetworkEvents(pods []*v1.Pod) ([]operatorv1.OperatorCondition, error) { + conditions := []operatorv1.OperatorCondition{} + if len(pods) == 0 { + return conditions, nil + } + namespaceEvents, err := c.eventsGetter.Events(c.targetNamespace).List(metav1.ListOptions{}) + if err != nil { + return nil, err + } + for _, event := range namespaceEvents.Items { + if event.InvolvedObject.Kind != "Pod" { + continue + } + if !strings.Contains(event.Message, "failed to create pod network") { + continue + } + for _, pod := range pods { + if pod.Name != event.InvolvedObject.Name { + continue + } + // If we already find the pod that is pending because of the networking problem, skip other pods. + // This will reduce the events we fire. 
+ if v1helpers.FindOperatorCondition(conditions, "InstallerPodNetworkingDegraded") != nil { + break + } + condition := operatorv1.OperatorCondition{ + Type: "InstallerPodNetworkingDegraded", + Status: operatorv1.ConditionTrue, + Reason: event.Reason, + Message: fmt.Sprintf("Pod %q on node %q observed degraded networking: %s", pod.Name, pod.Spec.NodeName, event.Message), + } + conditions = append(conditions, condition) + c.eventRecorder.Warningf(condition.Reason, condition.Message) + } + } + return conditions, nil +} + +func (c *InstallerStateController) handlePendingInstallerPods(pods []*v1.Pod) []operatorv1.OperatorCondition { + conditions := []operatorv1.OperatorCondition{} + for _, pod := range pods { + // at this point we already know the pod is pending for longer than expected + pendingTime := c.timeNowFn().Sub(pod.Status.StartTime.Time) + + // the pod is in the pending state for longer than maxToleratedPodPendingDuration, report the reason and message + // as degraded condition for the operator. + if len(pod.Status.Reason) > 0 { + condition := operatorv1.OperatorCondition{ + Type: "InstallerPodPendingDegraded", + Reason: pod.Status.Reason, + Status: operatorv1.ConditionTrue, + Message: fmt.Sprintf("Pod %q on node %q is Pending for %s because %s", pod.Name, pod.Spec.NodeName, pendingTime, pod.Status.Message), + } + conditions = append(conditions, condition) + c.eventRecorder.Warningf(condition.Reason, condition.Message) + } + + // one or more containers are in waiting state for longer than maxToleratedPodPendingDuration, report the reason and message + // as degraded condition for the operator. 
+ for _, containerStatus := range pod.Status.ContainerStatuses { + if containerStatus.State.Waiting == nil { + continue + } + if state := containerStatus.State.Waiting; len(state.Reason) > 0 { + condition := operatorv1.OperatorCondition{ + Type: "InstallerPodContainerWaitingDegraded", + Reason: state.Reason, + Status: operatorv1.ConditionTrue, + Message: fmt.Sprintf("Pod %q on node %q container %q is waiting for %s because %s", pod.Name, pod.Spec.NodeName, containerStatus.Name, pendingTime, state.Message), + } + conditions = append(conditions, condition) + c.eventRecorder.Warningf(condition.Reason, condition.Message) + } + } + } + + return conditions +} + +// Run starts the kube-apiserver and blocks until stopCh is closed. +func (c *InstallerStateController) Run(workers int, stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting InstallerStateController") + defer klog.Infof("Shutting down InstallerStateController") + if !cache.WaitForCacheSync(stopCh, c.cachesToSync...) { + return + } + + // doesn't matter what workers say, only start one. 
+ go wait.Until(c.runWorker, time.Second, stopCh) + + // add time based trigger + go wait.Until(func() { c.queue.Add(installerStateControllerWorkQueueKey) }, time.Minute, stopCh) + + <-stopCh +} + +func (c *InstallerStateController) runWorker() { + for c.processNextWorkItem() { + } +} + +func (c *InstallerStateController) processNextWorkItem() bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.sync() + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installerstate/installer_state_controller_test.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installerstate/installer_state_controller_test.go new file mode 100644 index 000000000..c6e39856f --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/installerstate/installer_state_controller_test.go @@ -0,0 +1,177 @@ +package installerstate + +import ( + "testing" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/storage/names" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes/fake" + + operatorv1 "github.com/openshift/api/operator/v1" + + "github.com/openshift/library-go/pkg/operator/events/eventstesting" + "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +func newInstallerPod(name string, mutateStatusFn func(*corev1.PodStatus)) *corev1.Pod { + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: "test", + Labels: map[string]string{"app": "installer"}, + }, + Spec: corev1.PodSpec{}, + Status: corev1.PodStatus{}, + } + mutateStatusFn(&pod.Status) + return pod +} + +func newInstallerPodNetworkEvent(mutateFn func(*corev1.Event)) 
*corev1.Event { + event := &corev1.Event{ + ObjectMeta: metav1.ObjectMeta{ + Name: names.SimpleNameGenerator.GenerateName("test"), + Namespace: "test", + }, + InvolvedObject: corev1.ObjectReference{ + Kind: "Pod", + Name: "installer-1", + }, + Reason: "FailedCreatePodSandBox", + Message: `'(combined from similar events): Failed create pod sandbox: rpc error: + code = Unknown desc = failed to create pod network sandbox k8s_installer-5-control-plane-1_openshift-kube-apiserver_900db7f3-d2ce-11e9-8fc8-005056be0641_0(121698f4862fd67157ca586cab18aefb048fe5d7b3bd87516098ac0e91a90a13): + Multus: Err adding pod to network "openshift-sdn": Multus: error in invoke Delegate + add - "openshift-sdn": failed to send CNI request: Post http://dummy/: dial unix + /var/run/openshift-sdn/cniserver/socket: connect: connection refused'`, + } + if mutateFn != nil { + mutateFn(event) + } + return event +} + +func TestInstallerStateController(t *testing.T) { + tests := []struct { + name string + startingObjects []runtime.Object + evalConditions func(t *testing.T, conditions []operatorv1.OperatorCondition) + }{ + { + name: "should report pending pod", + startingObjects: []runtime.Object{ + newInstallerPod("installer-1", func(status *corev1.PodStatus) { + status.Phase = corev1.PodPending + status.Reason = "PendingReason" + status.Message = "PendingMessage" + status.StartTime = &metav1.Time{Time: time.Now().Add(-(maxToleratedPodPendingDuration + 5*time.Minute))} + }), + }, + evalConditions: func(t *testing.T, conditions []operatorv1.OperatorCondition) { + podPendingCondition := v1helpers.FindOperatorCondition(conditions, "InstallerPodPendingDegraded") + if podPendingCondition.Status != operatorv1.ConditionTrue { + t.Errorf("expected InstallerPodPendingDegraded condition to be True") + } + podContainerWaitingCondition := v1helpers.FindOperatorCondition(conditions, "InstallerPodContainerWaitingDegraded") + if podContainerWaitingCondition.Status != operatorv1.ConditionFalse { + 
t.Errorf("expected InstallerPodPendingDegraded condition to be False") + } + }, + }, + { + name: "should report pod with failing networking", + startingObjects: []runtime.Object{ + newInstallerPod("installer-1", func(status *corev1.PodStatus) { + status.Phase = corev1.PodPending + status.Reason = "PendingReason" + status.Message = "PendingMessage" + status.StartTime = &metav1.Time{Time: time.Now().Add(-(maxToleratedPodPendingDuration + 5*time.Minute))} + }), + newInstallerPodNetworkEvent(nil), + newInstallerPodNetworkEvent(nil), + newInstallerPodNetworkEvent(nil), + }, + evalConditions: func(t *testing.T, conditions []operatorv1.OperatorCondition) { + podPendingCondition := v1helpers.FindOperatorCondition(conditions, "InstallerPodNetworkingDegraded") + if podPendingCondition.Status != operatorv1.ConditionTrue { + t.Errorf("expected InstallerPodNetworkingDegraded condition to be True") + } + }, + }, + { + name: "should report pending pod with waiting container", + startingObjects: []runtime.Object{ + newInstallerPod("installer-1", func(status *corev1.PodStatus) { + status.Phase = corev1.PodPending + status.Reason = "PendingReason" + status.Message = "PendingMessage" + status.StartTime = &metav1.Time{Time: time.Now().Add(-(maxToleratedPodPendingDuration + 5*time.Minute))} + status.ContainerStatuses = append(status.ContainerStatuses, corev1.ContainerStatus{Name: "test", State: corev1.ContainerState{Waiting: &corev1.ContainerStateWaiting{ + Reason: "PodInitializing", + Message: "initializing error", + }}}) + }), + }, + evalConditions: func(t *testing.T, conditions []operatorv1.OperatorCondition) { + podPendingCondition := v1helpers.FindOperatorCondition(conditions, "InstallerPodPendingDegraded") + if podPendingCondition.Status != operatorv1.ConditionTrue { + t.Errorf("expected InstallerPodPendingDegraded condition to be True") + } + podContainerWaitingCondition := v1helpers.FindOperatorCondition(conditions, "InstallerPodContainerWaitingDegraded") + if 
podContainerWaitingCondition.Status != operatorv1.ConditionTrue { + t.Errorf("expected InstallerPodPendingDegraded condition to be True") + } + }, + }, + { + name: "should report false when no pending startingObjects", + startingObjects: []runtime.Object{ + newInstallerPod("installer-1", func(status *corev1.PodStatus) { + status.Phase = corev1.PodRunning + status.StartTime = &metav1.Time{Time: time.Now().Add(-(maxToleratedPodPendingDuration + 5*time.Minute))} + }), + }, + evalConditions: func(t *testing.T, conditions []operatorv1.OperatorCondition) { + podPendingCondition := v1helpers.FindOperatorCondition(conditions, "InstallerPodPendingDegraded") + if podPendingCondition.Status != operatorv1.ConditionFalse { + t.Errorf("expected InstallerPodPendingDegraded condition to be False") + } + podContainerWaitingCondition := v1helpers.FindOperatorCondition(conditions, "InstallerPodContainerWaitingDegraded") + if podContainerWaitingCondition.Status != operatorv1.ConditionFalse { + t.Errorf("expected InstallerPodPendingDegraded condition to be False") + } + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + kubeClient := fake.NewSimpleClientset(tt.startingObjects...) 
+ kubeInformers := informers.NewSharedInformerFactoryWithOptions(kubeClient, 1*time.Minute, informers.WithNamespace("test")) + stopCh := make(chan struct{}) + go kubeInformers.Start(stopCh) + defer close(stopCh) + + fakeStaticPodOperatorClient := v1helpers.NewFakeStaticPodOperatorClient(&operatorv1.StaticPodOperatorSpec{}, &operatorv1.StaticPodOperatorStatus{}, nil, nil) + eventRecorder := eventstesting.NewTestingEventRecorder(t) + controller := NewInstallerStateController(kubeInformers, kubeClient.CoreV1(), kubeClient.CoreV1(), fakeStaticPodOperatorClient, "test", eventRecorder) + if err := controller.sync(); err != nil { + t.Error(err) + return + } + + _, status, _, err := fakeStaticPodOperatorClient.GetOperatorState() + if err != nil { + t.Error(err) + return + } + if tt.evalConditions != nil { + tt.evalConditions(t, status.Conditions) + } + }) + } + +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/revision/revision_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/revision/revision_controller.go index 6071d035e..f694da0e6 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/revision/revision_controller.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controller/revision/revision_controller.go @@ -193,7 +193,7 @@ func (c RevisionController) isLatestRevisionCurrent(revision int32) (bool, strin } if !equality.Semantic.DeepEqual(existingData, requiredData) { if klog.V(4) { - klog.Infof("secret %q changes for revision %d: %s", s.Name, revision, resourceapply.JSONPatch(existing, required)) + klog.Infof("Secret %q changes for revision %d: %s", s.Name, revision, resourceapply.JSONPatchSecret(existing, required)) } secretChanges = append(secretChanges, fmt.Sprintf("secret/%s has changed", s.Name)) } diff --git a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controllers.go 
b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controllers.go index f17b19871..00db28341 100644 --- a/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controllers.go +++ b/vendor/github.com/openshift/library-go/pkg/operator/staticpod/controllers.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/openshift/library-go/pkg/operator/loglevel" + "github.com/openshift/library-go/pkg/operator/staticpod/controller/installerstate" "github.com/openshift/library-go/pkg/operator/unsupportedconfigoverridescontroller" @@ -138,6 +139,7 @@ func (b *staticPodOperatorControllerBuilder) ToControllers() (*staticPodOperator configMapClient := v1helpers.CachedConfigMapGetter(b.kubeClient.CoreV1(), b.kubeInformers) secretClient := v1helpers.CachedSecretGetter(b.kubeClient.CoreV1(), b.kubeInformers) podClient := b.kubeClient.CoreV1() + eventsClient := b.kubeClient.CoreV1() operandInformers := b.kubeInformers.InformersFor(b.operandNamespace) clusterInformers := b.kubeInformers.InformersFor("") @@ -172,6 +174,14 @@ func (b *staticPodOperatorControllerBuilder) ToControllers() (*staticPodOperator b.certConfigMaps, b.certSecrets, ) + controllers.installerStateController = installerstate.NewInstallerStateController( + operandInformers, + podClient, + eventsClient, + b.staticPodOperatorClient, + b.operandNamespace, + eventRecorder, + ) } if len(b.operandName) > 0 { @@ -239,6 +249,9 @@ func (b *staticPodOperatorControllerBuilder) ToControllers() (*staticPodOperator if controllers.installerController == nil { errs = append(errs, fmt.Errorf("missing installerController; cannot proceed")) } + if controllers.installerStateController == nil { + errs = append(errs, fmt.Errorf("missing installerStateController; cannot proceed")) + } if controllers.staticPodStateController == nil { eventRecorder.Warning("StaticPodStateControllerMissing", "not enough information provided, not all functionality is present") } @@ -255,6 +268,7 @@ func (b *staticPodOperatorControllerBuilder) 
ToControllers() (*staticPodOperator type staticPodOperatorControllers struct { revisionController *revision.RevisionController installerController *installer.InstallerController + installerStateController *installerstate.InstallerStateController staticPodStateController *staticpodstate.StaticPodStateController pruneController *prune.PruneController nodeController *node.NodeController @@ -272,6 +286,7 @@ func (o *staticPodOperatorControllers) WithInstallerPodMutationFn(installerPodMu func (o *staticPodOperatorControllers) Run(stopCh <-chan struct{}) { go o.revisionController.Run(1, stopCh) go o.installerController.Run(1, stopCh) + go o.installerStateController.Run(1, stopCh) go o.staticPodStateController.Run(1, stopCh) go o.pruneController.Run(1, stopCh) go o.nodeController.Run(1, stopCh) diff --git a/vendor/github.com/prometheus/client_model/go.mod b/vendor/github.com/prometheus/client_model/go.mod deleted file mode 100644 index e7e0a86cc..000000000 --- a/vendor/github.com/prometheus/client_model/go.mod +++ /dev/null @@ -1,8 +0,0 @@ -module github.com/prometheus/client_model - -go 1.9 - -require ( - github.com/golang/protobuf v1.2.0 - golang.org/x/sync v0.0.0-20181108010431-42b317875d0f // indirect -) diff --git a/vendor/github.com/prometheus/client_model/go.sum b/vendor/github.com/prometheus/client_model/go.sum deleted file mode 100644 index 9c84c9068..000000000 --- a/vendor/github.com/prometheus/client_model/go.sum +++ /dev/null @@ -1,4 +0,0 @@ -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= diff --git a/vendor/github.com/prometheus/client_model/ruby/README.md b/vendor/github.com/prometheus/client_model/ruby/README.md index 
d15b59970..c45fcc7a9 100644 --- a/vendor/github.com/prometheus/client_model/ruby/README.md +++ b/vendor/github.com/prometheus/client_model/ruby/README.md @@ -2,6 +2,10 @@ Data model artifacts for the [Prometheus Ruby client][1]. +## Installation + + gem install prometheus-client-model + ## Usage Build the artifacts from the protobuf specification: @@ -18,10 +22,8 @@ require 'prometheus/client/model' CONTENT_TYPE = 'application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited' -content = open('http://localhost:9100/metrics', 'Accept' => CONTENT_TYPE).read -buffer = Beefcake::Buffer.new(content) - -while family = Prometheus::Client::MetricFamily.read_delimited(buffer) +stream = open('http://localhost:9090/metrics', 'Accept' => CONTENT_TYPE).read +while family = Prometheus::Client::MetricFamily.read_delimited(stream) puts family end ``` diff --git a/vendor/github.com/prometheus/common/.travis.yml b/vendor/github.com/prometheus/common/.travis.yml index e863e26c8..2fe8e9ad7 100644 --- a/vendor/github.com/prometheus/common/.travis.yml +++ b/vendor/github.com/prometheus/common/.travis.yml @@ -1,14 +1,6 @@ +sudo: false + language: go -# Supported Go versions are synced with github.com/prometheus/client_golang. go: - - 1.7.x - - 1.8.x - - 1.9.x - - 1.10.x - - 1.11.x - - 1.12.x - -script: - - make test - # style is only checked against the latest supported Go version. - - if [[ $TRAVIS_GO_VERSION =~ ^1\.(12)\. ]]; then make style; fi + - 1.7.5 + - tip diff --git a/vendor/github.com/prometheus/common/Makefile b/vendor/github.com/prometheus/common/Makefile deleted file mode 100644 index 69374986c..000000000 --- a/vendor/github.com/prometheus/common/Makefile +++ /dev/null @@ -1,21 +0,0 @@ -# Copyright 2018 The Prometheus Authors -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -include Makefile.common - -.PHONY: test -test:: deps check_license unused common-test - -ifeq (,$(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(7|8|9)\.')) -test:: staticcheck -endif diff --git a/vendor/github.com/prometheus/common/Makefile.common b/vendor/github.com/prometheus/common/Makefile.common deleted file mode 100644 index 873964fb4..000000000 --- a/vendor/github.com/prometheus/common/Makefile.common +++ /dev/null @@ -1,269 +0,0 @@ -# Copyright 2018 The Prometheus Authors -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# A common Makefile that includes rules to be reused in different prometheus projects. -# !!! Open PRs only against the prometheus/prometheus/Makefile.common repository! - -# Example usage : -# Create the main Makefile in the root project directory. 
-# include Makefile.common -# customTarget: -# @echo ">> Running customTarget" -# - -# Ensure GOBIN is not set during build so that promu is installed to the correct path -unexport GOBIN - -GO ?= go -GOFMT ?= $(GO)fmt -FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) -GOOPTS ?= -GOHOSTOS ?= $(shell $(GO) env GOHOSTOS) -GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH) - -GO_VERSION ?= $(shell $(GO) version) -GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION)) -PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.') - -GOVENDOR := -GO111MODULE := -ifeq (, $(PRE_GO_111)) - ifneq (,$(wildcard go.mod)) - # Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI). - GO111MODULE := on - - ifneq (,$(wildcard vendor)) - # Always use the local vendor/ directory to satisfy the dependencies. - GOOPTS := $(GOOPTS) -mod=vendor - endif - endif -else - ifneq (,$(wildcard go.mod)) - ifneq (,$(wildcard vendor)) -$(warning This repository requires Go >= 1.11 because of Go modules) -$(warning Some recipes may not work as expected as the current Go runtime is '$(GO_VERSION_NUMBER)') - endif - else - # This repository isn't using Go modules (yet). - GOVENDOR := $(FIRST_GOPATH)/bin/govendor - endif -endif -PROMU := $(FIRST_GOPATH)/bin/promu -pkgs = ./... - -ifeq (arm, $(GOHOSTARCH)) - GOHOSTARM ?= $(shell GOARM= $(GO) env GOARM) - GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH)v$(GOHOSTARM) -else - GO_BUILD_PLATFORM ?= $(GOHOSTOS)-$(GOHOSTARCH) -endif - -PROMU_VERSION ?= 0.3.0 -PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz - -STATICCHECK := -# staticcheck only supports linux, freebsd, darwin and windows platforms on i386/amd64 -# windows isn't included here because of the path separator being different. 
-ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin)) - ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386)) - STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck - STATICCHECK_VERSION ?= 2019.1 - STATICCHECK_URL := https://github.com/dominikh/go-tools/releases/download/$(STATICCHECK_VERSION)/staticcheck_$(GOHOSTOS)_$(GOHOSTARCH) - endif -endif - -PREFIX ?= $(shell pwd) -BIN_DIR ?= $(shell pwd) -DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD)) -DOCKER_REPO ?= prom - -DOCKER_ARCHS ?= amd64 - -BUILD_DOCKER_ARCHS = $(addprefix common-docker-,$(DOCKER_ARCHS)) -PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS)) -TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS)) - -ifeq ($(GOHOSTARCH),amd64) - ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows)) - # Only supported on amd64 - test-flags := -race - endif -endif - -# This rule is used to forward a target like "build" to "common-build". This -# allows a new "build" target to be defined in a Makefile which includes this -# one and override "common-build" without override warnings. -%: common-% ; - -.PHONY: common-all -common-all: precheck style check_license staticcheck unused build test - -.PHONY: common-style -common-style: - @echo ">> checking code style" - @fmtRes=$$($(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print)); \ - if [ -n "$${fmtRes}" ]; then \ - echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \ - echo "Please ensure you are using $$($(GO) version) for formatting code."; \ - exit 1; \ - fi - -.PHONY: common-check_license -common-check_license: - @echo ">> checking license header" - @licRes=$$(for file in $$(find . -type f -iname '*.go' ! 
-path './vendor/*') ; do \ - awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \ - done); \ - if [ -n "$${licRes}" ]; then \ - echo "license header checking failed:"; echo "$${licRes}"; \ - exit 1; \ - fi - -.PHONY: common-deps -common-deps: - @echo ">> getting dependencies" -ifdef GO111MODULE - GO111MODULE=$(GO111MODULE) $(GO) mod download -else - $(GO) get $(GOOPTS) -t ./... -endif - -.PHONY: common-test-short -common-test-short: - @echo ">> running short tests" - GO111MODULE=$(GO111MODULE) $(GO) test -short $(GOOPTS) $(pkgs) - -.PHONY: common-test -common-test: - @echo ">> running all tests" - GO111MODULE=$(GO111MODULE) $(GO) test $(test-flags) $(GOOPTS) $(pkgs) - -.PHONY: common-format -common-format: - @echo ">> formatting code" - GO111MODULE=$(GO111MODULE) $(GO) fmt $(pkgs) - -.PHONY: common-vet -common-vet: - @echo ">> vetting code" - GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs) - -.PHONY: common-staticcheck -common-staticcheck: $(STATICCHECK) -ifdef STATICCHECK - @echo ">> running staticcheck" - chmod +x $(STATICCHECK) -ifdef GO111MODULE -# 'go list' needs to be executed before staticcheck to prepopulate the modules cache. -# Otherwise staticcheck might fail randomly for some reason not yet explained. - GO111MODULE=$(GO111MODULE) $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null - GO111MODULE=$(GO111MODULE) $(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs) -else - $(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs) -endif -endif - -.PHONY: common-unused -common-unused: $(GOVENDOR) -ifdef GOVENDOR - @echo ">> running check for unused packages" - @$(GOVENDOR) list +unused | grep . 
&& exit 1 || echo 'No unused packages' -else -ifdef GO111MODULE - @echo ">> running check for unused/missing packages in go.mod" - GO111MODULE=$(GO111MODULE) $(GO) mod tidy -ifeq (,$(wildcard vendor)) - @git diff --exit-code -- go.sum go.mod -else - @echo ">> running check for unused packages in vendor/" - GO111MODULE=$(GO111MODULE) $(GO) mod vendor - @git diff --exit-code -- go.sum go.mod vendor/ -endif -endif -endif - -.PHONY: common-build -common-build: promu - @echo ">> building binaries" - GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX) - -.PHONY: common-tarball -common-tarball: promu - @echo ">> building release tarball" - $(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR) - -.PHONY: common-docker $(BUILD_DOCKER_ARCHS) -common-docker: $(BUILD_DOCKER_ARCHS) -$(BUILD_DOCKER_ARCHS): common-docker-%: - docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \ - --build-arg ARCH="$*" \ - --build-arg OS="linux" \ - . - -.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS) -common-docker-publish: $(PUBLISH_DOCKER_ARCHS) -$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%: - docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" - -.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS) -common-docker-tag-latest: $(TAG_DOCKER_ARCHS) -$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%: - docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest" - -.PHONY: common-docker-manifest -common-docker-manifest: - DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG)) - DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" - -.PHONY: promu -promu: $(PROMU) - -$(PROMU): - $(eval PROMU_TMP := $(shell mktemp -d)) - curl -s -L $(PROMU_URL) | 
tar -xvzf - -C $(PROMU_TMP) - mkdir -p $(FIRST_GOPATH)/bin - cp $(PROMU_TMP)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM)/promu $(FIRST_GOPATH)/bin/promu - rm -r $(PROMU_TMP) - -.PHONY: proto -proto: - @echo ">> generating code from proto files" - @./scripts/genproto.sh - -ifdef STATICCHECK -$(STATICCHECK): - mkdir -p $(FIRST_GOPATH)/bin - curl -s -L $(STATICCHECK_URL) > $(STATICCHECK) -endif - -ifdef GOVENDOR -.PHONY: $(GOVENDOR) -$(GOVENDOR): - GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor -endif - -.PHONY: precheck -precheck:: - -define PRECHECK_COMMAND_template = -precheck:: $(1)_precheck - -PRECHECK_COMMAND_$(1) ?= $(1) $$(strip $$(PRECHECK_OPTIONS_$(1))) -.PHONY: $(1)_precheck -$(1)_precheck: - @if ! $$(PRECHECK_COMMAND_$(1)) 1>/dev/null 2>&1; then \ - echo "Execution of '$$(PRECHECK_COMMAND_$(1))' command failed. Is $(1) installed?"; \ - exit 1; \ - fi -endef diff --git a/vendor/github.com/prometheus/common/README.md b/vendor/github.com/prometheus/common/README.md index 6114f8d59..47985e4ad 100644 --- a/vendor/github.com/prometheus/common/README.md +++ b/vendor/github.com/prometheus/common/README.md @@ -9,5 +9,4 @@ components and libraries. * **log**: A logging wrapper around [logrus](https://github.com/sirupsen/logrus) * **model**: Shared data structures * **route**: A routing wrapper around [httprouter](https://github.com/julienschmidt/httprouter) using `context.Context` -* **server**: Common servers * **version**: Version information and metrics diff --git a/vendor/github.com/prometheus/common/config/http_config.go b/vendor/github.com/prometheus/common/config/http_config.go index 21bfa22bf..da5d59014 100644 --- a/vendor/github.com/prometheus/common/config/http_config.go +++ b/vendor/github.com/prometheus/common/config/http_config.go @@ -11,13 +11,9 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-// +build go1.8 - package config import ( - "bytes" - "crypto/md5" "crypto/tls" "crypto/x509" "fmt" @@ -25,17 +21,12 @@ import ( "net/http" "net/url" "strings" - "sync" "time" "github.com/mwitkow/go-conntrack" "gopkg.in/yaml.v2" ) -type closeIdler interface { - CloseIdleConnections() -} - // BasicAuth contains basic HTTP authentication credentials. type BasicAuth struct { Username string `yaml:"username"` @@ -133,53 +124,42 @@ func NewClientFromConfig(cfg HTTPClientConfig, name string) (*http.Client, error // NewRoundTripperFromConfig returns a new HTTP RoundTripper configured for the // given config.HTTPClientConfig. The name is used as go-conntrack metric label. func NewRoundTripperFromConfig(cfg HTTPClientConfig, name string) (http.RoundTripper, error) { - newRT := func(tlsConfig *tls.Config) (http.RoundTripper, error) { - // The only timeout we care about is the configured scrape timeout. - // It is applied on request. So we leave out any timings here. - var rt http.RoundTripper = &http.Transport{ - Proxy: http.ProxyURL(cfg.ProxyURL.URL), - MaxIdleConns: 20000, - MaxIdleConnsPerHost: 1000, // see https://github.com/golang/go/issues/13801 - DisableKeepAlives: false, - TLSClientConfig: tlsConfig, - DisableCompression: true, - // 5 minutes is typically above the maximum sane scrape interval. So we can - // use keepalive for all configurations. - IdleConnTimeout: 5 * time.Minute, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - DialContext: conntrack.NewDialContextFunc( - conntrack.DialWithTracing(), - conntrack.DialWithName(name), - ), - } - - // If a bearer token is provided, create a round tripper that will set the - // Authorization header correctly on each request. 
- if len(cfg.BearerToken) > 0 { - rt = NewBearerAuthRoundTripper(cfg.BearerToken, rt) - } else if len(cfg.BearerTokenFile) > 0 { - rt = NewBearerAuthFileRoundTripper(cfg.BearerTokenFile, rt) - } - - if cfg.BasicAuth != nil { - rt = NewBasicAuthRoundTripper(cfg.BasicAuth.Username, cfg.BasicAuth.Password, cfg.BasicAuth.PasswordFile, rt) - } - // Return a new configured RoundTripper. - return rt, nil - } - tlsConfig, err := NewTLSConfig(&cfg.TLSConfig) if err != nil { return nil, err } + // The only timeout we care about is the configured scrape timeout. + // It is applied on request. So we leave out any timings here. + var rt http.RoundTripper = &http.Transport{ + Proxy: http.ProxyURL(cfg.ProxyURL.URL), + MaxIdleConns: 20000, + MaxIdleConnsPerHost: 1000, // see https://github.com/golang/go/issues/13801 + DisableKeepAlives: false, + TLSClientConfig: tlsConfig, + DisableCompression: true, + // 5 minutes is typically above the maximum sane scrape interval. So we can + // use keepalive for all configurations. + IdleConnTimeout: 5 * time.Minute, + DialContext: conntrack.NewDialContextFunc( + conntrack.DialWithTracing(), + conntrack.DialWithName(name), + ), + } + + // If a bearer token is provided, create a round tripper that will set the + // Authorization header correctly on each request. + if len(cfg.BearerToken) > 0 { + rt = NewBearerAuthRoundTripper(cfg.BearerToken, rt) + } else if len(cfg.BearerTokenFile) > 0 { + rt = NewBearerAuthFileRoundTripper(cfg.BearerTokenFile, rt) + } - if len(cfg.TLSConfig.CAFile) == 0 { - // No need for a RoundTripper that reloads the CA file automatically. - return newRT(tlsConfig) + if cfg.BasicAuth != nil { + rt = NewBasicAuthRoundTripper(cfg.BasicAuth.Username, cfg.BasicAuth.Password, cfg.BasicAuth.PasswordFile, rt) } - return newTLSRoundTripper(tlsConfig, cfg.TLSConfig.CAFile, newRT) + // Return a new configured RoundTripper. 
+ return rt, nil } type bearerAuthRoundTripper struct { @@ -201,12 +181,6 @@ func (rt *bearerAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, return rt.rt.RoundTrip(req) } -func (rt *bearerAuthRoundTripper) CloseIdleConnections() { - if ci, ok := rt.rt.(closeIdler); ok { - ci.CloseIdleConnections() - } -} - type bearerAuthFileRoundTripper struct { bearerFile string rt http.RoundTripper @@ -233,12 +207,6 @@ func (rt *bearerAuthFileRoundTripper) RoundTrip(req *http.Request) (*http.Respon return rt.rt.RoundTrip(req) } -func (rt *bearerAuthFileRoundTripper) CloseIdleConnections() { - if ci, ok := rt.rt.(closeIdler); ok { - ci.CloseIdleConnections() - } -} - type basicAuthRoundTripper struct { username string password Secret @@ -269,12 +237,6 @@ func (rt *basicAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, e return rt.rt.RoundTrip(req) } -func (rt *basicAuthRoundTripper) CloseIdleConnections() { - if ci, ok := rt.rt.(closeIdler); ok { - ci.CloseIdleConnections() - } -} - // cloneRequest returns a clone of the provided *http.Request. // The clone is a shallow copy of the struct and its Header map. func cloneRequest(r *http.Request) *http.Request { @@ -296,13 +258,14 @@ func NewTLSConfig(cfg *TLSConfig) (*tls.Config, error) { // If a CA cert is provided then let's read it in so we can validate the // scrape target's certificate properly. if len(cfg.CAFile) > 0 { - b, err := readCAFile(cfg.CAFile) + caCertPool := x509.NewCertPool() + // Load CA cert. 
+ caCert, err := ioutil.ReadFile(cfg.CAFile) if err != nil { - return nil, err - } - if !updateRootCA(tlsConfig, b) { - return nil, fmt.Errorf("unable to use specified CA cert %s", cfg.CAFile) + return nil, fmt.Errorf("unable to use specified CA cert %s: %s", cfg.CAFile, err) } + caCertPool.AppendCertsFromPEM(caCert) + tlsConfig.RootCAs = caCertPool } if len(cfg.ServerName) > 0 { @@ -314,12 +277,13 @@ func NewTLSConfig(cfg *TLSConfig) (*tls.Config, error) { } else if len(cfg.KeyFile) > 0 && len(cfg.CertFile) == 0 { return nil, fmt.Errorf("client key file %q specified without client cert file", cfg.KeyFile) } else if len(cfg.CertFile) > 0 && len(cfg.KeyFile) > 0 { - // Verify that client cert and key are valid. - if _, err := cfg.getClientCertificate(nil); err != nil { - return nil, err + cert, err := tls.LoadX509KeyPair(cfg.CertFile, cfg.KeyFile) + if err != nil { + return nil, fmt.Errorf("unable to use specified client cert (%s) & key (%s): %s", cfg.CertFile, cfg.KeyFile, err) } - tlsConfig.GetClientCertificate = cfg.getClientCertificate + tlsConfig.Certificates = []tls.Certificate{cert} } + tlsConfig.BuildNameToCertificate() return tlsConfig, nil } @@ -344,125 +308,6 @@ func (c *TLSConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { return unmarshal((*plain)(c)) } -// getClientCertificate reads the pair of client cert and key from disk and returns a tls.Certificate. -func (c *TLSConfig) getClientCertificate(*tls.CertificateRequestInfo) (*tls.Certificate, error) { - cert, err := tls.LoadX509KeyPair(c.CertFile, c.KeyFile) - if err != nil { - return nil, fmt.Errorf("unable to use specified client cert (%s) & key (%s): %s", c.CertFile, c.KeyFile, err) - } - return &cert, nil -} - -// readCAFile reads the CA cert file from disk. 
-func readCAFile(f string) ([]byte, error) { - data, err := ioutil.ReadFile(f) - if err != nil { - return nil, fmt.Errorf("unable to load specified CA cert %s: %s", f, err) - } - return data, nil -} - -// updateRootCA parses the given byte slice as a series of PEM encoded certificates and updates tls.Config.RootCAs. -func updateRootCA(cfg *tls.Config, b []byte) bool { - caCertPool := x509.NewCertPool() - if !caCertPool.AppendCertsFromPEM(b) { - return false - } - cfg.RootCAs = caCertPool - return true -} - -// tlsRoundTripper is a RoundTripper that updates automatically its TLS -// configuration whenever the content of the CA file changes. -type tlsRoundTripper struct { - caFile string - // newRT returns a new RoundTripper. - newRT func(*tls.Config) (http.RoundTripper, error) - - mtx sync.RWMutex - rt http.RoundTripper - hashCAFile []byte - tlsConfig *tls.Config -} - -func newTLSRoundTripper( - cfg *tls.Config, - caFile string, - newRT func(*tls.Config) (http.RoundTripper, error), -) (http.RoundTripper, error) { - t := &tlsRoundTripper{ - caFile: caFile, - newRT: newRT, - tlsConfig: cfg, - } - - rt, err := t.newRT(t.tlsConfig) - if err != nil { - return nil, err - } - t.rt = rt - - _, t.hashCAFile, err = t.getCAWithHash() - if err != nil { - return nil, err - } - - return t, nil -} - -func (t *tlsRoundTripper) getCAWithHash() ([]byte, []byte, error) { - b, err := readCAFile(t.caFile) - if err != nil { - return nil, nil, err - } - h := md5.Sum(b) - return b, h[:], nil - -} - -// RoundTrip implements the http.RoundTrip interface. -func (t *tlsRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - b, h, err := t.getCAWithHash() - if err != nil { - return nil, err - } - - t.mtx.RLock() - equal := bytes.Equal(h[:], t.hashCAFile) - rt := t.rt - t.mtx.RUnlock() - if equal { - // The CA cert hasn't changed, use the existing RoundTripper. - return rt.RoundTrip(req) - } - - // Create a new RoundTripper. 
- tlsConfig := t.tlsConfig.Clone() - if !updateRootCA(tlsConfig, b) { - return nil, fmt.Errorf("unable to use specified CA cert %s", t.caFile) - } - rt, err = t.newRT(tlsConfig) - if err != nil { - return nil, err - } - t.CloseIdleConnections() - - t.mtx.Lock() - t.rt = rt - t.hashCAFile = h[:] - t.mtx.Unlock() - - return rt.RoundTrip(req) -} - -func (t *tlsRoundTripper) CloseIdleConnections() { - t.mtx.RLock() - defer t.mtx.RUnlock() - if ci, ok := t.rt.(closeIdler); ok { - ci.CloseIdleConnections() - } -} - func (c HTTPClientConfig) String() string { b, err := yaml.Marshal(c) if err != nil { diff --git a/vendor/github.com/prometheus/common/config/http_config_test.go b/vendor/github.com/prometheus/common/config/http_config_test.go index fdd3fb13c..698ea38f2 100644 --- a/vendor/github.com/prometheus/common/config/http_config_test.go +++ b/vendor/github.com/prometheus/common/config/http_config_test.go @@ -11,8 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-// +build go1.8 - package config import ( @@ -22,29 +20,19 @@ import ( "io/ioutil" "net/http" "net/http/httptest" - "os" - "path/filepath" "reflect" - "strconv" "strings" - "sync" - "sync/atomic" "testing" - "time" - yaml "gopkg.in/yaml.v2" + "gopkg.in/yaml.v2" ) const ( TLSCAChainPath = "testdata/tls-ca-chain.pem" ServerCertificatePath = "testdata/server.crt" ServerKeyPath = "testdata/server.key" - ClientCertificatePath = "testdata/client.crt" - ClientKeyNoPassPath = "testdata/client-no-pass.key" - InvalidCA = "testdata/client-no-pass.key" - WrongClientCertPath = "testdata/self-signed-client.crt" - WrongClientKeyPath = "testdata/self-signed-client.key" - EmptyFile = "testdata/empty" + BarneyCertificatePath = "testdata/barney.crt" + BarneyKeyNoPassPath = "testdata/barney-no-pass.key" MissingCA = "missing/ca.crt" MissingCert = "missing/cert.crt" MissingKey = "missing/secret.key" @@ -113,8 +101,8 @@ func TestNewClientFromConfig(t *testing.T) { clientConfig: HTTPClientConfig{ TLSConfig: TLSConfig{ CAFile: "", - CertFile: ClientCertificatePath, - KeyFile: ClientKeyNoPassPath, + CertFile: BarneyCertificatePath, + KeyFile: BarneyKeyNoPassPath, ServerName: "", InsecureSkipVerify: true}, }, @@ -125,8 +113,8 @@ func TestNewClientFromConfig(t *testing.T) { clientConfig: HTTPClientConfig{ TLSConfig: TLSConfig{ CAFile: TLSCAChainPath, - CertFile: ClientCertificatePath, - KeyFile: ClientKeyNoPassPath, + CertFile: BarneyCertificatePath, + KeyFile: BarneyKeyNoPassPath, ServerName: "", InsecureSkipVerify: false}, }, @@ -138,8 +126,8 @@ func TestNewClientFromConfig(t *testing.T) { BearerToken: BearerToken, TLSConfig: TLSConfig{ CAFile: TLSCAChainPath, - CertFile: ClientCertificatePath, - KeyFile: ClientKeyNoPassPath, + CertFile: BarneyCertificatePath, + KeyFile: BarneyKeyNoPassPath, ServerName: "", InsecureSkipVerify: false}, }, @@ -157,8 +145,8 @@ func TestNewClientFromConfig(t *testing.T) { BearerTokenFile: BearerTokenFile, TLSConfig: TLSConfig{ CAFile: TLSCAChainPath, - 
CertFile: ClientCertificatePath, - KeyFile: ClientKeyNoPassPath, + CertFile: BarneyCertificatePath, + KeyFile: BarneyKeyNoPassPath, ServerName: "", InsecureSkipVerify: false}, }, @@ -179,8 +167,8 @@ func TestNewClientFromConfig(t *testing.T) { }, TLSConfig: TLSConfig{ CAFile: TLSCAChainPath, - CertFile: ClientCertificatePath, - KeyFile: ClientKeyNoPassPath, + CertFile: BarneyCertificatePath, + KeyFile: BarneyKeyNoPassPath, ServerName: "", InsecureSkipVerify: false}, }, @@ -241,17 +229,12 @@ func TestNewClientFromInvalidConfig(t *testing.T) { clientConfig: HTTPClientConfig{ TLSConfig: TLSConfig{ CAFile: MissingCA, + CertFile: "", + KeyFile: "", + ServerName: "", InsecureSkipVerify: true}, }, - errorMsg: fmt.Sprintf("unable to load specified CA cert %s:", MissingCA), - }, - { - clientConfig: HTTPClientConfig{ - TLSConfig: TLSConfig{ - CAFile: InvalidCA, - InsecureSkipVerify: true}, - }, - errorMsg: fmt.Sprintf("unable to use specified CA cert %s", InvalidCA), + errorMsg: fmt.Sprintf("unable to use specified CA cert %s:", MissingCA), }, } @@ -264,7 +247,7 @@ func TestNewClientFromInvalidConfig(t *testing.T) { t.Errorf("No error was returned using this config: %+v", invalidConfig.clientConfig) } if !strings.Contains(err.Error(), invalidConfig.errorMsg) { - t.Errorf("Expected error %q does not contain %q", err.Error(), invalidConfig.errorMsg) + t.Errorf("Expected error %s does not contain %s", err.Error(), invalidConfig.errorMsg) } } } @@ -274,8 +257,8 @@ func TestMissingBearerAuthFile(t *testing.T) { BearerTokenFile: MissingBearerTokenFile, TLSConfig: TLSConfig{ CAFile: TLSCAChainPath, - CertFile: ClientCertificatePath, - KeyFile: ClientKeyNoPassPath, + CertFile: BarneyCertificatePath, + KeyFile: BarneyKeyNoPassPath, ServerName: "", InsecureSkipVerify: false}, } @@ -361,8 +344,8 @@ func TestBearerAuthFileRoundTripper(t *testing.T) { func TestTLSConfig(t *testing.T) { configTLSConfig := TLSConfig{ CAFile: TLSCAChainPath, - CertFile: ClientCertificatePath, - KeyFile: 
ClientKeyNoPassPath, + CertFile: BarneyCertificatePath, + KeyFile: BarneyKeyNoPassPath, ServerName: "localhost", InsecureSkipVerify: false} @@ -374,31 +357,24 @@ func TestTLSConfig(t *testing.T) { rootCAs := x509.NewCertPool() rootCAs.AppendCertsFromPEM(tlsCAChain) + barneyCertificate, err := tls.LoadX509KeyPair(BarneyCertificatePath, BarneyKeyNoPassPath) + if err != nil { + t.Fatalf("Can't load the client key pair ('%s' and '%s'). Reason: %s", + BarneyCertificatePath, BarneyKeyNoPassPath, err) + } + expectedTLSConfig := &tls.Config{ RootCAs: rootCAs, + Certificates: []tls.Certificate{barneyCertificate}, ServerName: configTLSConfig.ServerName, InsecureSkipVerify: configTLSConfig.InsecureSkipVerify} + expectedTLSConfig.BuildNameToCertificate() tlsConfig, err := NewTLSConfig(&configTLSConfig) if err != nil { t.Fatalf("Can't create a new TLS Config from a configuration (%s).", err) } - clientCertificate, err := tls.LoadX509KeyPair(ClientCertificatePath, ClientKeyNoPassPath) - if err != nil { - t.Fatalf("Can't load the client key pair ('%s' and '%s'). Reason: %s", - ClientCertificatePath, ClientKeyNoPassPath, err) - } - cert, err := tlsConfig.GetClientCertificate(nil) - if err != nil { - t.Fatalf("unexpected error returned by tlsConfig.GetClientCertificate(): %s", err) - } - if !reflect.DeepEqual(cert, &clientCertificate) { - t.Fatalf("Unexpected client certificate result: \n\n%+v\n expected\n\n%+v", cert, clientCertificate) - } - - // non-nil functions are never equal. 
- tlsConfig.GetClientCertificate = nil if !reflect.DeepEqual(tlsConfig, expectedTLSConfig) { t.Fatalf("Unexpected TLS Config result: \n\n%+v\n expected\n\n%+v", tlsConfig, expectedTLSConfig) } @@ -406,12 +382,15 @@ func TestTLSConfig(t *testing.T) { func TestTLSConfigEmpty(t *testing.T) { configTLSConfig := TLSConfig{ - InsecureSkipVerify: true, - } + CAFile: "", + CertFile: "", + KeyFile: "", + ServerName: "", + InsecureSkipVerify: true} expectedTLSConfig := &tls.Config{ - InsecureSkipVerify: configTLSConfig.InsecureSkipVerify, - } + InsecureSkipVerify: configTLSConfig.InsecureSkipVerify} + expectedTLSConfig.BuildNameToCertificate() tlsConfig, err := NewTLSConfig(&configTLSConfig) if err != nil { @@ -435,23 +414,23 @@ func TestTLSConfigInvalidCA(t *testing.T) { KeyFile: "", ServerName: "", InsecureSkipVerify: false}, - errorMessage: fmt.Sprintf("unable to load specified CA cert %s:", MissingCA), + errorMessage: fmt.Sprintf("unable to use specified CA cert %s:", MissingCA), }, { configTLSConfig: TLSConfig{ CAFile: "", CertFile: MissingCert, - KeyFile: ClientKeyNoPassPath, + KeyFile: BarneyKeyNoPassPath, ServerName: "", InsecureSkipVerify: false}, - errorMessage: fmt.Sprintf("unable to use specified client cert (%s) & key (%s):", MissingCert, ClientKeyNoPassPath), + errorMessage: fmt.Sprintf("unable to use specified client cert (%s) & key (%s):", MissingCert, BarneyKeyNoPassPath), }, { configTLSConfig: TLSConfig{ CAFile: "", - CertFile: ClientCertificatePath, + CertFile: BarneyCertificatePath, KeyFile: MissingKey, ServerName: "", InsecureSkipVerify: false}, - errorMessage: fmt.Sprintf("unable to use specified client cert (%s) & key (%s):", ClientCertificatePath, MissingKey), + errorMessage: fmt.Sprintf("unable to use specified client cert (%s) & key (%s):", BarneyCertificatePath, MissingKey), }, } @@ -470,11 +449,11 @@ func TestTLSConfigInvalidCA(t *testing.T) { func TestBasicAuthNoPassword(t *testing.T) { cfg, _, err := 
LoadHTTPConfigFile("testdata/http.conf.basic-auth.no-password.yaml") if err != nil { - t.Fatalf("Error loading HTTP client config: %v", err) + t.Errorf("Error loading HTTP client config: %v", err) } client, err := NewClientFromConfig(*cfg, "test") if err != nil { - t.Fatalf("Error creating HTTP Client: %v", err) + t.Errorf("Error creating HTTP Client: %v", err) } rt, ok := client.Transport.(*basicAuthRoundTripper) @@ -496,11 +475,11 @@ func TestBasicAuthNoPassword(t *testing.T) { func TestBasicAuthNoUsername(t *testing.T) { cfg, _, err := LoadHTTPConfigFile("testdata/http.conf.basic-auth.no-username.yaml") if err != nil { - t.Fatalf("Error loading HTTP client config: %v", err) + t.Errorf("Error loading HTTP client config: %v", err) } client, err := NewClientFromConfig(*cfg, "test") if err != nil { - t.Fatalf("Error creating HTTP Client: %v", err) + t.Errorf("Error creating HTTP Client: %v", err) } rt, ok := client.Transport.(*basicAuthRoundTripper) @@ -522,16 +501,16 @@ func TestBasicAuthNoUsername(t *testing.T) { func TestBasicAuthPasswordFile(t *testing.T) { cfg, _, err := LoadHTTPConfigFile("testdata/http.conf.basic-auth.good.yaml") if err != nil { - t.Fatalf("Error loading HTTP client config: %v", err) + t.Errorf("Error loading HTTP client config: %v", err) } client, err := NewClientFromConfig(*cfg, "test") if err != nil { - t.Fatalf("Error creating HTTP Client: %v", err) + t.Errorf("Error creating HTTP Client: %v", err) } rt, ok := client.Transport.(*basicAuthRoundTripper) if !ok { - t.Fatalf("Error casting to basic auth transport, %v", client.Transport) + t.Errorf("Error casting to basic auth transport, %v", client.Transport) } if rt.username != "user" { @@ -545,264 +524,6 @@ func TestBasicAuthPasswordFile(t *testing.T) { } } -func getCertificateBlobs(t *testing.T) map[string][]byte { - files := []string{ - TLSCAChainPath, - ClientCertificatePath, - ClientKeyNoPassPath, - ServerCertificatePath, - ServerKeyPath, - WrongClientCertPath, - WrongClientKeyPath, - 
EmptyFile, - } - bs := make(map[string][]byte, len(files)+1) - for _, f := range files { - b, err := ioutil.ReadFile(f) - if err != nil { - t.Fatal(err) - } - bs[f] = b - } - - return bs -} - -func writeCertificate(bs map[string][]byte, src string, dst string) { - b, ok := bs[src] - if !ok { - panic(fmt.Sprintf("Couldn't find %q in bs", src)) - } - if err := ioutil.WriteFile(dst, b, 0664); err != nil { - panic(err) - } -} - -func TestTLSRoundTripper(t *testing.T) { - bs := getCertificateBlobs(t) - - tmpDir, err := ioutil.TempDir("", "tlsroundtripper") - if err != nil { - t.Fatal("Failed to create tmp dir", err) - } - defer os.RemoveAll(tmpDir) - - ca, cert, key := filepath.Join(tmpDir, "ca"), filepath.Join(tmpDir, "cert"), filepath.Join(tmpDir, "key") - - handler := func(w http.ResponseWriter, r *http.Request) { - fmt.Fprint(w, ExpectedMessage) - } - testServer, err := newTestServer(handler) - if err != nil { - t.Fatal(err.Error()) - } - defer testServer.Close() - - testCases := []struct { - ca string - cert string - key string - - errMsg string - }{ - { - // Valid certs. - ca: TLSCAChainPath, - cert: ClientCertificatePath, - key: ClientKeyNoPassPath, - }, - { - // CA not matching. - ca: ClientCertificatePath, - cert: ClientCertificatePath, - key: ClientKeyNoPassPath, - - errMsg: "certificate signed by unknown authority", - }, - { - // Invalid client cert+key. - ca: TLSCAChainPath, - cert: WrongClientCertPath, - key: WrongClientKeyPath, - - errMsg: "remote error: tls", - }, - { - // CA file empty - ca: EmptyFile, - cert: ClientCertificatePath, - key: ClientKeyNoPassPath, - - errMsg: "unable to use specified CA cert", - }, - { - // cert file empty - ca: TLSCAChainPath, - cert: EmptyFile, - key: ClientKeyNoPassPath, - - errMsg: "failed to find any PEM data in certificate input", - }, - { - // key file empty - ca: TLSCAChainPath, - cert: ClientCertificatePath, - key: EmptyFile, - - errMsg: "failed to find any PEM data in key input", - }, - { - // Valid certs again. 
- ca: TLSCAChainPath, - cert: ClientCertificatePath, - key: ClientKeyNoPassPath, - }, - } - - cfg := HTTPClientConfig{ - TLSConfig: TLSConfig{ - CAFile: ca, - CertFile: cert, - KeyFile: key, - InsecureSkipVerify: false}, - } - - var c *http.Client - for i, tc := range testCases { - tc := tc - t.Run(strconv.Itoa(i), func(t *testing.T) { - writeCertificate(bs, tc.ca, ca) - writeCertificate(bs, tc.cert, cert) - writeCertificate(bs, tc.key, key) - if c == nil { - c, err = NewClientFromConfig(cfg, "test") - if err != nil { - t.Fatalf("Error creating HTTP Client: %v", err) - } - } - - req, err := http.NewRequest(http.MethodGet, testServer.URL, nil) - if err != nil { - t.Fatalf("Error creating HTTP request: %v", err) - } - r, err := c.Do(req) - if len(tc.errMsg) > 0 { - if err == nil { - r.Body.Close() - t.Fatalf("Could connect to the test server.") - } - if !strings.Contains(err.Error(), tc.errMsg) { - t.Fatalf("Expected error message to contain %q, got %q", tc.errMsg, err) - } - return - } - - if err != nil { - t.Fatalf("Can't connect to the test server") - } - - b, err := ioutil.ReadAll(r.Body) - r.Body.Close() - if err != nil { - t.Errorf("Can't read the server response body") - } - - got := strings.TrimSpace(string(b)) - if ExpectedMessage != got { - t.Errorf("The expected message %q differs from the obtained message %q", ExpectedMessage, got) - } - }) - } -} - -func TestTLSRoundTripperRaces(t *testing.T) { - bs := getCertificateBlobs(t) - - tmpDir, err := ioutil.TempDir("", "tlsroundtripper") - if err != nil { - t.Fatal("Failed to create tmp dir", err) - } - defer os.RemoveAll(tmpDir) - - ca, cert, key := filepath.Join(tmpDir, "ca"), filepath.Join(tmpDir, "cert"), filepath.Join(tmpDir, "key") - - handler := func(w http.ResponseWriter, r *http.Request) { - fmt.Fprint(w, ExpectedMessage) - } - testServer, err := newTestServer(handler) - if err != nil { - t.Fatal(err.Error()) - } - defer testServer.Close() - - cfg := HTTPClientConfig{ - TLSConfig: TLSConfig{ - CAFile: 
ca, - CertFile: cert, - KeyFile: key, - InsecureSkipVerify: false}, - } - - var c *http.Client - writeCertificate(bs, TLSCAChainPath, ca) - writeCertificate(bs, ClientCertificatePath, cert) - writeCertificate(bs, ClientKeyNoPassPath, key) - c, err = NewClientFromConfig(cfg, "test") - if err != nil { - t.Fatalf("Error creating HTTP Client: %v", err) - } - - var wg sync.WaitGroup - ch := make(chan struct{}) - var total, ok int64 - // Spawn 10 Go routines polling the server concurrently. - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - defer wg.Done() - for { - select { - case <-ch: - return - default: - atomic.AddInt64(&total, 1) - r, err := c.Get(testServer.URL) - if err == nil { - r.Body.Close() - atomic.AddInt64(&ok, 1) - } - } - } - }() - } - - // Change the CA file every 10ms for 1 second. - wg.Add(1) - go func() { - defer wg.Done() - i := 0 - for { - tick := time.NewTicker(10 * time.Millisecond) - <-tick.C - if i%2 == 0 { - writeCertificate(bs, ClientCertificatePath, ca) - } else { - writeCertificate(bs, TLSCAChainPath, ca) - } - i++ - if i > 100 { - close(ch) - return - } - } - }() - - wg.Wait() - if ok == total { - t.Fatalf("Expecting some requests to fail but got %d/%d successful requests", ok, total) - } -} - func TestHideHTTPClientConfigSecrets(t *testing.T) { c, _, err := LoadHTTPConfigFile("testdata/http.conf.good.yml") if err != nil { diff --git a/vendor/github.com/prometheus/common/config/testdata/barney-no-pass.key b/vendor/github.com/prometheus/common/config/testdata/barney-no-pass.key new file mode 100644 index 000000000..b8e44f552 --- /dev/null +++ b/vendor/github.com/prometheus/common/config/testdata/barney-no-pass.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEAxmYjfBZhZbAup9uSULehoqPCv/U+77ETxUNyS2nviWEHDAb/ +pFS8Btx4oCQ1ECVSyxcUmXSlrvDjMY4sisOHvndNRlGi274M5a8Q5yD1BUqvxq3u +XB/+SYNVShBzaswrSjpzMe89AlOPxPjnE14OXh00j2hHunOG4jhlWgJnY0YyvUQQ +YWO6KrmKMiZ4MgmY0SWh/ZhlkDJPtkp3aUVM2sheCru/70E9viLGfdlhc2pIMshy 
+wNp4/5IkHBZwbqXFFGX4sRtSXI/auZNvcHOBse+3e3BonWvBWS2lIYbzpX3vLB7B +E9BGIxWn1fgNQr14yFPaccSszBvgtmEUONolnwIDAQABAoIBAQC7nBhQHgXKGBl2 +Z97rb0pstrjRtsLl/Cg68LWi9LEr0tHMIM4bgnkvb8qtfK+k7fZl0BSNrE2EqYvd +75jVO2MgzEYJieccLpKZm7u7JGIut9qSYSU2fpaCw6uiVv4dbqY9EhqejKG/km8w +j0JMATRK8Qkj1zOE7/wL7dKBlCZaK3u+OT17spuA/21PG/cLiPaSGSA3CU/eqbkU +BD6JeBxp33XNTytwWoOvarsigpL0dGqQ7+qhGq6t69qFfWoe9rimV7Ya+tB9zF/U +HzOIEspOYvzxe+C7VJjlVFr4haMYmsrO9qRUJ2ofp49OLVdfEANsdVISSvS63BEp +gBZN8Ko5AoGBAO1z8y8YCsI+2vBG6nxZ1eMba0KHi3bS8db1TaenJBV22w6WQATh +hEaU6VLMFcMvrOUjXN/7HJfnEMyvFT6gb9obPDVEMZw88s9lVN6njgGLZR/jodyN +7N7utLopN043Ra0WfEILAXPSz8esT1yn05OZV6AFHxJEWMrX3/4+spCLAoGBANXl +RomieVY4u3FF/uzhbzKNNb9ETxrQuexfbangKp5eLniwnr2SQWIbyPzeurwp15J8 +HvxB2vpNvs1khSwNx9dQfMdiUVPGLWj7MimAHTHsnQ9LVV9W28ghuSWbjQDGTUt1 +WCCu1MkKIOzupbi+zgsNlI33yilRQKAb9SRxdy29AoGBAOKpvyZiPcrkMxwPpb/k +BU7QGpgcSR25CQ+Xg3QZEVHH7h1DgYLnPtwdQ4g8tj1mohTsp7hKvSWndRrdulrY +zUyWmOeD3BN2/pTI9rW/nceNp49EPHsLo2O+2xelRlzMWB98ikqEtPM59gt1SSB6 +N3X6d3GR0fIe+d9PKEtK0Cs3AoGAZ9r8ReXSvm+ra5ON9Nx8znHMEAON2TpRnBi1 +uY7zgpO+QrGXUfqKrqVJEKbgym4SkribnuYm+fP32eid1McYKk6VV4ZAcMm/0MJv +F8Fx64S0ufFdEX6uFl1xdXYyn5apfyMJ2EyrWrYFSKWTZ8GVb753S/tteGRQWa1Z +eQly0Y0CgYEAnI6G9KFvXI+MLu5y2LPYAwsesDFzaWwyDl96ioQTA9hNSrjR33Vw +xwpiEe0T/WKF8NQ0QWnrQDbTvuCvZUK37TVxscYWuItL6vnBrYqr4Ck0j1BcGwV5 +jT581A/Vw8JJiR/vfcxgmrFYqoUmkMKDmCN1oImfz09GtQ4jQ1rlxz8= +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/prometheus/common/config/testdata/barney.crt b/vendor/github.com/prometheus/common/config/testdata/barney.crt new file mode 100644 index 000000000..e2f950483 --- /dev/null +++ b/vendor/github.com/prometheus/common/config/testdata/barney.crt @@ -0,0 +1,96 @@ +Certificate: + Data: + Version: 3 (0x2) + Serial Number: 2 (0x2) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=NO, O=Green AS, OU=Green Certificate Authority, CN=Green TLS CA + Validity + Not Before: Jul 13 04:02:47 2017 GMT + Not After : Jul 13 04:02:47 2019 GMT + Subject: C=NO, 
O=Telenor AS, OU=Support, CN=Barney Rubble + Subject Public Key Info: + Public Key Algorithm: rsaEncryption + Public-Key: (2048 bit) + Modulus: + 00:c6:66:23:7c:16:61:65:b0:2e:a7:db:92:50:b7: + a1:a2:a3:c2:bf:f5:3e:ef:b1:13:c5:43:72:4b:69: + ef:89:61:07:0c:06:ff:a4:54:bc:06:dc:78:a0:24: + 35:10:25:52:cb:17:14:99:74:a5:ae:f0:e3:31:8e: + 2c:8a:c3:87:be:77:4d:46:51:a2:db:be:0c:e5:af: + 10:e7:20:f5:05:4a:af:c6:ad:ee:5c:1f:fe:49:83: + 55:4a:10:73:6a:cc:2b:4a:3a:73:31:ef:3d:02:53: + 8f:c4:f8:e7:13:5e:0e:5e:1d:34:8f:68:47:ba:73: + 86:e2:38:65:5a:02:67:63:46:32:bd:44:10:61:63: + ba:2a:b9:8a:32:26:78:32:09:98:d1:25:a1:fd:98: + 65:90:32:4f:b6:4a:77:69:45:4c:da:c8:5e:0a:bb: + bf:ef:41:3d:be:22:c6:7d:d9:61:73:6a:48:32:c8: + 72:c0:da:78:ff:92:24:1c:16:70:6e:a5:c5:14:65: + f8:b1:1b:52:5c:8f:da:b9:93:6f:70:73:81:b1:ef: + b7:7b:70:68:9d:6b:c1:59:2d:a5:21:86:f3:a5:7d: + ef:2c:1e:c1:13:d0:46:23:15:a7:d5:f8:0d:42:bd: + 78:c8:53:da:71:c4:ac:cc:1b:e0:b6:61:14:38:da: + 25:9f + Exponent: 65537 (0x10001) + X509v3 extensions: + X509v3 Key Usage: critical + Digital Signature + X509v3 Basic Constraints: + CA:FALSE + X509v3 Extended Key Usage: + TLS Web Client Authentication + X509v3 Subject Key Identifier: + F4:17:02:DD:1B:01:AB:C5:BC:17:A4:5C:4B:75:8E:EC:B1:E0:C8:F1 + X509v3 Authority Key Identifier: + keyid:AE:42:88:75:DD:05:A6:8E:48:7F:50:69:F9:B7:34:23:49:B8:B4:71 + + Authority Information Access: + CA Issuers - URI:http://green.no/ca/tls-ca.cer + + X509v3 CRL Distribution Points: + + Full Name: + URI:http://green.no/ca/tls-ca.crl + + X509v3 Subject Alternative Name: + email:barney@telenor.no + Signature Algorithm: sha1WithRSAEncryption + 96:9a:c5:41:8a:2f:4a:c4:80:d9:2b:1a:cf:07:85:e9:b6:18: + 01:20:41:b9:c3:d4:ca:d3:2d:66:c3:1d:52:7f:25:d7:92:0c: + e9:a9:ae:e6:2e:fa:9d:0a:cf:84:b9:03:f2:63:e3:d3:c9:70: + 6a:ac:04:5e:a9:2d:a2:43:7a:34:60:f7:a9:32:e1:48:ec:c6: + 03:ac:b3:06:2e:48:6e:d0:35:11:31:3d:0c:04:66:41:e6:b2: + ec:8c:68:f8:e4:bc:47:85:39:60:69:a9:8a:ee:2f:56:88:8a: + 
19:45:d0:84:8e:c2:27:2c:82:9c:07:6c:34:ae:41:61:63:f9: + 32:cb:8b:33:ea:2c:15:5f:f9:35:b0:3c:51:4d:5f:30:de:0b: + 88:28:94:79:f3:bd:69:37:ad:12:20:e1:6b:1d:b6:77:d9:83: + db:81:a4:53:6c:0f:6a:17:5e:2b:c1:94:c6:42:e3:73:cd:9e: + 79:1b:8c:89:cd:da:ce:b0:f4:21:c5:32:25:04:6e:68:9f:a7: + ca:f4:c5:86:e5:4e:d9:fd:69:73:e6:15:50:6e:76:0f:73:5e: + 7a:a3:f4:dc:15:4a:ab:bb:3c:9a:fa:9f:01:7a:5c:47:a9:a3: + 68:1c:49:e0:37:37:77:af:87:07:16:e4:e1:d7:98:39:15:a6: + 51:5d:4c:db +-----BEGIN CERTIFICATE----- +MIIEITCCAwmgAwIBAgIBAjANBgkqhkiG9w0BAQUFADBdMQswCQYDVQQGEwJOTzER +MA8GA1UECgwIR3JlZW4gQVMxJDAiBgNVBAsMG0dyZWVuIENlcnRpZmljYXRlIEF1 +dGhvcml0eTEVMBMGA1UEAwwMR3JlZW4gVExTIENBMB4XDTE3MDcxMzA0MDI0N1oX +DTE5MDcxMzA0MDI0N1owTDELMAkGA1UEBhMCTk8xEzARBgNVBAoMClRlbGVub3Ig +QVMxEDAOBgNVBAsMB1N1cHBvcnQxFjAUBgNVBAMMDUJhcm5leSBSdWJibGUwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDGZiN8FmFlsC6n25JQt6Gio8K/ +9T7vsRPFQ3JLae+JYQcMBv+kVLwG3HigJDUQJVLLFxSZdKWu8OMxjiyKw4e+d01G +UaLbvgzlrxDnIPUFSq/Gre5cH/5Jg1VKEHNqzCtKOnMx7z0CU4/E+OcTXg5eHTSP +aEe6c4biOGVaAmdjRjK9RBBhY7oquYoyJngyCZjRJaH9mGWQMk+2SndpRUzayF4K +u7/vQT2+IsZ92WFzakgyyHLA2nj/kiQcFnBupcUUZfixG1Jcj9q5k29wc4Gx77d7 +cGida8FZLaUhhvOlfe8sHsET0EYjFafV+A1CvXjIU9pxxKzMG+C2YRQ42iWfAgMB +AAGjgfwwgfkwDgYDVR0PAQH/BAQDAgeAMAkGA1UdEwQCMAAwEwYDVR0lBAwwCgYI +KwYBBQUHAwIwHQYDVR0OBBYEFPQXAt0bAavFvBekXEt1juyx4MjxMB8GA1UdIwQY +MBaAFK5CiHXdBaaOSH9Qafm3NCNJuLRxMDkGCCsGAQUFBwEBBC0wKzApBggrBgEF +BQcwAoYdaHR0cDovL2dyZWVuLm5vL2NhL3Rscy1jYS5jZXIwLgYDVR0fBCcwJTAj +oCGgH4YdaHR0cDovL2dyZWVuLm5vL2NhL3Rscy1jYS5jcmwwHAYDVR0RBBUwE4ER +YmFybmV5QHRlbGVub3Iubm8wDQYJKoZIhvcNAQEFBQADggEBAJaaxUGKL0rEgNkr +Gs8Hhem2GAEgQbnD1MrTLWbDHVJ/JdeSDOmpruYu+p0Kz4S5A/Jj49PJcGqsBF6p +LaJDejRg96ky4UjsxgOsswYuSG7QNRExPQwEZkHmsuyMaPjkvEeFOWBpqYruL1aI +ihlF0ISOwicsgpwHbDSuQWFj+TLLizPqLBVf+TWwPFFNXzDeC4golHnzvWk3rRIg +4WsdtnfZg9uBpFNsD2oXXivBlMZC43PNnnkbjInN2s6w9CHFMiUEbmifp8r0xYbl +Ttn9aXPmFVBudg9zXnqj9NwVSqu7PJr6nwF6XEepo2gcSeA3N3evhwcW5OHXmDkV +plFdTNs= +-----END CERTIFICATE----- diff --git 
a/vendor/github.com/prometheus/common/config/testdata/client-no-pass.key b/vendor/github.com/prometheus/common/config/testdata/client-no-pass.key deleted file mode 100644 index ac0e28a5e..000000000 --- a/vendor/github.com/prometheus/common/config/testdata/client-no-pass.key +++ /dev/null @@ -1,28 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQC307b8Il9zajKw -mkOih8sfYI+O9gSTvvyQN7Bh+Bu6lLN+XhtRxt+ZqOHfqo30EuPmdScMrqregqup -VPGKgfkXVP3hF5rYdWqZx4XOKdyxbaarZupkAv2gtVNEBSmVSj8urt5WZOJVnF7Q -GmhCAHpx34L5CCPYDXJBd5ExLwGIByKxQNugor7dJx8ehmVkGKto01GWjgY+sPYp -lV9KxvD49ygXYQ6VAqgt/V2EG/PMmT0/jUtmM2tYDFztPkSISJg0vB/f9zHlYIdD -GjkBjngekAij77T93xEuouox25UtXmg6ApqvDVEiBxZmN5Dt70HBsQ+IftENEUoY -8jhrImwBAgMBAAECggEBAJNlgjK3SPvdKlnqx9KZuagmH9YMs+zX1eG5lYdojqtT -snzf7l3q7b1i6gIS2pHbV7uhMjd8EmwqMIStJKPfxaAMuSj0aWeo9lnp3wNJE7l8 -54hGFCkvMLjcy7Adx5L6HqFK++IgME9e+7M3iWNqyMNn6bfO7Ba/6V5PBi9+tmaf -nZWqgY2Kf8A2iNnm9RvmiwQ42nsjVsKcXzGdBmFTp69ar/QWtk1dWDajUVw/NctM -cs+IypPjZiAE3CgyyiLKzG9CWCjkfMEd14uxFE73q2SAG6RWYSnv1M3WOupAF0rP -ll/NMXaMjLlq2q3B9v2ZAaojbbWlHLDdEpE/jwXkkwECgYEA5iWN7SGH8ZE6wDfO -EYuTQKpqYt1WbCQxv77leuGcm1KlFYfV8LsB/9xiocVtGm7N126zuwfgzfkIZWQD -KrpoFUkz1jUg+kHCqf4FO8hzR0By3hbdTImJQILtC/K3fHJtexFKiW82mb40lgYc -+Mk6Nb5CmL6VCX5u8MNBvD8WaLECgYEAzHofIneLLLqF2f2uVzF743CdgP1h0fPI -BS3akp56/8qzQWNW+natJRxiTh2R8gdvB+P/UtEZR8E+FbSzZ4dIRrxIi44ew0Cr -sROaP4LkaZFflKS/fD8S1M7yZQhussRoRWH0BDvM0hsu6UTGlESHX73b7js4AHpB -2q4frJMTDFECgYBr2f2Aus3yLpTRr1Uqc7Y1/6aLXh4531xQ9yyjQUcaosgqJtXj -Uj/Fn4m5NcPDN1nPM1mWtEJtQ97jZNL3GxPbpcpc/9jMbjTDZP8e3Pjo0xMBcMWU -MH/Zc4GSr9O8xgL4QUokzbFQqwoJpCO/ks1skhSzb9x37oAe4+HSTd46gQKBgQCk -+9hJSCl8kpdTl5Nm+R9cGU6MeGXIMKnwO9pDOSpHX7cZCF1yw/Tan7dWDhfnMEZP -GJC3ss1yDyLYArBK1WXk5SCnsalyo6ikvQtVOXixEUIMvo1eY8n++WetS4t+JGl5 -qhponBOcZ6CHSR3tHgoYnyloZFHAWOTv3FTkOttAsQKBgQCzWSO2TA4v/vIKIrSV -Lf2cI51imcy/JCsYUU+o66VQ6QdIJlfamuAKaKYAwfJtHtZOzAgrh09JV3qEEtN5 -duBdXiuygAz8eHbqSoSe5FYgImI0BREDq8Zm3ArgUhv6S9aBeg/mS1W/5ZfmV2cT 
-0MdlE8vUtcbDkmKpi7CaklzMNw== ------END PRIVATE KEY----- diff --git a/vendor/github.com/prometheus/common/config/testdata/client.crt b/vendor/github.com/prometheus/common/config/testdata/client.crt deleted file mode 100644 index b406f392b..000000000 --- a/vendor/github.com/prometheus/common/config/testdata/client.crt +++ /dev/null @@ -1,96 +0,0 @@ -Certificate: - Data: - Version: 3 (0x2) - Serial Number: 3 (0x3) - Signature Algorithm: sha1WithRSAEncryption - Issuer: C=US, O=Prometheus, OU=Prometheus Certificate Authority, CN=Prometheus TLS CA - Validity - Not Before: Apr 5 08:10:12 2019 GMT - Not After : Mar 26 08:10:12 2059 GMT - Subject: C=US, O=Prometheus, CN=Client - Subject Public Key Info: - Public Key Algorithm: rsaEncryption - RSA Public-Key: (2048 bit) - Modulus: - 00:b7:d3:b6:fc:22:5f:73:6a:32:b0:9a:43:a2:87: - cb:1f:60:8f:8e:f6:04:93:be:fc:90:37:b0:61:f8: - 1b:ba:94:b3:7e:5e:1b:51:c6:df:99:a8:e1:df:aa: - 8d:f4:12:e3:e6:75:27:0c:ae:aa:de:82:ab:a9:54: - f1:8a:81:f9:17:54:fd:e1:17:9a:d8:75:6a:99:c7: - 85:ce:29:dc:b1:6d:a6:ab:66:ea:64:02:fd:a0:b5: - 53:44:05:29:95:4a:3f:2e:ae:de:56:64:e2:55:9c: - 5e:d0:1a:68:42:00:7a:71:df:82:f9:08:23:d8:0d: - 72:41:77:91:31:2f:01:88:07:22:b1:40:db:a0:a2: - be:dd:27:1f:1e:86:65:64:18:ab:68:d3:51:96:8e: - 06:3e:b0:f6:29:95:5f:4a:c6:f0:f8:f7:28:17:61: - 0e:95:02:a8:2d:fd:5d:84:1b:f3:cc:99:3d:3f:8d: - 4b:66:33:6b:58:0c:5c:ed:3e:44:88:48:98:34:bc: - 1f:df:f7:31:e5:60:87:43:1a:39:01:8e:78:1e:90: - 08:a3:ef:b4:fd:df:11:2e:a2:ea:31:db:95:2d:5e: - 68:3a:02:9a:af:0d:51:22:07:16:66:37:90:ed:ef: - 41:c1:b1:0f:88:7e:d1:0d:11:4a:18:f2:38:6b:22: - 6c:01 - Exponent: 65537 (0x10001) - X509v3 extensions: - X509v3 Key Usage: critical - Digital Signature - X509v3 Basic Constraints: - CA:FALSE - X509v3 Extended Key Usage: - TLS Web Client Authentication - X509v3 Subject Key Identifier: - 3A:46:D1:C5:8C:42:60:AC:EF:0C:DD:4B:55:1E:F0:D7:5C:76:C3:33 - X509v3 Authority Key Identifier: - 
keyid:4D:02:BF:71:95:6A:AA:58:C5:9C:B8:83:67:5E:64:16:99:E1:2A:9E - - Authority Information Access: - CA Issuers - URI:http://example.com/ca/tls-ca.cer - - X509v3 CRL Distribution Points: - - Full Name: - URI:http://example.com/ca/tls-ca.crl - - X509v3 Subject Alternative Name: - email:client@prometheus.example.com - Signature Algorithm: sha1WithRSAEncryption - 73:fc:87:f2:cf:e3:b1:df:2f:f7:bf:f9:74:dc:0b:f0:7f:95: - ef:77:ba:6a:7d:c6:c5:f3:d9:d6:c7:eb:f8:a8:30:d3:90:d5: - a5:0c:32:33:95:85:a2:05:6e:78:a7:07:a5:e0:cf:f4:65:ef: - d2:6d:86:66:2a:7f:13:78:2f:90:dd:9d:a4:34:d4:8f:df:41: - 1b:0f:17:99:99:06:2d:26:86:e2:58:3e:84:ca:13:9e:00:ca: - 82:07:63:e7:6c:df:e9:47:d6:b3:f7:51:1a:31:f4:3d:79:95: - e7:ea:bf:40:84:48:09:23:ba:31:b1:67:cd:05:50:ec:e6:0a: - d8:2b:7d:7d:73:7a:8a:5f:f7:72:28:57:9f:15:2d:b1:4e:a1: - 3c:06:53:60:6e:b2:f9:04:08:81:3a:f2:ba:5d:7e:ac:93:f7: - 3b:1a:de:07:6e:14:a2:0b:e2:28:6a:50:2d:d8:9b:3c:25:e2: - 82:6b:90:7e:45:7b:dd:3a:7a:8e:71:99:a7:e8:88:5f:06:71: - 5b:3f:18:85:70:f9:eb:c7:26:43:2b:49:8f:17:90:aa:ba:86: - 8a:52:63:83:9f:9d:5d:79:53:af:6d:1a:7e:47:0d:ea:3f:33: - 18:c0:5f:90:d0:c5:04:8b:e3:4a:45:3d:a6:8c:c3:d1:47:1c: - 45:70:a4:75 ------BEGIN CERTIFICATE----- -MIIEKjCCAxKgAwIBAgIBAzANBgkqhkiG9w0BAQUFADBpMQswCQYDVQQGEwJVUzET -MBEGA1UECgwKUHJvbWV0aGV1czEpMCcGA1UECwwgUHJvbWV0aGV1cyBDZXJ0aWZp -Y2F0ZSBBdXRob3JpdHkxGjAYBgNVBAMMEVByb21ldGhldXMgVExTIENBMCAXDTE5 -MDQwNTA4MTAxMloYDzIwNTkwMzI2MDgxMDEyWjAzMQswCQYDVQQGEwJVUzETMBEG -A1UECgwKUHJvbWV0aGV1czEPMA0GA1UEAwwGQ2xpZW50MIIBIjANBgkqhkiG9w0B -AQEFAAOCAQ8AMIIBCgKCAQEAt9O2/CJfc2oysJpDoofLH2CPjvYEk778kDewYfgb -upSzfl4bUcbfmajh36qN9BLj5nUnDK6q3oKrqVTxioH5F1T94Rea2HVqmceFzinc -sW2mq2bqZAL9oLVTRAUplUo/Lq7eVmTiVZxe0BpoQgB6cd+C+Qgj2A1yQXeRMS8B -iAcisUDboKK+3ScfHoZlZBiraNNRlo4GPrD2KZVfSsbw+PcoF2EOlQKoLf1dhBvz -zJk9P41LZjNrWAxc7T5EiEiYNLwf3/cx5WCHQxo5AY54HpAIo++0/d8RLqLqMduV -LV5oOgKarw1RIgcWZjeQ7e9BwbEPiH7RDRFKGPI4ayJsAQIDAQABo4IBDzCCAQsw -DgYDVR0PAQH/BAQDAgeAMAkGA1UdEwQCMAAwEwYDVR0lBAwwCgYIKwYBBQUHAwIw 
-HQYDVR0OBBYEFDpG0cWMQmCs7wzdS1Ue8NdcdsMzMB8GA1UdIwQYMBaAFE0Cv3GV -aqpYxZy4g2deZBaZ4SqeMDwGCCsGAQUFBwEBBDAwLjAsBggrBgEFBQcwAoYgaHR0 -cDovL2V4YW1wbGUuY29tL2NhL3Rscy1jYS5jZXIwMQYDVR0fBCowKDAmoCSgIoYg -aHR0cDovL2V4YW1wbGUuY29tL2NhL3Rscy1jYS5jcmwwKAYDVR0RBCEwH4EdY2xp -ZW50QHByb21ldGhldXMuZXhhbXBsZS5jb20wDQYJKoZIhvcNAQEFBQADggEBAHP8 -h/LP47HfL/e/+XTcC/B/le93ump9xsXz2dbH6/ioMNOQ1aUMMjOVhaIFbninB6Xg -z/Rl79JthmYqfxN4L5DdnaQ01I/fQRsPF5mZBi0mhuJYPoTKE54AyoIHY+ds3+lH -1rP3URox9D15lefqv0CESAkjujGxZ80FUOzmCtgrfX1zeopf93IoV58VLbFOoTwG -U2BusvkECIE68rpdfqyT9zsa3gduFKIL4ihqUC3Ymzwl4oJrkH5Fe906eo5xmafo -iF8GcVs/GIVw+evHJkMrSY8XkKq6hopSY4OfnV15U69tGn5HDeo/MxjAX5DQxQSL -40pFPaaMw9FHHEVwpHU= ------END CERTIFICATE----- diff --git a/vendor/github.com/prometheus/common/config/testdata/empty b/vendor/github.com/prometheus/common/config/testdata/empty deleted file mode 100644 index e69de29bb..000000000 diff --git a/vendor/github.com/prometheus/common/config/testdata/self-signed-client.crt b/vendor/github.com/prometheus/common/config/testdata/self-signed-client.crt deleted file mode 100644 index fe2973ab7..000000000 --- a/vendor/github.com/prometheus/common/config/testdata/self-signed-client.crt +++ /dev/null @@ -1,121 +0,0 @@ -Certificate: - Data: - Version: 3 (0x2) - Serial Number: - 0e:47:ce:db:33:a0:10:93:9b:b1:ac:66:7c:16:2d:89:d0:b7:ea:1d - Signature Algorithm: sha256WithRSAEncryption - Issuer: C = US, ST = Denial, L = Springfield, O = Dis, CN = www.example.com - Validity - Not Before: Mar 1 16:51:42 2019 GMT - Not After : Jul 17 16:51:42 2046 GMT - Subject: C = US, ST = Denial, L = Springfield, O = Dis, CN = www.example.com - Subject Public Key Info: - Public Key Algorithm: rsaEncryption - RSA Public-Key: (4096 bit) - Modulus: - 00:ce:c6:ab:fd:9c:d2:da:55:f9:3d:5f:c0:0d:1a: - a6:1c:d1:7f:01:f4:0d:9c:ce:85:8b:01:8f:06:73: - 0a:b6:92:e1:6e:63:7d:e4:83:ca:c0:11:67:70:d9: - 89:0c:a9:62:0a:c3:cc:00:53:6f:b6:1b:0b:e1:eb: - 62:00:e8:ed:14:16:c6:29:45:0c:ee:25:40:21:10: - 
c2:3d:9a:3b:5c:27:54:bb:e4:9c:f6:e3:b4:dc:f1: - 0e:ba:c5:6f:60:94:45:b8:8d:f6:a4:1a:b4:fa:82: - 7b:5a:55:a6:11:c1:d4:e6:41:dc:c7:41:8e:db:46: - 6b:a2:0a:c1:13:96:47:12:4b:27:2e:d5:45:d4:51: - c9:b6:28:f8:0d:24:44:42:12:b8:b4:cd:ab:4a:67: - ba:8c:ff:34:92:38:b4:e5:4a:53:fe:33:72:55:df: - 27:d9:70:0f:47:cc:7c:d5:b2:52:bf:80:c0:a7:15: - b0:25:c8:d9:a1:41:e2:ee:e9:f5:0f:9f:27:ea:7c: - dc:ec:19:48:73:74:48:47:13:59:ea:89:e0:61:50: - 08:95:fc:32:9d:73:21:8e:b2:75:95:41:62:0c:61: - c7:b9:59:e2:51:a2:4f:bd:74:1b:0d:26:3c:c8:a6: - 1a:cb:db:10:cc:33:dd:2a:0b:38:55:60:85:f8:25: - 74:1f:0d:26:4e:db:2d:03:12:d5:85:00:cf:51:01: - 95:94:c8:85:cc:0e:5a:05:aa:3e:7a:34:e2:17:8b: - 3b:c5:21:a2:da:56:0a:ed:de:6c:2c:40:10:85:25: - 5d:df:39:e9:45:0e:10:82:bf:34:5c:64:52:35:4b: - aa:1a:56:37:ab:1f:7f:b5:07:5f:8a:22:45:4d:96: - 21:6c:a2:eb:47:39:bf:38:de:b5:4c:99:af:bf:de: - f8:7c:54:8b:40:2e:1f:80:1b:97:6a:fe:2c:05:6a: - 1b:9c:cb:a1:1c:f9:9e:36:ef:d9:a2:1d:d4:61:d0: - 6d:d1:b6:00:f8:e7:7f:74:f8:c0:81:95:7d:68:dc: - f3:93:7d:49:33:99:15:d5:49:d6:6d:69:82:c1:9f: - f2:3e:c2:db:0b:b1:e6:7c:e5:98:f4:9f:01:7d:57: - ac:36:78:15:a9:54:6f:e6:3e:52:54:68:a3:bc:8f: - 99:3f:02:02:1f:d2:21:b1:39:70:61:4c:2f:71:e5: - 27:d3:d0:75:46:d7:5e:78:ee:82:a5:bd:6d:12:2d: - 0b:40:92:61:c0:9e:8c:71:be:d1:bb:4f:23:fe:4e: - f2:79:a0:bd:60:f8:62:e4:9a:5b:1d:e0:a7:99:bd: - 32:b2:29:7b:ca:8c:6b:1a:80:c8:6f:b3:aa:a0:9e: - 1b:03:ab - Exponent: 65537 (0x10001) - X509v3 extensions: - X509v3 Subject Key Identifier: - D3:CB:AC:FB:69:9C:D3:14:67:44:9F:FA:0F:B9:02:60:64:95:4E:17 - X509v3 Authority Key Identifier: - keyid:D3:CB:AC:FB:69:9C:D3:14:67:44:9F:FA:0F:B9:02:60:64:95:4E:17 - - X509v3 Basic Constraints: critical - CA:TRUE - Signature Algorithm: sha256WithRSAEncryption - 57:d6:69:ed:9e:05:ea:4d:64:3b:88:98:26:6c:00:6e:e7:b7: - cb:ff:48:a2:c1:50:03:39:28:46:94:c0:19:7d:ff:10:7b:11: - 6e:88:6d:fe:d8:62:3a:ce:28:33:64:86:85:0f:9f:bf:13:23: - 48:11:b0:86:fa:7a:1d:6b:8a:e7:8c:76:fb:1b:a8:a9:d5:b3: - 
b8:f0:b4:08:27:a4:91:14:1a:e3:1c:11:83:39:2c:20:f1:19: - 21:35:9e:af:69:eb:52:ec:eb:c8:63:e2:bd:76:46:c5:4b:0c: - c2:f7:b9:c3:2a:db:31:4a:b9:ea:a5:04:c4:e7:b6:cf:fc:7c: - 8b:8a:88:39:ad:f9:06:e1:c6:63:47:6c:47:5c:e9:0b:24:b5: - c1:eb:5d:67:ee:07:ac:42:5b:d4:cb:00:eb:ec:c5:2f:3a:d0: - 76:f1:2a:9c:b9:44:3e:ed:71:40:02:4d:68:b5:b4:09:de:4d: - ba:1c:87:86:2d:3c:b7:2c:e5:87:aa:ff:e2:5e:ad:0b:8c:bb: - 39:9a:13:26:e3:c4:34:00:48:06:14:8f:ec:4b:cb:e7:be:80: - bd:c7:6c:b0:75:88:4e:cd:b7:b1:7e:bf:92:85:c7:a0:45:4f: - 73:ba:a7:27:86:8f:12:cd:35:f7:8c:34:3f:66:1a:7f:53:1d: - 21:8c:90:22:ff:e7:d9:95:aa:15:c2:28:d0:c5:9b:6c:61:e9: - 15:ff:63:9f:8e:d8:b4:a2:d5:06:38:1a:cc:5f:89:2a:23:70: - a3:32:22:cd:00:20:c7:65:60:17:5e:8a:cc:dc:96:08:38:a5: - 7d:65:46:79:79:02:11:04:4b:86:9d:f3:b3:2c:c6:2d:18:b4: - 31:e1:86:aa:4c:0c:93:c3:fb:7a:5a:63:c2:6f:68:d3:86:2c: - 6d:cd:ab:6d:41:d2:36:32:c1:52:25:d0:68:bc:ac:ca:f3:41: - f6:5a:46:83:15:bd:e6:aa:3b:dc:6b:44:1f:6c:02:e9:ed:b5: - 91:28:8d:af:6f:27:1b:71:83:61:a8:8e:15:36:01:92:42:32: - 61:62:43:04:31:f7:f3:f3:c9:c0:93:19:c9:dd:4d:51:3b:64: - 3b:06:90:4f:93:22:15:6e:8b:5f:2e:4e:11:a7:b9:a3:f2:fe: - 45:c9:ea:4b:58:57:95:b3:77:29:9f:7d:bc:1d:a2:3d:5a:38: - b3:72:b2:c7:8b:12:a9:39:4f:4f:2e:bb:7e:ce:91:bb:82:c0: - 67:37:79:f6:9c:75:3b:39:6c:82:ac:6a:06:09:70:99:10:76: - a4:38:46:50:7d:8e:d0:24:fb:dd:32:8f:40:00:d9:d1:50:20: - 69:bd:86:b9:9e:89:23:60 ------BEGIN CERTIFICATE----- -MIIFmTCCA4GgAwIBAgIUDkfO2zOgEJObsaxmfBYtidC36h0wDQYJKoZIhvcNAQEL -BQAwXDELMAkGA1UEBhMCVVMxDzANBgNVBAgMBkRlbmlhbDEUMBIGA1UEBwwLU3By -aW5nZmllbGQxDDAKBgNVBAoMA0RpczEYMBYGA1UEAwwPd3d3LmV4YW1wbGUuY29t -MB4XDTE5MDMwMTE2NTE0MloXDTQ2MDcxNzE2NTE0MlowXDELMAkGA1UEBhMCVVMx -DzANBgNVBAgMBkRlbmlhbDEUMBIGA1UEBwwLU3ByaW5nZmllbGQxDDAKBgNVBAoM -A0RpczEYMBYGA1UEAwwPd3d3LmV4YW1wbGUuY29tMIICIjANBgkqhkiG9w0BAQEF -AAOCAg8AMIICCgKCAgEAzsar/ZzS2lX5PV/ADRqmHNF/AfQNnM6FiwGPBnMKtpLh -bmN95IPKwBFncNmJDKliCsPMAFNvthsL4etiAOjtFBbGKUUM7iVAIRDCPZo7XCdU 
-u+Sc9uO03PEOusVvYJRFuI32pBq0+oJ7WlWmEcHU5kHcx0GO20ZrogrBE5ZHEksn -LtVF1FHJtij4DSREQhK4tM2rSme6jP80kji05UpT/jNyVd8n2XAPR8x81bJSv4DA -pxWwJcjZoUHi7un1D58n6nzc7BlIc3RIRxNZ6ongYVAIlfwynXMhjrJ1lUFiDGHH -uVniUaJPvXQbDSY8yKYay9sQzDPdKgs4VWCF+CV0Hw0mTtstAxLVhQDPUQGVlMiF -zA5aBao+ejTiF4s7xSGi2lYK7d5sLEAQhSVd3znpRQ4Qgr80XGRSNUuqGlY3qx9/ -tQdfiiJFTZYhbKLrRzm/ON61TJmvv974fFSLQC4fgBuXav4sBWobnMuhHPmeNu/Z -oh3UYdBt0bYA+Od/dPjAgZV9aNzzk31JM5kV1UnWbWmCwZ/yPsLbC7HmfOWY9J8B -fVesNngVqVRv5j5SVGijvI+ZPwICH9IhsTlwYUwvceUn09B1RtdeeO6Cpb1tEi0L -QJJhwJ6Mcb7Ru08j/k7yeaC9YPhi5JpbHeCnmb0ysil7yoxrGoDIb7OqoJ4bA6sC -AwEAAaNTMFEwHQYDVR0OBBYEFNPLrPtpnNMUZ0Sf+g+5AmBklU4XMB8GA1UdIwQY -MBaAFNPLrPtpnNMUZ0Sf+g+5AmBklU4XMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI -hvcNAQELBQADggIBAFfWae2eBepNZDuImCZsAG7nt8v/SKLBUAM5KEaUwBl9/xB7 -EW6Ibf7YYjrOKDNkhoUPn78TI0gRsIb6eh1riueMdvsbqKnVs7jwtAgnpJEUGuMc -EYM5LCDxGSE1nq9p61Ls68hj4r12RsVLDML3ucMq2zFKueqlBMTnts/8fIuKiDmt -+QbhxmNHbEdc6QsktcHrXWfuB6xCW9TLAOvsxS860HbxKpy5RD7tcUACTWi1tAne -Tboch4YtPLcs5Yeq/+JerQuMuzmaEybjxDQASAYUj+xLy+e+gL3HbLB1iE7Nt7F+ -v5KFx6BFT3O6pyeGjxLNNfeMND9mGn9THSGMkCL/59mVqhXCKNDFm2xh6RX/Y5+O -2LSi1QY4GsxfiSojcKMyIs0AIMdlYBdeiszclgg4pX1lRnl5AhEES4ad87Msxi0Y -tDHhhqpMDJPD+3paY8JvaNOGLG3Nq21B0jYywVIl0Gi8rMrzQfZaRoMVveaqO9xr -RB9sAunttZEoja9vJxtxg2GojhU2AZJCMmFiQwQx9/PzycCTGcndTVE7ZDsGkE+T -IhVui18uThGnuaPy/kXJ6ktYV5Wzdymffbwdoj1aOLNysseLEqk5T08uu37OkbuC -wGc3efacdTs5bIKsagYJcJkQdqQ4RlB9jtAk+90yj0AA2dFQIGm9hrmeiSNg ------END CERTIFICATE----- diff --git a/vendor/github.com/prometheus/common/config/testdata/self-signed-client.key b/vendor/github.com/prometheus/common/config/testdata/self-signed-client.key deleted file mode 100644 index f70895131..000000000 --- a/vendor/github.com/prometheus/common/config/testdata/self-signed-client.key +++ /dev/null @@ -1,52 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIJRQIBADANBgkqhkiG9w0BAQEFAASCCS8wggkrAgEAAoICAQDOxqv9nNLaVfk9 -X8ANGqYc0X8B9A2czoWLAY8Gcwq2kuFuY33kg8rAEWdw2YkMqWIKw8wAU2+2Gwvh 
-62IA6O0UFsYpRQzuJUAhEMI9mjtcJ1S75Jz247Tc8Q66xW9glEW4jfakGrT6gnta -VaYRwdTmQdzHQY7bRmuiCsETlkcSSycu1UXUUcm2KPgNJERCEri0zatKZ7qM/zSS -OLTlSlP+M3JV3yfZcA9HzHzVslK/gMCnFbAlyNmhQeLu6fUPnyfqfNzsGUhzdEhH -E1nqieBhUAiV/DKdcyGOsnWVQWIMYce5WeJRok+9dBsNJjzIphrL2xDMM90qCzhV -YIX4JXQfDSZO2y0DEtWFAM9RAZWUyIXMDloFqj56NOIXizvFIaLaVgrt3mwsQBCF -JV3fOelFDhCCvzRcZFI1S6oaVjerH3+1B1+KIkVNliFsoutHOb843rVMma+/3vh8 -VItALh+AG5dq/iwFahucy6Ec+Z4279miHdRh0G3RtgD45390+MCBlX1o3POTfUkz -mRXVSdZtaYLBn/I+wtsLseZ85Zj0nwF9V6w2eBWpVG/mPlJUaKO8j5k/AgIf0iGx -OXBhTC9x5SfT0HVG11547oKlvW0SLQtAkmHAnoxxvtG7TyP+TvJ5oL1g+GLkmlsd -4KeZvTKyKXvKjGsagMhvs6qgnhsDqwIDAQABAoICAQCJTCnPkF4BU6zXL8jZ6qP5 -5rEqnt6bDBZoInTRl3m5mPXO0ok5PrlVpzjEGe2CVsYe17uRS9WVWYgeTqkYaZFi -EW0q4gqf5mQakIIpXUuk+QiuajI/TRs+yWE6avZ1bn6M+NaYSJN680DszooiqE2x -RnJObB1rQ+scAYAKfXJbl0NBOaPQQy5oofNy5m3cYYn7o8Tk9tNL4/kITlbvGNeE -pqx4kGBpZJsA1areSjXfqqJBT4lSzXaUOKdydC6gXNGoRZh7vJ36629ConrF3R77 -/qR00qzZFyVlFuI0ZOGxzwtK63/3LIs+BOYhaQ5bPM/2JFOXA6kKzcBuEFVkW5oq -APoST7hk1mVdMKDigaT5pmuB8JB9RC0w/oR3OXONImKYPf3fBUSU1hw6YyVZFA6c -6SKik3g/sWl0BZvqCJgU3v3qTLhVPXtiDj97g9pWdyfJBduE8Ft89OHljNbY2HBd -hyW+/XSjodWW1CRr4v1DNXjg880VOWzueptROviEwFkpxi6oKFBXWegWMW5kR03d -21XlzrB20XckTjK5c8jQ5lQG49CnX8MyYMfj6f0HNCbIghbKfMvO7fWY1sD7wAlL -DlLr5MLxal9Wm0Jx56DQ6ZgnSCU0ms2L0RT9IVESGWC1am9/FjMvmK+zdvS3uFgb -HzwxN+7XD+4klO7H2GQFIQKCAQEA/pKzCLvJLyX8/bu4U5J9Ndf/V1N8YW++IOdl -MZZw/QPZPJhg23Iw/9kGOPL0W1BqxFwaC6UWuR9YXLS+/GfGUlaeLbeGvMs3w3FH -W9RjCwLMnBu2JwUqJqSqc9dkQor0up8sa7sYOPqOrHupIFBxx/tV5o24BJ0xz2RH -eN8VdT/XejW2CY4UX9LGk0l8iPySGRx5d9MACrHwqmCMhTqiWAob7r3D+DQxqd4r -4q/lZ8ItKTzvrebHotBQcdMeIqIlQWG/chVKynxtB04zNOXwwtSxOKPsN1EysBsC -vklZ3FeYFipHKsmKX/COWDjnyKmG/iRVjZ/O5vZ0rsQl8iujbwKCAQEAz+9i0Wod -xrqX9Gd30JVANy5rz74wfvBy03J2T1KZmMxPhtVUloWU93952CiUpD2Xb6nwa00V -LxYfXlt2YrfV+2I3YP6TC8VXiX7uQ8i6tg2JAY40mrbuYoO3P1gfgdJ909TjLhrL -aNg+nCyJDePdeKbX0yMf4ukHBNbvSH65fkp1cl4uU1Wvb4tGNcyYcX1q953JP1ue -PwgysbuXz/chpHmw8pH/GSZ5FAxGvHwkBmA0BYhDcpETFfKfm2NEDO5xa/4GTHNi 
-o+d5/fotJmihY5IpyVlSai8Kox9mYUin6ntbFkCvK+x6m7859N1lPG0BJVJTD+Cx -AXI6QQDyl+kVhQKCAQEAxXfd0GR5xkzdVaSLcqgq391Qf9iOnrYi8TsMz842jsyx -ccNxPkfxokQiA4LR8RML/ozC102Ttr2NuTuq+fc1ayEtSaEWrtOjycLQ63Zv7Vaa -iG0melYTQC5y2bC2YLeQ5kIaHubd/zS7/yddJWfBGrLnCxPbLhkRTiInHqdM6co/ -xthrADZpr3q79fwG0eu5GClyP3Q4kBM+76o81guJamlNCX/Bx4IVFAL2X7y5YibJ -CTfvYyGksbKM8/4jXhIQfArqif/iJ/ckS4ppRhsnCroZTio5TR97BgettRUI01ZO -7sKUuafj4k+i2uQpRwnZYMGma1kPETETiY01MgiPmQKCAQEAyQcnAk8VeovrXN6r -d3zUGIVItg+p0w+j88k1mHrDBHaCbFjS7rM20hDsO48AJclmHw6s4RAk6uD4csD6 -M3aH6gGKiLuWbkrb1pJgyCfIWzm6u0ZAlVNGJPgysYsA6wIVpDatbGV7QmHOJi7o -UgV6mKq0/et3aGjh4EvsCqp5qx9RbMChCPBOLAj6WAj1WMNoJvzlE9v/ofDLEgnL -O8QxQlJkQB/mAOqxJDC6Mn/SVFet86tJifm3+gAXTqMpp1bfUQjGDiN/ufaQenrk -K738SceFnqQ8iWvxXMN+t48GyCt6ZIkk0dJOt0SpQ5LHzSOVd/+fTjps5nkI2M+R -ukweAQKCAQEA3dmHRAqs0gjvJ2gthayT0G7s8s6oObxfKYpRLw8Q8s+JxwZRVr0O -aTt1kYn2eXIdO12zLBspRiX+1tmbpD3hEoO+NPplvNsfwzbPtDYofYT1bD8J19JV -seFbdHlxNGBHaesjNLIsbTRPokATLtvhyQFNhS2SBV4OLiu3GzfSgGBMaPoSDnNN -+OHZ/0aunQkpOF90/LzFrhMYttXhkMSgXGyg4kZkg93HLVGOvz3/WIcaEh8Merqc -+pzLRW+nhJin0lDW8RfvAPOZlL6nTTUWZc6cr2yyJFxzw4AqvGhvCnD5Px9mPNpP -XM9QqgBE9ayYiJyup/gvGszbv/43ZOuHPg== ------END PRIVATE KEY----- diff --git a/vendor/github.com/prometheus/common/config/testdata/server.crt b/vendor/github.com/prometheus/common/config/testdata/server.crt index 2ead96984..87ad202fe 100644 --- a/vendor/github.com/prometheus/common/config/testdata/server.crt +++ b/vendor/github.com/prometheus/common/config/testdata/server.crt @@ -1,35 +1,35 @@ Certificate: Data: Version: 3 (0x2) - Serial Number: 1 (0x1) - Signature Algorithm: sha1WithRSAEncryption - Issuer: C=US, O=Prometheus, OU=Prometheus Certificate Authority, CN=Prometheus TLS CA + Serial Number: 4 (0x4) + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=NO, O=Green AS, OU=Green Certificate Authority, CN=Green TLS CA Validity - Not Before: Apr 5 08:06:57 2019 GMT - Not After : Mar 26 08:06:57 2059 GMT - Subject: C=US, O=Prometheus, 
CN=prometheus.example.com + Not Before: Jul 26 12:47:08 2017 GMT + Not After : Jul 26 12:47:08 2019 GMT + Subject: C=NO, O=Green AS, OU=Green Certificate Authority, CN=Green TLS CA Subject Public Key Info: Public Key Algorithm: rsaEncryption - RSA Public-Key: (2048 bit) + Public-Key: (2048 bit) Modulus: - 00:bd:6c:b6:7f:d1:2f:be:e4:41:eb:5d:ff:50:78: - 03:2b:76:03:da:01:48:20:13:90:66:c9:ce:6e:06: - e5:fa:2d:0d:c0:b0:46:28:44:10:a0:61:79:87:a2: - 98:4c:29:fa:f9:bb:0f:44:c7:90:5c:5c:55:60:cd: - 45:da:b8:e4:dd:28:72:c8:8b:a1:3e:4b:00:09:82: - b0:2c:dc:d6:17:c9:02:f4:cd:26:c7:11:28:f3:77: - b5:97:c2:76:c2:e0:07:d7:34:5b:e0:ed:1a:59:a5: - b4:b7:16:09:3d:35:bd:d9:03:07:9d:7c:3b:f0:63: - bd:5e:02:99:cf:32:e1:ac:4c:7a:3e:4c:b2:8e:98: - 68:07:4f:59:dc:0d:bf:cc:83:04:5c:d8:90:f0:73: - da:2b:08:17:c4:36:a7:d8:94:3d:b6:c0:af:29:0a: - d3:19:5f:eb:7d:cc:4d:05:56:11:0a:ee:b1:f3:d7: - c9:5a:3c:8c:57:16:91:51:14:f8:20:4e:0f:29:9e: - 04:21:e6:f1:e4:e8:44:af:d7:25:92:08:64:fc:2c: - 1c:2e:4f:71:53:91:53:1d:e5:f9:7b:52:0f:21:da: - 5c:dd:19:68:96:ca:70:6a:f1:c4:0d:07:af:f8:65: - 13:92:e9:ef:65:b3:89:86:fd:c0:74:5c:a4:6b:49: - 62:c5 + 00:97:43:c5:f6:24:b8:ce:30:12:70:ea:17:9c:c0: + ce:f2:ef:58:8b:12:7d:46:5e:01:f1:1a:93:b2:3e: + d8:cf:99:bc:10:32:f1:12:b0:ef:00:6c:d6:c4:45: + 85:a8:33:7b:cd:ec:8f:4a:92:d0:5a:4a:41:69:7f: + e3:dd:7e:71:d2:21:9c:df:43:b5:6c:60:bb:2a:12: + a8:08:cf:c5:ee:08:7d:48:ea:4b:54:e4:82:d9:88: + b0:b8:5e:02:12:cb:0e:09:99:b7:5f:42:b6:d7:26: + 34:0f:4a:e7:fc:ac:9c:59:cd:a1:50:4c:88:5f:f1: + d2:7e:5b:21:41:f0:37:50:80:48:71:50:26:61:26: + 79:64:4b:7e:91:8d:0e:f4:27:fe:19:80:bf:39:55: + b7:f3:d0:cd:61:6c:d8:c1:c7:d3:26:77:92:1a:14: + 42:56:cb:bc:fd:1a:4a:eb:17:d8:8d:af:d1:c0:46: + 9f:f0:40:5e:0e:34:2f:e7:db:be:66:fd:89:0b:6b: + 8c:71:c1:0b:0a:c5:c4:c4:eb:7f:44:c1:75:36:23: + fd:ed:b6:ee:87:d9:88:47:e1:4b:7c:60:53:e7:85: + 1c:2f:82:4b:2b:5e:63:1a:49:17:36:2c:fc:39:23: + 49:22:4d:43:b5:51:22:12:24:9e:31:44:d8:16:4e: + a8:eb Exponent: 65537 (0x10001) X509v3 extensions: 
X509v3 Key Usage: critical @@ -39,58 +39,58 @@ Certificate: X509v3 Extended Key Usage: TLS Web Server Authentication, TLS Web Client Authentication X509v3 Subject Key Identifier: - 00:61:01:AD:25:44:8A:EF:E1:2C:EC:83:5A:3A:3B:EA:A0:BD:E1:45 + 70:A9:FB:44:66:3C:63:96:E6:05:B2:74:47:C8:18:7E:43:6D:EE:8B X509v3 Authority Key Identifier: - keyid:4D:02:BF:71:95:6A:AA:58:C5:9C:B8:83:67:5E:64:16:99:E1:2A:9E + keyid:AE:42:88:75:DD:05:A6:8E:48:7F:50:69:F9:B7:34:23:49:B8:B4:71 Authority Information Access: - CA Issuers - URI:http://example.com/ca/tls-ca.cer + CA Issuers - URI:http://green.no/ca/tls-ca.cer X509v3 CRL Distribution Points: Full Name: - URI:http://example.com/ca/tls-ca.crl + URI:http://green.no/ca/tls-ca.crl X509v3 Subject Alternative Name: IP Address:127.0.0.1, IP Address:127.0.0.0, DNS:localhost Signature Algorithm: sha1WithRSAEncryption - 77:97:e4:ef:db:10:8e:62:50:96:4a:6e:f5:a4:f9:1f:19:3b: - c8:a4:dd:b3:f6:11:41:1a:fb:e3:f8:dd:0e:64:e5:2b:00:b9: - e6:25:9f:2e:e1:d2:9a:cd:b6:f2:41:4d:27:dd:2c:9a:af:97: - 79:e8:cf:61:fb:cf:be:25:c6:e1:19:a0:c8:90:44:a0:76:8a: - 45:d4:37:22:e5:d4:80:b4:b3:0f:a8:33:08:24:ad:21:0b:b7: - 98:46:93:90:8a:ae:77:0c:cb:b8:59:d3:3b:9b:fb:16:5a:22: - ca:c2:97:9d:78:1b:fc:23:fc:a0:42:54:40:de:88:4b:07:2b: - 19:4e:0e:79:bf:c9:9f:01:a6:46:c5:55:fa:9f:c0:0d:8a:a6: - e1:47:16:a6:0e:be:23:c9:e9:58:d6:31:71:8c:80:9c:16:64: - f0:14:08:22:a1:23:7c:98:b9:62:d1:4a:ce:e3:5c:59:fb:41: - 87:a5:3b:36:dd:3d:45:48:b0:b0:77:6f:de:58:2a:27:4d:56: - 20:54:08:20:c8:6d:79:b5:b9:e6:3a:03:24:0f:6d:67:39:20: - 78:10:2f:47:85:83:c1:4d:17:33:79:84:75:27:fa:47:67:59: - 56:cc:33:7b:a5:77:aa:59:9a:98:30:10:1a:78:43:34:8f:ed: - c2:a1:a3:ea + 56:1e:b8:52:ba:f5:72:42:ad:15:71:c1:5e:00:63:c9:4d:56: + f2:8d:a3:a9:91:db:d0:b5:1b:88:80:93:80:28:48:b2:d0:a9: + d0:ea:de:40:78:cc:57:8c:00:b8:65:99:68:95:98:9b:fb:a2: + 43:21:ea:00:37:01:77:c7:3b:1a:ec:58:2d:25:9c:ad:23:41: + 5e:ae:fd:ac:2f:26:81:b8:a7:49:9b:5a:10:fe:ad:c3:86:ab: + 
59:67:b0:c7:81:72:95:60:b5:cb:fc:9f:ad:27:16:50:85:76: + 33:16:20:2c:1f:c6:14:09:0c:48:9f:c0:19:16:c9:fa:b0:d8: + bf:b7:8d:a7:aa:eb:fe:f8:6f:dd:2b:83:ee:c7:8a:df:c8:59: + e6:2e:13:1f:57:cc:6f:31:db:f7:b7:5c:3f:78:ad:22:2c:48: + bb:6d:c4:ab:dc:c1:76:34:29:d9:1e:67:e0:ac:37:2b:90:f9: + 71:bd:cf:a1:01:b9:eb:0b:0b:79:2e:8b:52:3d:8e:13:97:c8: + 05:a3:ef:68:82:49:12:2a:25:1a:48:49:b8:7c:3c:66:0d:74: + f9:00:8c:5b:57:d7:76:b1:26:95:86:b2:2e:a3:b2:9c:e0:eb: + 2d:fc:77:03:8f:cd:56:46:3a:c9:6a:fa:72:e3:19:d8:ef:de: + 4b:36:95:79 -----BEGIN CERTIFICATE----- -MIIEPDCCAySgAwIBAgIBATANBgkqhkiG9w0BAQUFADBpMQswCQYDVQQGEwJVUzET -MBEGA1UECgwKUHJvbWV0aGV1czEpMCcGA1UECwwgUHJvbWV0aGV1cyBDZXJ0aWZp -Y2F0ZSBBdXRob3JpdHkxGjAYBgNVBAMMEVByb21ldGhldXMgVExTIENBMCAXDTE5 -MDQwNTA4MDY1N1oYDzIwNTkwMzI2MDgwNjU3WjBDMQswCQYDVQQGEwJVUzETMBEG -A1UECgwKUHJvbWV0aGV1czEfMB0GA1UEAwwWcHJvbWV0aGV1cy5leGFtcGxlLmNv -bTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL1stn/RL77kQetd/1B4 -Ayt2A9oBSCATkGbJzm4G5fotDcCwRihEEKBheYeimEwp+vm7D0THkFxcVWDNRdq4 -5N0ocsiLoT5LAAmCsCzc1hfJAvTNJscRKPN3tZfCdsLgB9c0W+DtGlmltLcWCT01 -vdkDB518O/BjvV4Cmc8y4axMej5Mso6YaAdPWdwNv8yDBFzYkPBz2isIF8Q2p9iU -PbbArykK0xlf633MTQVWEQrusfPXyVo8jFcWkVEU+CBODymeBCHm8eToRK/XJZII -ZPwsHC5PcVORUx3l+XtSDyHaXN0ZaJbKcGrxxA0Hr/hlE5Lp72WziYb9wHRcpGtJ -YsUCAwEAAaOCAREwggENMA4GA1UdDwEB/wQEAwIFoDAJBgNVHRMEAjAAMB0GA1Ud -JQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAdBgNVHQ4EFgQUAGEBrSVEiu/hLOyD -Wjo76qC94UUwHwYDVR0jBBgwFoAUTQK/cZVqqljFnLiDZ15kFpnhKp4wPAYIKwYB -BQUHAQEEMDAuMCwGCCsGAQUFBzAChiBodHRwOi8vZXhhbXBsZS5jb20vY2EvdGxz -LWNhLmNlcjAxBgNVHR8EKjAoMCagJKAihiBodHRwOi8vZXhhbXBsZS5jb20vY2Ev -dGxzLWNhLmNybDAgBgNVHREEGTAXhwR/AAABhwR/AAAAgglsb2NhbGhvc3QwDQYJ -KoZIhvcNAQEFBQADggEBAHeX5O/bEI5iUJZKbvWk+R8ZO8ik3bP2EUEa++P43Q5k -5SsAueYlny7h0prNtvJBTSfdLJqvl3noz2H7z74lxuEZoMiQRKB2ikXUNyLl1IC0 -sw+oMwgkrSELt5hGk5CKrncMy7hZ0zub+xZaIsrCl514G/wj/KBCVEDeiEsHKxlO -Dnm/yZ8BpkbFVfqfwA2KpuFHFqYOviPJ6VjWMXGMgJwWZPAUCCKhI3yYuWLRSs7j 
-XFn7QYelOzbdPUVIsLB3b95YKidNViBUCCDIbXm1ueY6AyQPbWc5IHgQL0eFg8FN -FzN5hHUn+kdnWVbMM3uld6pZmpgwEBp4QzSP7cKho+o= +MIIEQjCCAyqgAwIBAgIBBDANBgkqhkiG9w0BAQUFADBdMQswCQYDVQQGEwJOTzER +MA8GA1UECgwIR3JlZW4gQVMxJDAiBgNVBAsMG0dyZWVuIENlcnRpZmljYXRlIEF1 +dGhvcml0eTEVMBMGA1UEAwwMR3JlZW4gVExTIENBMB4XDTE3MDcyNjEyNDcwOFoX +DTE5MDcyNjEyNDcwOFowXTELMAkGA1UEBhMCTk8xETAPBgNVBAoMCEdyZWVuIEFT +MSQwIgYDVQQLDBtHcmVlbiBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxFTATBgNVBAMM +DEdyZWVuIFRMUyBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJdD +xfYkuM4wEnDqF5zAzvLvWIsSfUZeAfEak7I+2M+ZvBAy8RKw7wBs1sRFhagze83s +j0qS0FpKQWl/491+cdIhnN9DtWxguyoSqAjPxe4IfUjqS1TkgtmIsLheAhLLDgmZ +t19CttcmNA9K5/ysnFnNoVBMiF/x0n5bIUHwN1CASHFQJmEmeWRLfpGNDvQn/hmA +vzlVt/PQzWFs2MHH0yZ3khoUQlbLvP0aSusX2I2v0cBGn/BAXg40L+fbvmb9iQtr +jHHBCwrFxMTrf0TBdTYj/e227ofZiEfhS3xgU+eFHC+CSyteYxpJFzYs/DkjSSJN +Q7VRIhIknjFE2BZOqOsCAwEAAaOCAQswggEHMA4GA1UdDwEB/wQEAwIFoDAJBgNV +HRMEAjAAMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAdBgNVHQ4EFgQU +cKn7RGY8Y5bmBbJ0R8gYfkNt7oswHwYDVR0jBBgwFoAUrkKIdd0Fpo5If1Bp+bc0 +I0m4tHEwOQYIKwYBBQUHAQEELTArMCkGCCsGAQUFBzAChh1odHRwOi8vZ3JlZW4u +bm8vY2EvdGxzLWNhLmNlcjAuBgNVHR8EJzAlMCOgIaAfhh1odHRwOi8vZ3JlZW4u +bm8vY2EvdGxzLWNhLmNybDAgBgNVHREEGTAXhwR/AAABhwR/AAAAgglsb2NhbGhv +c3QwDQYJKoZIhvcNAQEFBQADggEBAFYeuFK69XJCrRVxwV4AY8lNVvKNo6mR29C1 +G4iAk4AoSLLQqdDq3kB4zFeMALhlmWiVmJv7okMh6gA3AXfHOxrsWC0lnK0jQV6u +/awvJoG4p0mbWhD+rcOGq1lnsMeBcpVgtcv8n60nFlCFdjMWICwfxhQJDEifwBkW +yfqw2L+3jaeq6/74b90rg+7Hit/IWeYuEx9XzG8x2/e3XD94rSIsSLttxKvcwXY0 +KdkeZ+CsNyuQ+XG9z6EBuesLC3kui1I9jhOXyAWj72iCSRIqJRpISbh8PGYNdPkA +jFtX13axJpWGsi6jspzg6y38dwOPzVZGOslq+nLjGdjv3ks2lXk= -----END CERTIFICATE----- diff --git a/vendor/github.com/prometheus/common/config/testdata/server.key b/vendor/github.com/prometheus/common/config/testdata/server.key index e1226c0e1..126c1b5d0 100644 --- a/vendor/github.com/prometheus/common/config/testdata/server.key +++ b/vendor/github.com/prometheus/common/config/testdata/server.key @@ -1,28 +1,28 @@ -----BEGIN PRIVATE 
KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC9bLZ/0S++5EHr -Xf9QeAMrdgPaAUggE5Bmyc5uBuX6LQ3AsEYoRBCgYXmHophMKfr5uw9Ex5BcXFVg -zUXauOTdKHLIi6E+SwAJgrAs3NYXyQL0zSbHESjzd7WXwnbC4AfXNFvg7RpZpbS3 -Fgk9Nb3ZAwedfDvwY71eApnPMuGsTHo+TLKOmGgHT1ncDb/MgwRc2JDwc9orCBfE -NqfYlD22wK8pCtMZX+t9zE0FVhEK7rHz18laPIxXFpFRFPggTg8pngQh5vHk6ESv -1yWSCGT8LBwuT3FTkVMd5fl7Ug8h2lzdGWiWynBq8cQNB6/4ZROS6e9ls4mG/cB0 -XKRrSWLFAgMBAAECggEAezQ0V1o11dEc1vuiTjJgzWnLA4aF5OcUquZjb8jo2Blp -soR0fUgYEFiV9RRaPl+nr7ptKe0rBgfAOGALKUHNCdN/JNU8oQmjEoyADg3s6jeB -xruQlzWgDwszf2uqVwHj16Nkhx1wYBKZQeQBSmCkBHwl/daKHcahqn3CkLOleKx+ -Qlc3BzWNaGte6qpJMs0It3by1FuxRwVz5VkL8uhzj0WIOYMA84t0gTnFH9gfRO3F -licotxg/Nl5M36wWcfL8Jq++72AtaKcD1jUEwuQpogrVeqflmeHwn/TlL++Hv6Xe -Lq0jt3OCUKUV40eq9c5uEgTmyrVHMDkfFdXzutdMAQKBgQDsSMXk7P4SX6u6uTjV -In9eWw6ZyJ2aL6VB9co/NMsj49GrrFT8VX9d+JPe9P/n6tuGcFbymNep22njRksR -0ItpW1NFRR/R3g0kYe1EhkRpNm6fhY9oIuR9xhcNnPNYkqAKT3T/dxrzbwsNhomi -X8aht/eCz4ZsK/KdOGTkPozxgQKBgQDNOvrclT1Wl4bxONp9pEV5XpRSD/qigfIp -i5wxy7ihX/QY9RToIWJDnzMVLnEYe64RB2WB8/4WwNPOQcuaxXbFUFct/2NdhTnS -ToJPgPe819zW9t1FLTf1fHtsRBpGFtbhdlUDOiOtJiMXYiwlRh2uyWFhjOo8TNUE -qMwai0vLRQKBgQCDH4t6lC4W4jK5x2oLlT5bjWqX2uXjF8e8x/q5gsGspBPKEjOD -aKrq6jSdSRbui73RaGxH6pvb7iBf+LVWKIYFLKIUUdzrqS9f3lw+Z8h1HrjbG9JO -dvaX+aL3cf71S0E3F4sU7fLt3tSiZ+PfUQk424+mbyXox6a2qwIKS9AJgQKBgHCu -dHROYJo9ojKpo5Ueb6K+4jLYYSV+sYZMCBtzHlFETNKzJaJ6SeiU7Ugw8pmdtqnU -5M/gNl8pymFR0MeOqbKWdPdlZJpBfsjQoE2kouEFqFRCwKStui7IBUAheEeJXLv3 -659U+aek69l35oMkp0GDgjs8UpN/H+pp/36Hgrr9AoGAftWU405rpStHEdRVrazP -FibQesT9HOdJgmm1gNIhj+PnFs7lKER9p0Wdl79QnIqjwyhjCXL94TFerzTKLY2c -IRj5dcRHiiT0iK8wq8bzGNYCqV73oQXaUFMiutNAArXwzwuvPFPWNBQsjLzeDLeC -mcOsCcPAk8cLYtVfZo2sP3g= +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCXQ8X2JLjOMBJw +6hecwM7y71iLEn1GXgHxGpOyPtjPmbwQMvESsO8AbNbERYWoM3vN7I9KktBaSkFp +f+PdfnHSIZzfQ7VsYLsqEqgIz8XuCH1I6ktU5ILZiLC4XgISyw4JmbdfQrbXJjQP +Suf8rJxZzaFQTIhf8dJ+WyFB8DdQgEhxUCZhJnlkS36RjQ70J/4ZgL85Vbfz0M1h 
+bNjBx9Mmd5IaFEJWy7z9GkrrF9iNr9HARp/wQF4ONC/n275m/YkLa4xxwQsKxcTE +639EwXU2I/3ttu6H2YhH4Ut8YFPnhRwvgksrXmMaSRc2LPw5I0kiTUO1USISJJ4x +RNgWTqjrAgMBAAECggEAVurwo4FyV7gzwIIi00XPJLT3ceJL7dUy1HHrEG8gchnq +gHxlHdJhYyMnPVydcosyxp75r2YxJtCoSZDdRHbVvGLoGzpy0zW6FnDl8TpCh4aF +RxKp+rvbnFf5A9ew5U+cX1PelHRnT7V6EJeAOiaNKOUJnnR7oHX59/UxZQw9HJnX +3H4xUdRDmSS3BGKXEswbd7beQjqJtEIkbConfaw32yEod0w2MC0LI4miZ87/6Hsk +pyvfpeYxXp4z3BTvFBbf/GEBFuozu63VWHayB9PDmEN/TlphoQpJQihdR2r1lz/H +I5QwVlFTDvUSFitNLu+FoaHOfgLprQndbojBXb+tcQKBgQDHCPyM4V7k97RvJgmB +ELgZiDYufDrjRLXvFzrrZ7ySU3N+nx3Gz/EhtgbHicDjnRVagHBIwi/QAfBJksCd +xcioY5k2OW+8PSTsfFZTAA6XwJp/LGfJik/JjvAVv5CnxBu9lYG4WiSBJFp59ojC +zTmfEuB4GPwrjQvzjlqaSpij9QKBgQDCjriwAB2UJIdlgK+DkryLqgim5I4cteB3 ++juVKz+S8ufFmVvmIXkyDcpyy/26VLC6esy8dV0JoWc4EeitoJvQD1JVZ5+CBTY+ +r9umx18oe2A/ZgcEf/A3Zd94jM1MwriF6YC+eIOhwhpi7T1xTLf3hc9B0OJ5B1mA +vob9rGDtXwKBgD4rkW+UCictNIAvenKFPWxEPuBgT6ij0sx/DhlwCtgOFxprK0rp +syFbkVyMq+KtM3lUez5O4c5wfJUOsPnXSOlISxhD8qHy23C/GdvNPcGrGNc2kKjE +ek20R0wTzWSJ/jxG0gE6rwJjz5sfJfLrVd9ZbyI0c7hK03vdcHGXcXxtAoGAeGHl +BwnbQ3niyTx53VijD2wTVGjhQgSLstEDowYSnTNtk8eTpG6b1gvQc32jLnMOsyQe +oJGiEr5q5re2GBDjuDZyxGOMv9/Hs7wOlkCQsbS9Vh0kRHWBRlXjk2zT7yYhFMLp +pXFeSW2X9BRFS2CkCCUkm93K9AZHLDE3x6ishNMCgYEAsDsUCzGhI49Aqe+CMP2l +WPZl7SEMYS5AtdC5sLtbLYBl8+rMXVGL2opKXqVFYBYkqMJiHGdX3Ub6XSVKLYkN +vm4PWmlQS24ZT+jlUl4jk6JU6SAlM/o6ixZl5KNR7yQm6zN2O/RHDeYm0urUQ9tF +9dux7LbIFeOoJmoDTWG2+fI= -----END PRIVATE KEY----- diff --git a/vendor/github.com/prometheus/common/config/testdata/tls-ca-chain.pem b/vendor/github.com/prometheus/common/config/testdata/tls-ca-chain.pem index 722264d89..03e4189e6 100644 --- a/vendor/github.com/prometheus/common/config/testdata/tls-ca-chain.pem +++ b/vendor/github.com/prometheus/common/config/testdata/tls-ca-chain.pem @@ -2,34 +2,34 @@ Certificate: Data: Version: 3 (0x2) Serial Number: 2 (0x2) - Signature Algorithm: sha1WithRSAEncryption - Issuer: C=US, O=Prometheus, OU=Prometheus Certificate Authority, CN=Prometheus Root CA + Signature 
Algorithm: sha1WithRSAEncryption + Issuer: C=NO, O=Green AS, OU=Green Certificate Authority, CN=Green Root CA Validity - Not Before: Apr 5 08:00:37 2019 GMT - Not After : Mar 26 08:00:37 2059 GMT - Subject: C=US, O=Prometheus, OU=Prometheus Certificate Authority, CN=Prometheus TLS CA + Not Before: Jul 13 03:47:20 2017 GMT + Not After : Jul 13 03:47:20 2027 GMT + Subject: C=NO, O=Green AS, OU=Green Certificate Authority, CN=Green TLS CA Subject Public Key Info: Public Key Algorithm: rsaEncryption - RSA Public-Key: (2048 bit) + Public-Key: (2048 bit) Modulus: - 00:aa:d2:34:6b:ed:f1:f4:01:08:e5:00:9f:75:c8: - ba:fc:4b:72:c6:04:93:af:f1:f6:b5:ce:01:0d:c6: - bd:d3:16:98:9d:e5:51:56:12:58:16:ee:18:6e:f0: - 68:a9:42:16:65:cf:e3:31:f5:90:79:9d:13:32:87: - 3b:1f:65:fd:84:88:a4:56:3d:26:54:69:05:27:5a: - ea:89:02:e7:31:9b:7d:7f:76:93:54:70:bc:17:92: - 06:9f:9f:90:4a:8a:cf:82:a7:7b:7c:71:c4:fa:34: - 56:00:32:1a:85:c5:f8:e4:4a:63:43:37:9d:60:84: - 4d:78:6e:87:12:c4:2b:1f:93:a5:fe:cc:5e:f1:df: - c1:97:ff:b7:3e:20:38:1d:71:15:11:ec:6c:7a:cc: - 0e:87:52:31:b1:b9:74:c3:07:1c:42:4b:1e:c1:17: - bc:e4:13:b7:b0:20:2e:c4:07:93:bd:a8:11:f9:da: - a7:d0:df:4a:48:be:9b:6d:65:c3:ae:58:56:c0:9f: - 17:c5:d8:32:b1:04:22:fb:5b:18:f6:20:10:50:ec: - 2d:10:4f:cc:48:8f:f2:75:dd:33:a4:0e:f5:55:da: - 2c:89:a1:3a:52:bb:11:11:0b:97:27:17:73:35:da: - 10:71:b3:9f:a8:42:91:e6:3a:66:00:f9:e5:11:8f: - 5b:57 + 00:b5:5a:b3:7a:7f:6a:5b:e9:ee:62:ee:4f:61:42: + 79:93:06:bf:81:fc:9a:1f:b5:80:83:7c:b3:a6:94: + 54:58:8a:b1:74:cb:c3:b8:3c:23:a8:69:1f:ca:2b: + af:be:97:ba:31:73:b5:b8:ce:d9:bf:bf:9a:7a:cf: + 3a:64:51:83:c9:36:d2:f7:3b:3a:0e:4c:c7:66:2e: + bf:1a:df:ce:10:aa:3d:0f:19:74:03:7e:b5:10:bb: + e8:37:bd:62:f0:42:2d:df:3d:ca:70:50:10:17:ce: + a9:ec:55:8e:87:6f:ce:9a:04:36:14:96:cb:d1:a5: + 48:d5:d2:87:02:62:93:4e:21:4a:ff:be:44:f1:d2: + 7e:ed:74:da:c2:51:26:8e:03:a0:c2:bd:bd:5f:b0: + 50:11:78:fd:ab:1d:04:86:6c:c1:8d:20:bd:05:5f: + 51:67:c6:d3:07:95:92:2d:92:90:00:c6:9f:2d:dd: + 
36:5c:dc:78:10:7c:f6:68:39:1d:2c:e0:e1:26:64: + 4f:36:34:66:a7:84:6a:90:15:3a:94:b7:79:b1:47: + f5:d2:51:95:54:bf:92:76:9a:b9:88:ee:63:f9:6c: + 0d:38:c6:b6:1c:06:43:ed:24:1d:bb:6c:72:48:cc: + 8c:f4:35:bc:43:fe:a6:96:4c:31:5f:82:0d:0d:20: + 2a:3d Exponent: 65537 (0x10001) X509v3 extensions: X509v3 Key Usage: critical @@ -37,91 +37,90 @@ Certificate: X509v3 Basic Constraints: critical CA:TRUE, pathlen:0 X509v3 Subject Key Identifier: - 4D:02:BF:71:95:6A:AA:58:C5:9C:B8:83:67:5E:64:16:99:E1:2A:9E + AE:42:88:75:DD:05:A6:8E:48:7F:50:69:F9:B7:34:23:49:B8:B4:71 X509v3 Authority Key Identifier: - keyid:3C:1E:A8:C6:4C:05:4D:20:EC:88:DB:29:D4:7B:F9:12:5D:CE:EA:1A + keyid:60:93:53:2F:C7:CF:2A:D7:F3:09:28:F6:3C:AE:9C:50:EC:93:63:E5 Authority Information Access: - CA Issuers - URI:https://example.com/ca/root-ca.cer + CA Issuers - URI:http://green.no/ca/root-ca.cer X509v3 CRL Distribution Points: Full Name: - URI:https://example.com/ca/root-ca.crl + URI:http://green.no/ca/root-ca.crl Signature Algorithm: sha1WithRSAEncryption - 63:fc:ba:30:a5:05:d6:76:14:f1:77:38:b1:41:6f:81:d9:b4: - 02:fd:bc:e5:f6:d9:e6:73:e0:71:cf:4c:fb:13:b5:6b:bd:b9: - c6:f6:28:18:36:e1:8c:d9:93:b3:78:4a:3d:39:1b:f4:fb:69: - 75:24:ae:e1:a0:2f:94:05:bf:10:3c:3e:d2:2b:a8:f3:31:25: - 2e:ed:13:ad:60:5d:22:9a:26:15:20:86:98:73:4c:f6:4b:48: - b8:1f:67:ba:4e:c9:47:ed:85:dc:38:dc:02:0c:fb:54:d5:2e: - 6c:b4:95:18:51:d1:ae:ea:e8:fb:b4:19:50:04:bc:31:7e:51: - 9e:85:29:4d:c8:f7:26:d6:d6:8d:35:2d:9e:e2:06:16:38:e2: - 56:80:ec:f3:a3:34:e3:28:c4:e8:10:d0:8a:a6:6f:20:9a:b9: - dc:b9:90:6b:ba:8a:27:2c:29:72:28:55:e7:59:a6:a7:90:ec: - 32:e8:d0:26:4a:c1:44:dd:20:bf:dc:4d:1e:7e:cc:e5:a2:5b: - e8:df:3d:4b:01:aa:48:56:17:e9:29:d8:71:83:05:36:8c:11: - 4f:77:b8:95:20:b7:c7:21:06:c2:87:97:b4:6b:d3:f7:23:ba: - 4d:5f:15:d1:0c:4d:6e:f1:6a:9d:57:5c:02:6a:d7:31:18:ef: - 5c:fc:f8:04 + 15:a7:ac:d7:25:9e:2a:d4:d1:14:b4:99:38:3d:2f:73:61:2a: + d9:b6:8b:13:ea:fe:db:78:d9:0a:6c:df:26:6e:c1:d5:4a:97: + 
42:19:dd:97:05:03:e4:2b:fc:1e:1f:38:3c:4e:b0:3b:8c:38: + ad:2b:65:fa:35:2d:81:8e:e0:f6:0a:89:4c:38:97:01:4b:9c: + ac:4e:e1:55:17:ef:0a:ad:a7:eb:1e:4b:86:23:12:f1:52:69: + cb:a3:8a:ce:fb:14:8b:86:d7:bb:81:5e:bd:2a:c7:a7:79:58: + 00:10:c0:db:ff:d4:a5:b9:19:74:b3:23:19:4a:1f:78:4b:a8: + b6:f6:20:26:c1:69:f9:89:7f:b8:1c:3b:a2:f9:37:31:80:2c: + b0:b6:2b:d2:84:44:d7:42:e4:e6:44:51:04:35:d9:1c:a4:48: + c6:b7:35:de:f2:ae:da:4b:ba:c8:09:42:8d:ed:7a:81:dc:ed: + 9d:f0:de:6e:21:b9:01:1c:ad:64:3d:25:4c:91:94:f1:13:18: + bb:89:e9:48:ac:05:73:07:c8:db:bd:69:8e:6f:02:9d:b0:18: + c0:b9:e1:a8:b1:17:50:3d:ac:05:6e:6f:63:4f:b1:73:33:60: + 9a:77:d2:81:8a:01:38:43:e9:4c:3c:90:63:a4:99:4b:d2:1b: + f9:1b:ec:ee -----BEGIN CERTIFICATE----- -MIIELTCCAxWgAwIBAgIBAjANBgkqhkiG9w0BAQUFADBqMQswCQYDVQQGEwJVUzET -MBEGA1UECgwKUHJvbWV0aGV1czEpMCcGA1UECwwgUHJvbWV0aGV1cyBDZXJ0aWZp -Y2F0ZSBBdXRob3JpdHkxGzAZBgNVBAMMElByb21ldGhldXMgUm9vdCBDQTAgFw0x -OTA0MDUwODAwMzdaGA8yMDU5MDMyNjA4MDAzN1owaTELMAkGA1UEBhMCVVMxEzAR -BgNVBAoMClByb21ldGhldXMxKTAnBgNVBAsMIFByb21ldGhldXMgQ2VydGlmaWNh -dGUgQXV0aG9yaXR5MRowGAYDVQQDDBFQcm9tZXRoZXVzIFRMUyBDQTCCASIwDQYJ -KoZIhvcNAQEBBQADggEPADCCAQoCggEBAKrSNGvt8fQBCOUAn3XIuvxLcsYEk6/x -9rXOAQ3GvdMWmJ3lUVYSWBbuGG7waKlCFmXP4zH1kHmdEzKHOx9l/YSIpFY9JlRp -BSda6okC5zGbfX92k1RwvBeSBp+fkEqKz4Kne3xxxPo0VgAyGoXF+ORKY0M3nWCE -TXhuhxLEKx+Tpf7MXvHfwZf/tz4gOB1xFRHsbHrMDodSMbG5dMMHHEJLHsEXvOQT -t7AgLsQHk72oEfnap9DfSki+m21lw65YVsCfF8XYMrEEIvtbGPYgEFDsLRBPzEiP -8nXdM6QO9VXaLImhOlK7ERELlycXczXaEHGzn6hCkeY6ZgD55RGPW1cCAwEAAaOB -3DCB2TAOBgNVHQ8BAf8EBAMCAQYwEgYDVR0TAQH/BAgwBgEB/wIBADAdBgNVHQ4E -FgQUTQK/cZVqqljFnLiDZ15kFpnhKp4wHwYDVR0jBBgwFoAUPB6oxkwFTSDsiNsp -1Hv5El3O6howPgYIKwYBBQUHAQEEMjAwMC4GCCsGAQUFBzAChiJodHRwczovL2V4 -YW1wbGUuY29tL2NhL3Jvb3QtY2EuY2VyMDMGA1UdHwQsMCowKKAmoCSGImh0dHBz -Oi8vZXhhbXBsZS5jb20vY2Evcm9vdC1jYS5jcmwwDQYJKoZIhvcNAQEFBQADggEB -AGP8ujClBdZ2FPF3OLFBb4HZtAL9vOX22eZz4HHPTPsTtWu9ucb2KBg24YzZk7N4 -Sj05G/T7aXUkruGgL5QFvxA8PtIrqPMxJS7tE61gXSKaJhUghphzTPZLSLgfZ7pO 
-yUfthdw43AIM+1TVLmy0lRhR0a7q6Pu0GVAEvDF+UZ6FKU3I9ybW1o01LZ7iBhY4 -4laA7POjNOMoxOgQ0IqmbyCaudy5kGu6iicsKXIoVedZpqeQ7DLo0CZKwUTdIL/c -TR5+zOWiW+jfPUsBqkhWF+kp2HGDBTaMEU93uJUgt8chBsKHl7Rr0/cjuk1fFdEM -TW7xap1XXAJq1zEY71z8+AQ= +MIIECzCCAvOgAwIBAgIBAjANBgkqhkiG9w0BAQUFADBeMQswCQYDVQQGEwJOTzER +MA8GA1UECgwIR3JlZW4gQVMxJDAiBgNVBAsMG0dyZWVuIENlcnRpZmljYXRlIEF1 +dGhvcml0eTEWMBQGA1UEAwwNR3JlZW4gUm9vdCBDQTAeFw0xNzA3MTMwMzQ3MjBa +Fw0yNzA3MTMwMzQ3MjBaMF0xCzAJBgNVBAYTAk5PMREwDwYDVQQKDAhHcmVlbiBB +UzEkMCIGA1UECwwbR3JlZW4gQ2VydGlmaWNhdGUgQXV0aG9yaXR5MRUwEwYDVQQD +DAxHcmVlbiBUTFMgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC1 +WrN6f2pb6e5i7k9hQnmTBr+B/JoftYCDfLOmlFRYirF0y8O4PCOoaR/KK6++l7ox +c7W4ztm/v5p6zzpkUYPJNtL3OzoOTMdmLr8a384Qqj0PGXQDfrUQu+g3vWLwQi3f +PcpwUBAXzqnsVY6Hb86aBDYUlsvRpUjV0ocCYpNOIUr/vkTx0n7tdNrCUSaOA6DC +vb1fsFAReP2rHQSGbMGNIL0FX1FnxtMHlZItkpAAxp8t3TZc3HgQfPZoOR0s4OEm +ZE82NGanhGqQFTqUt3mxR/XSUZVUv5J2mrmI7mP5bA04xrYcBkPtJB27bHJIzIz0 +NbxD/qaWTDFfgg0NICo9AgMBAAGjgdQwgdEwDgYDVR0PAQH/BAQDAgEGMBIGA1Ud +EwEB/wQIMAYBAf8CAQAwHQYDVR0OBBYEFK5CiHXdBaaOSH9Qafm3NCNJuLRxMB8G +A1UdIwQYMBaAFGCTUy/HzyrX8wko9jyunFDsk2PlMDoGCCsGAQUFBwEBBC4wLDAq +BggrBgEFBQcwAoYeaHR0cDovL2dyZWVuLm5vL2NhL3Jvb3QtY2EuY2VyMC8GA1Ud +HwQoMCYwJKAioCCGHmh0dHA6Ly9ncmVlbi5uby9jYS9yb290LWNhLmNybDANBgkq +hkiG9w0BAQUFAAOCAQEAFaes1yWeKtTRFLSZOD0vc2Eq2baLE+r+23jZCmzfJm7B +1UqXQhndlwUD5Cv8Hh84PE6wO4w4rStl+jUtgY7g9gqJTDiXAUucrE7hVRfvCq2n +6x5LhiMS8VJpy6OKzvsUi4bXu4FevSrHp3lYABDA2//UpbkZdLMjGUofeEuotvYg +JsFp+Yl/uBw7ovk3MYAssLYr0oRE10Lk5kRRBDXZHKRIxrc13vKu2ku6yAlCje16 +gdztnfDebiG5ARytZD0lTJGU8RMYu4npSKwFcwfI271pjm8CnbAYwLnhqLEXUD2s +BW5vY0+xczNgmnfSgYoBOEPpTDyQY6SZS9Ib+Rvs7g== -----END CERTIFICATE----- Certificate: Data: Version: 3 (0x2) Serial Number: 1 (0x1) - Signature Algorithm: sha1WithRSAEncryption - Issuer: C=US, O=Prometheus, OU=Prometheus Certificate Authority, CN=Prometheus Root CA + Signature Algorithm: sha1WithRSAEncryption + Issuer: C=NO, O=Green AS, OU=Green Certificate Authority, CN=Green Root CA 
Validity - Not Before: Apr 5 07:55:00 2019 GMT - Not After : Mar 26 07:55:00 2059 GMT - Subject: C=US, O=Prometheus, OU=Prometheus Certificate Authority, CN=Prometheus Root CA + Not Before: Jul 13 03:44:39 2017 GMT + Not After : Dec 31 23:59:59 2030 GMT + Subject: C=NO, O=Green AS, OU=Green Certificate Authority, CN=Green Root CA Subject Public Key Info: Public Key Algorithm: rsaEncryption - RSA Public-Key: (2048 bit) + Public-Key: (2048 bit) Modulus: - 00:bf:b9:e2:ab:5f:61:22:e1:4e:cd:ee:da:b0:26: - 2e:bb:b0:7e:1c:ce:10:be:16:29:35:0c:0c:1d:93: - 01:29:2a:f6:f9:c2:6e:5c:10:44:ca:f8:dc:ad:7a: - 06:64:0f:8a:18:ad:b2:a2:94:49:c9:ba:8c:45:94: - 7c:d9:e0:11:45:d8:16:79:a2:20:9f:8c:63:60:72: - 2a:5b:f9:66:80:ac:85:67:01:5a:eb:91:c1:d2:88: - 87:9e:4c:18:c9:f2:f0:7a:18:c0:e6:ab:2c:78:de: - 5f:b2:22:4e:94:9c:f5:cd:e6:e2:33:30:e9:20:10: - a6:a1:75:eb:59:ab:45:a9:f7:3e:54:40:ae:05:25: - be:74:c5:3a:fd:af:73:16:60:45:7c:4a:e0:0e:0d: - a1:15:7f:9a:1f:c2:a7:04:ad:ef:b3:e4:f6:00:2c: - 4e:0b:04:90:49:ee:d3:db:a6:12:c4:91:0b:32:4f: - 11:84:c7:c4:8a:ef:51:66:7a:b0:20:2f:cb:95:8d: - 96:57:60:66:5e:f9:4f:5a:94:9c:71:ad:eb:ca:70: - 3e:62:06:c2:3a:29:f8:9e:86:af:da:07:78:f8:31: - af:42:48:49:9e:4a:df:1b:27:1f:44:35:81:6d:fa: - 7a:c5:6a:0a:35:23:c7:c4:d5:fe:c9:9e:61:c9:30: - cd:1f + 00:a7:e8:ed:de:d4:54:08:41:07:40:d5:c0:43:d6: + ab:d3:9e:21:87:c6:13:bf:a7:cf:3d:08:4f:c1:fe: + 8f:e5:6c:c5:89:97:e5:27:75:26:c3:2a:73:2d:34: + 7c:6f:35:8d:40:66:61:05:c0:eb:e9:b3:38:47:f8: + 8b:26:35:2c:df:dc:24:31:fe:72:e3:87:10:d1:f7: + a0:57:b7:f3:b1:1a:fe:c7:4b:f8:7b:14:6d:73:08: + 54:eb:63:3c:0c:ce:22:95:5f:3f:f2:6f:89:ae:63: + da:80:74:36:21:13:e8:91:01:58:77:cc:c2:f2:42: + bf:eb:b3:60:a7:21:ed:88:24:7f:eb:ff:07:41:9b: + 93:c8:5f:6a:8e:a6:1a:15:3c:bc:e7:0d:fd:05:fd: + 3c:c1:1c:1d:1f:57:2b:40:27:62:a1:7c:48:63:c1: + 45:e7:2f:20:ed:92:1c:42:94:e4:58:70:7a:b6:d2: + 85:c5:61:d8:cd:c6:37:6b:72:3b:7f:af:55:81:d6: + 9d:dc:10:c9:d8:0e:81:e4:5e:40:13:2f:20:e8:6b: + 46:81:ce:88:47:dd:38:71:3d:ef:21:cc:c0:67:cf: 
+ 0a:f4:e9:3f:a8:9d:26:25:2e:23:1e:a3:11:18:cb: + d1:70:1c:9e:7d:09:b1:a4:20:dc:95:15:1d:49:cf: + 1b:ad Exponent: 65537 (0x10001) X509v3 extensions: X509v3 Key Usage: critical @@ -129,45 +128,45 @@ Certificate: X509v3 Basic Constraints: critical CA:TRUE X509v3 Subject Key Identifier: - 3C:1E:A8:C6:4C:05:4D:20:EC:88:DB:29:D4:7B:F9:12:5D:CE:EA:1A + 60:93:53:2F:C7:CF:2A:D7:F3:09:28:F6:3C:AE:9C:50:EC:93:63:E5 X509v3 Authority Key Identifier: - keyid:3C:1E:A8:C6:4C:05:4D:20:EC:88:DB:29:D4:7B:F9:12:5D:CE:EA:1A + keyid:60:93:53:2F:C7:CF:2A:D7:F3:09:28:F6:3C:AE:9C:50:EC:93:63:E5 Signature Algorithm: sha1WithRSAEncryption - 56:2f:79:e5:12:91:f5:19:a7:d1:32:28:fd:e3:9d:8f:e1:3c: - bb:a3:a5:f2:55:8a:03:ad:2c:1d:18:82:e1:7f:19:75:d9:47: - 5b:e7:7c:e4:a5:e0:eb:dc:7e:24:a3:7d:99:1a:cf:39:ba:a5: - b4:b8:45:68:83:cf:70:ad:56:f2:34:73:65:fc:6c:b0:53:9a: - 79:04:f7:3e:7e:4b:22:1b:e7:76:23:20:bc:9c:05:a2:5d:01: - d2:f0:09:49:17:b2:61:74:1a:5b:f4:e0:fd:ce:11:ba:13:4a: - e6:07:11:7d:30:e2:11:87:ee:33:1a:68:de:67:f4:ac:b5:58: - 1a:ac:cf:7a:2d:fd:c3:44:5b:4b:cd:6c:ff:f6:49:b4:55:4a: - 09:a0:92:2d:57:3b:69:85:54:3e:e9:ec:ef:b2:a5:7a:29:75: - 2b:f8:eb:4b:d4:cf:68:ee:3e:c8:63:7e:12:eb:e4:2f:63:a3: - a7:c8:0f:e9:39:ff:5c:29:65:7f:25:f0:42:bf:07:ba:06:b8: - 5e:d6:56:ba:f8:67:56:1b:42:aa:b3:04:d8:6e:88:10:a5:70: - b5:81:04:a4:90:a3:f0:83:4d:0c:6b:12:5d:a4:4c:83:5a:ff: - a8:7a:86:61:ff:0f:4c:e5:0f:17:d1:64:3c:bd:d9:22:7e:b7: - fa:9b:83:ba + a7:77:71:8b:1a:e5:5a:5b:87:54:08:bf:07:3e:cb:99:2f:dc: + 0e:8d:63:94:95:83:19:c9:92:82:d5:cb:5b:8f:1f:86:55:bc: + 70:01:1d:33:46:ec:99:de:6b:1f:c3:c2:7a:dd:ef:69:ab:96: + 58:ec:6c:6f:6c:70:82:71:8a:7f:f0:3b:80:90:d5:64:fa:80: + 27:b8:7b:50:69:98:4b:37:99:ad:bf:a2:5b:93:22:5e:96:44: + 3c:5a:cf:0c:f4:62:63:4a:6f:72:a7:f6:89:1d:09:26:3d:8f: + a8:86:d4:b4:bc:dd:b3:38:ca:c0:59:16:8c:20:1f:89:35:12: + b4:2d:c0:e9:de:93:e0:39:76:32:fc:80:db:da:44:26:fd:01: + 32:74:97:f8:44:ae:fe:05:b1:34:96:13:34:56:73:b4:93:a5: + 
55:56:d1:01:51:9d:9c:55:e7:38:53:28:12:4e:38:72:0c:8f: + bd:91:4c:45:48:3b:e1:0d:03:5f:58:40:c9:d3:a0:ac:b3:89: + ce:af:27:8a:0f:ab:ec:72:4d:40:77:30:6b:36:fd:32:46:9f: + ee:f9:c4:f5:17:06:0f:4b:d3:88:f5:a4:2f:3d:87:9e:f5:26: + 74:f0:c9:dc:cb:ad:d9:a7:8a:d3:71:15:00:d3:5d:9f:4c:59: + 3e:24:63:f5 -----BEGIN CERTIFICATE----- -MIIDtDCCApygAwIBAgIBATANBgkqhkiG9w0BAQUFADBqMQswCQYDVQQGEwJVUzET -MBEGA1UECgwKUHJvbWV0aGV1czEpMCcGA1UECwwgUHJvbWV0aGV1cyBDZXJ0aWZp -Y2F0ZSBBdXRob3JpdHkxGzAZBgNVBAMMElByb21ldGhldXMgUm9vdCBDQTAgFw0x -OTA0MDUwNzU1MDBaGA8yMDU5MDMyNjA3NTUwMFowajELMAkGA1UEBhMCVVMxEzAR -BgNVBAoMClByb21ldGhldXMxKTAnBgNVBAsMIFByb21ldGhldXMgQ2VydGlmaWNh -dGUgQXV0aG9yaXR5MRswGQYDVQQDDBJQcm9tZXRoZXVzIFJvb3QgQ0EwggEiMA0G -CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC/ueKrX2Ei4U7N7tqwJi67sH4czhC+ -Fik1DAwdkwEpKvb5wm5cEETK+NytegZkD4oYrbKilEnJuoxFlHzZ4BFF2BZ5oiCf -jGNgcipb+WaArIVnAVrrkcHSiIeeTBjJ8vB6GMDmqyx43l+yIk6UnPXN5uIzMOkg -EKahdetZq0Wp9z5UQK4FJb50xTr9r3MWYEV8SuAODaEVf5ofwqcEre+z5PYALE4L -BJBJ7tPbphLEkQsyTxGEx8SK71FmerAgL8uVjZZXYGZe+U9alJxxrevKcD5iBsI6 -Kfiehq/aB3j4Ma9CSEmeSt8bJx9ENYFt+nrFago1I8fE1f7JnmHJMM0fAgMBAAGj -YzBhMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQ8 -HqjGTAVNIOyI2ynUe/kSXc7qGjAfBgNVHSMEGDAWgBQ8HqjGTAVNIOyI2ynUe/kS -Xc7qGjANBgkqhkiG9w0BAQUFAAOCAQEAVi955RKR9Rmn0TIo/eOdj+E8u6Ol8lWK -A60sHRiC4X8ZddlHW+d85KXg69x+JKN9mRrPObqltLhFaIPPcK1W8jRzZfxssFOa -eQT3Pn5LIhvndiMgvJwFol0B0vAJSReyYXQaW/Tg/c4RuhNK5gcRfTDiEYfuMxpo -3mf0rLVYGqzPei39w0RbS81s//ZJtFVKCaCSLVc7aYVUPuns77Kleil1K/jrS9TP -aO4+yGN+EuvkL2Ojp8gP6Tn/XCllfyXwQr8Huga4XtZWuvhnVhtCqrME2G6IEKVw -tYEEpJCj8INNDGsSXaRMg1r/qHqGYf8PTOUPF9FkPL3ZIn63+puDug== +MIIDnDCCAoSgAwIBAgIBATANBgkqhkiG9w0BAQUFADBeMQswCQYDVQQGEwJOTzER +MA8GA1UECgwIR3JlZW4gQVMxJDAiBgNVBAsMG0dyZWVuIENlcnRpZmljYXRlIEF1 +dGhvcml0eTEWMBQGA1UEAwwNR3JlZW4gUm9vdCBDQTAgFw0xNzA3MTMwMzQ0Mzla +GA8yMDMwMTIzMTIzNTk1OVowXjELMAkGA1UEBhMCTk8xETAPBgNVBAoMCEdyZWVu +IEFTMSQwIgYDVQQLDBtHcmVlbiBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxFjAUBgNV 
+BAMMDUdyZWVuIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQCn6O3e1FQIQQdA1cBD1qvTniGHxhO/p889CE/B/o/lbMWJl+UndSbDKnMtNHxv +NY1AZmEFwOvpszhH+IsmNSzf3CQx/nLjhxDR96BXt/OxGv7HS/h7FG1zCFTrYzwM +ziKVXz/yb4muY9qAdDYhE+iRAVh3zMLyQr/rs2CnIe2IJH/r/wdBm5PIX2qOphoV +PLznDf0F/TzBHB0fVytAJ2KhfEhjwUXnLyDtkhxClORYcHq20oXFYdjNxjdrcjt/ +r1WB1p3cEMnYDoHkXkATLyDoa0aBzohH3ThxPe8hzMBnzwr06T+onSYlLiMeoxEY +y9FwHJ59CbGkINyVFR1JzxutAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBRgk1Mvx88q1/MJKPY8rpxQ7JNj5TAfBgNV +HSMEGDAWgBRgk1Mvx88q1/MJKPY8rpxQ7JNj5TANBgkqhkiG9w0BAQUFAAOCAQEA +p3dxixrlWluHVAi/Bz7LmS/cDo1jlJWDGcmSgtXLW48fhlW8cAEdM0bsmd5rH8PC +et3vaauWWOxsb2xwgnGKf/A7gJDVZPqAJ7h7UGmYSzeZrb+iW5MiXpZEPFrPDPRi +Y0pvcqf2iR0JJj2PqIbUtLzdszjKwFkWjCAfiTUStC3A6d6T4Dl2MvyA29pEJv0B +MnSX+ESu/gWxNJYTNFZztJOlVVbRAVGdnFXnOFMoEk44cgyPvZFMRUg74Q0DX1hA +ydOgrLOJzq8nig+r7HJNQHcwazb9Mkaf7vnE9RcGD0vTiPWkLz2HnvUmdPDJ3Mut +2aeK03EVANNdn0xZPiRj9Q== -----END CERTIFICATE----- diff --git a/vendor/github.com/prometheus/common/config/tls_config_test.go b/vendor/github.com/prometheus/common/config/tls_config_test.go index cbad618a7..31ddb6e9a 100644 --- a/vendor/github.com/prometheus/common/config/tls_config_test.go +++ b/vendor/github.com/prometheus/common/config/tls_config_test.go @@ -11,8 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// +build go1.8 - package config import ( @@ -52,12 +50,11 @@ var expectedTLSConfigs = []struct { func TestValidTLSConfig(t *testing.T) { for _, cfg := range expectedTLSConfigs { + cfg.config.BuildNameToCertificate() got, err := LoadTLSConfig("testdata/" + cfg.filename) if err != nil { t.Errorf("Error parsing %s: %s", cfg.filename, err) } - // non-nil functions are never equal. 
- got.GetClientCertificate = nil if !reflect.DeepEqual(*got, *cfg.config) { t.Fatalf("%v: unexpected config result: \n\n%v\n expected\n\n%v", cfg.filename, got, cfg.config) } diff --git a/vendor/github.com/prometheus/common/expfmt/decode_test.go b/vendor/github.com/prometheus/common/expfmt/decode_test.go index 669d56c07..47c480ef7 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode_test.go +++ b/vendor/github.com/prometheus/common/expfmt/decode_test.go @@ -205,7 +205,7 @@ func TestProtoDecoder(t *testing.T) { &model.Sample{ Metric: model.Metric{ model.MetricNameLabel: "request_duration_microseconds_bucket", - "le": "100", + "le": "100", }, Value: 123, Timestamp: testTime, @@ -213,7 +213,7 @@ func TestProtoDecoder(t *testing.T) { &model.Sample{ Metric: model.Metric{ model.MetricNameLabel: "request_duration_microseconds_bucket", - "le": "120", + "le": "120", }, Value: 412, Timestamp: testTime, @@ -221,7 +221,7 @@ func TestProtoDecoder(t *testing.T) { &model.Sample{ Metric: model.Metric{ model.MetricNameLabel: "request_duration_microseconds_bucket", - "le": "144", + "le": "144", }, Value: 592, Timestamp: testTime, @@ -229,7 +229,7 @@ func TestProtoDecoder(t *testing.T) { &model.Sample{ Metric: model.Metric{ model.MetricNameLabel: "request_duration_microseconds_bucket", - "le": "172.8", + "le": "172.8", }, Value: 1524, Timestamp: testTime, @@ -237,7 +237,7 @@ func TestProtoDecoder(t *testing.T) { &model.Sample{ Metric: model.Metric{ model.MetricNameLabel: "request_duration_microseconds_bucket", - "le": "+Inf", + "le": "+Inf", }, Value: 2693, Timestamp: testTime, @@ -265,55 +265,55 @@ func TestProtoDecoder(t *testing.T) { Metric: model.Metric{ model.MetricNameLabel: "request_duration_microseconds_count", }, - Value: 2693, - Timestamp: testTime, + Value: 2693, + Timestamp:testTime, }, &model.Sample{ Metric: model.Metric{ - "le": "+Inf", + "le": "+Inf", model.MetricNameLabel: "request_duration_microseconds_bucket", }, - Value: 2693, - Timestamp: testTime, + 
Value: 2693, + Timestamp:testTime, }, &model.Sample{ Metric: model.Metric{ model.MetricNameLabel: "request_duration_microseconds_sum", }, - Value: 1756047.3, - Timestamp: testTime, + Value: 1756047.3, + Timestamp:testTime, }, &model.Sample{ Metric: model.Metric{ - "le": "172.8", + "le": "172.8", model.MetricNameLabel: "request_duration_microseconds_bucket", }, - Value: 1524, - Timestamp: testTime, + Value: 1524, + Timestamp:testTime, }, &model.Sample{ Metric: model.Metric{ - "le": "144", + "le": "144", model.MetricNameLabel: "request_duration_microseconds_bucket", }, - Value: 592, - Timestamp: testTime, + Value: 592, + Timestamp:testTime, }, &model.Sample{ Metric: model.Metric{ - "le": "120", + "le": "120", model.MetricNameLabel: "request_duration_microseconds_bucket", }, - Value: 412, - Timestamp: testTime, + Value: 412, + Timestamp:testTime, }, &model.Sample{ Metric: model.Metric{ - "le": "100", + "le": "100", model.MetricNameLabel: "request_duration_microseconds_bucket", }, - Value: 123, - Timestamp: testTime, + Value: 123, + Timestamp:testTime, }, }, }, diff --git a/vendor/github.com/prometheus/common/expfmt/encode_test.go b/vendor/github.com/prometheus/common/expfmt/encode_test.go index 1b2287333..16255e0e7 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode_test.go +++ b/vendor/github.com/prometheus/common/expfmt/encode_test.go @@ -1,16 +1,3 @@ -// Copyright 2018 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- package expfmt import ( diff --git a/vendor/github.com/prometheus/common/go.mod b/vendor/github.com/prometheus/common/go.mod deleted file mode 100644 index 53aa3444a..000000000 --- a/vendor/github.com/prometheus/common/go.mod +++ /dev/null @@ -1,26 +0,0 @@ -module github.com/prometheus/common - -require ( - github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc // indirect - github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf // indirect - github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 // indirect - github.com/go-kit/kit v0.8.0 - github.com/go-logfmt/logfmt v0.3.0 // indirect - github.com/go-stack/stack v1.8.0 // indirect - github.com/gogo/protobuf v1.1.1 // indirect - github.com/golang/protobuf v1.2.0 - github.com/julienschmidt/httprouter v1.2.0 - github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.1 - github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223 - github.com/pkg/errors v0.8.0 - github.com/prometheus/client_golang v0.9.1 - github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 - github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d // indirect - github.com/sirupsen/logrus v1.2.0 - golang.org/x/net v0.0.0-20181114220301-adae6a3d119a // indirect - golang.org/x/sync v0.0.0-20181108010431-42b317875d0f // indirect - golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5 - gopkg.in/alecthomas/kingpin.v2 v2.2.6 - gopkg.in/yaml.v2 v2.2.1 -) diff --git a/vendor/github.com/prometheus/common/go.sum b/vendor/github.com/prometheus/common/go.sum deleted file mode 100644 index 3e2d13279..000000000 --- a/vendor/github.com/prometheus/common/go.sum +++ /dev/null @@ -1,58 +0,0 @@ -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= 
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0 h1:8HUsc87TaSWLKwrnumgC8/YconD2fJQsRJAsWaPg2ic= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/julienschmidt/httprouter v1.2.0 h1:TDTW5Yz1mjftljbcKqRcrYhd4XeOoI98t+9HbQbYf7g= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= 
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223 h1:F9x/1yl3T2AeKLr2AMdilSD8+f9bvMnNN8VS5iDtovc= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.9.1 h1:K47Rk0v/fkEfwfQet2KWhscE0cJzjgCCDBG2KHZoVno= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d h1:GoAlyOgbOEIFdaDqxJVlbOQ1DtGmZWs/Qau0hIlk+WQ= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= -github.com/stretchr/testify v1.2.2/go.mod 
h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 h1:u+LnwYTOOW7Ukr/fppxEb1Nwz0AtPflrblfvUudpo+I= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a h1:gOpx8G595UYyvj8UK4+OFyY4rx037g3fmfhe5SasG3U= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5 h1:mzjBh+S5frKOsOBobWIMAbXavqjmgO17k/2puhcFR94= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go index 26e92288c..648b38cb6 100644 --- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go +++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go @@ -1,12 +1,12 @@ /* -Copyright (c) 2011, Open Knowledge 
Foundation Ltd. -All rights reserved. - HTTP Content-Type Autonegotiation. The functions in this package implement the behaviour specified in http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg_test.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg_test.go index aa424b3da..41d328f1d 100644 --- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg_test.go +++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg_test.go @@ -1,43 +1,3 @@ -/* -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. - -HTTP Content-Type Autonegotiation. - -The functions in this package implement the behaviour specified in -http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -*/ package goautoneg import ( diff --git a/vendor/github.com/prometheus/common/model/fingerprinting_test.go b/vendor/github.com/prometheus/common/model/fingerprinting_test.go deleted file mode 100644 index b12375100..000000000 --- a/vendor/github.com/prometheus/common/model/fingerprinting_test.go +++ /dev/null @@ -1,214 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package model - -import ( - "sort" - "testing" -) - -func TestFingerprintFromString(t *testing.T) { - fs := "4294967295" - - f, err := FingerprintFromString(fs) - - if err != nil { - t.Errorf("unexpected error while getting Fingerprint from string: %s", err.Error()) - } - - expected := Fingerprint(285960729237) - - if expected != f { - t.Errorf("expected to get %d, but got %d instead", f, expected) - } - - f, err = ParseFingerprint(fs) - - if err != nil { - t.Errorf("unexpected error while getting Fingerprint from string: %s", err.Error()) - } - - if expected != f { - t.Errorf("expected to get %d, but got %d instead", f, expected) - } -} - -func TestFingerprintsSort(t *testing.T) { - fingerPrints := Fingerprints{ - 14695981039346656037, - 285960729237, - 0, - 4294967295, - 285960729237, - 18446744073709551615, - } - - sort.Sort(fingerPrints) - - expected := Fingerprints{ - 0, - 4294967295, - 285960729237, - 285960729237, - 14695981039346656037, - 18446744073709551615, - } - - for i, f := range fingerPrints { - if f != expected[i] { - t.Errorf("expected Fingerprint %d, but got %d for index %d", expected[i], f, i) - } - } -} - -func TestFingerprintSet(t *testing.T) { - // Testing with two sets of unequal length. - f := FingerprintSet{ - 14695981039346656037: struct{}{}, - 0: struct{}{}, - 4294967295: struct{}{}, - 285960729237: struct{}{}, - 18446744073709551615: struct{}{}, - } - - f2 := FingerprintSet{ - 285960729237: struct{}{}, - } - - if f.Equal(f2) { - t.Errorf("expected two FingerPrintSets of unequal length to be unequal") - } - - // Testing with two unequal sets of equal length. - f = FingerprintSet{ - 14695981039346656037: struct{}{}, - 0: struct{}{}, - 4294967295: struct{}{}, - } - - f2 = FingerprintSet{ - 14695981039346656037: struct{}{}, - 0: struct{}{}, - 285960729237: struct{}{}, - } - - if f.Equal(f2) { - t.Errorf("expected two FingerPrintSets of unequal content to be unequal") - } - - // Testing with equal sets of equal length. 
- f = FingerprintSet{ - 14695981039346656037: struct{}{}, - 0: struct{}{}, - 4294967295: struct{}{}, - } - - f2 = FingerprintSet{ - 14695981039346656037: struct{}{}, - 0: struct{}{}, - 4294967295: struct{}{}, - } - - if !f.Equal(f2) { - t.Errorf("expected two FingerPrintSets of equal content to be equal") - } -} - -func TestFingerprintIntersection(t *testing.T) { - scenarios := []struct { - name string - input1 FingerprintSet - input2 FingerprintSet - expected FingerprintSet - }{ - { - name: "two empty sets", - input1: FingerprintSet{}, - input2: FingerprintSet{}, - expected: FingerprintSet{}, - }, - { - name: "one empty set", - input1: FingerprintSet{ - 0: struct{}{}, - }, - input2: FingerprintSet{}, - expected: FingerprintSet{}, - }, - { - name: "two non-empty unequal sets", - input1: FingerprintSet{ - 14695981039346656037: struct{}{}, - 0: struct{}{}, - 4294967295: struct{}{}, - }, - - input2: FingerprintSet{ - 14695981039346656037: struct{}{}, - 0: struct{}{}, - 4294967295: struct{}{}, - }, - expected: FingerprintSet{ - 14695981039346656037: struct{}{}, - 0: struct{}{}, - 4294967295: struct{}{}, - }, - }, - { - name: "two non-empty equal sets", - input1: FingerprintSet{ - 14695981039346656037: struct{}{}, - 0: struct{}{}, - 285960729237: struct{}{}, - }, - - input2: FingerprintSet{ - 14695981039346656037: struct{}{}, - 0: struct{}{}, - 4294967295: struct{}{}, - }, - expected: FingerprintSet{ - 14695981039346656037: struct{}{}, - 0: struct{}{}, - }, - }, - { - name: "two non-empty equal sets of unequal length", - input1: FingerprintSet{ - 14695981039346656037: struct{}{}, - 0: struct{}{}, - 285960729237: struct{}{}, - }, - - input2: FingerprintSet{ - 14695981039346656037: struct{}{}, - 0: struct{}{}, - }, - expected: FingerprintSet{ - 14695981039346656037: struct{}{}, - 0: struct{}{}, - }, - }, - } - - for _, scenario := range scenarios { - s1 := scenario.input1 - s2 := scenario.input2 - actual := s1.Intersection(s2) - - if !actual.Equal(scenario.expected) { - 
t.Errorf("expected %v to be equal to %v", actual, scenario.expected) - } - } -} diff --git a/vendor/github.com/prometheus/common/model/labels_test.go b/vendor/github.com/prometheus/common/model/labels_test.go index 2ee5b31a3..e8df28ffa 100644 --- a/vendor/github.com/prometheus/common/model/labels_test.go +++ b/vendor/github.com/prometheus/common/model/labels_test.go @@ -138,63 +138,3 @@ func TestLabelNameIsValid(t *testing.T) { } } } - -func TestSortLabelPairs(t *testing.T) { - labelPairs := LabelPairs{ - { - Name: "FooName", - Value: "FooValue", - }, - { - Name: "FooName", - Value: "BarValue", - }, - { - Name: "BarName", - Value: "FooValue", - }, - { - Name: "BazName", - Value: "BazValue", - }, - { - Name: "BarName", - Value: "FooValue", - }, - { - Name: "BazName", - Value: "FazValue", - }, - } - - sort.Sort(labelPairs) - - expectedLabelPairs := LabelPairs{ - { - Name: "BarName", - Value: "FooValue", - }, - { - Name: "BarName", - Value: "FooValue", - }, - { - Name: "BazName", - Value: "BazValue", - }, - { - Name: "BazName", - Value: "FazValue", - }, - { - Name: "FooName", - Value: "BarValue", - }, - } - - for i, expected := range expectedLabelPairs { - if expected.Name != labelPairs[i].Name || expected.Value != labelPairs[i].Value { - t.Errorf("%d expected %s, got %s", i, expected, labelPairs[i]) - } - } -} diff --git a/vendor/github.com/prometheus/common/model/labelset_test.go b/vendor/github.com/prometheus/common/model/labelset_test.go deleted file mode 100644 index dfdfc5949..000000000 --- a/vendor/github.com/prometheus/common/model/labelset_test.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package model - -import ( - "encoding/json" - "testing" -) - -func TestUnmarshalJSONLabelSet(t *testing.T) { - type testConfig struct { - LabelSet LabelSet `yaml:"labelSet,omitempty"` - } - - // valid LabelSet JSON - labelSetJSON := `{ - "labelSet": { - "monitor": "codelab", - "foo": "bar" - } -}` - var c testConfig - err := json.Unmarshal([]byte(labelSetJSON), &c) - - if err != nil { - t.Errorf("unexpected error while marshalling JSON : %s", err.Error()) - } - - labelSetString := c.LabelSet.String() - - expected := `{foo="bar", monitor="codelab"}` - - if expected != labelSetString { - t.Errorf("expected %s but got %s", expected, labelSetString) - } - - // invalid LabelSet JSON - invalidlabelSetJSON := `{ - "labelSet": { - "1nvalid_23name": "codelab", - "foo": "bar" - } -}` - - err = json.Unmarshal([]byte(invalidlabelSetJSON), &c) - expectedErr := `"1nvalid_23name" is not a valid label name` - if err == nil || err.Error() != expectedErr { - t.Errorf("expected an error with message '%s' to be thrown", expectedErr) - } -} - -func TestLabelSetClone(t *testing.T) { - labelSet := LabelSet{ - "monitor": "codelab", - "foo": "bar", - "bar": "baz", - } - - cloneSet := labelSet.Clone() - - if len(labelSet) != len(cloneSet) { - t.Errorf("expected the length of the cloned Label set to be %d, but got %d", - len(labelSet), len(cloneSet)) - } - - for ln, lv := range labelSet { - expected := cloneSet[ln] - if expected != lv { - t.Errorf("expected to get LabelValue %s, but got %s for LabelName %s", expected, lv, ln) - } - } -} - -func TestLabelSetMerge(t 
*testing.T) { - labelSet := LabelSet{ - "monitor": "codelab", - "foo": "bar", - "bar": "baz", - } - - labelSet2 := LabelSet{ - "monitor": "codelab", - "dolor": "mi", - "lorem": "ipsum", - } - - expectedSet := LabelSet{ - "monitor": "codelab", - "foo": "bar", - "bar": "baz", - "dolor": "mi", - "lorem": "ipsum", - } - - mergedSet := labelSet.Merge(labelSet2) - - if len(mergedSet) != len(expectedSet) { - t.Errorf("expected the length of the cloned Label set to be %d, but got %d", - len(expectedSet), len(mergedSet)) - } - - for ln, lv := range mergedSet { - expected := expectedSet[ln] - if expected != lv { - t.Errorf("expected to get LabelValue %s, but got %s for LabelName %s", expected, lv, ln) - } - } - -} diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go index 00804b7fe..f7250909b 100644 --- a/vendor/github.com/prometheus/common/model/metric.go +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -21,6 +21,7 @@ import ( ) var ( + separator = []byte{0} // MetricNameRE is a regular expression matching valid metric // names. Note that the IsValidMetricName function performs the same // check but faster than a match with this regular expression. 
diff --git a/vendor/github.com/prometheus/common/model/metric_test.go b/vendor/github.com/prometheus/common/model/metric_test.go index db447f6f2..d708b575c 100644 --- a/vendor/github.com/prometheus/common/model/metric_test.go +++ b/vendor/github.com/prometheus/common/model/metric_test.go @@ -120,7 +120,7 @@ func TestMetricNameIsValid(t *testing.T) { valid: true, }, { - mn: "", + mn: "", valid: false, }, } @@ -134,72 +134,3 @@ func TestMetricNameIsValid(t *testing.T) { } } } - -func TestMetricClone(t *testing.T) { - m := Metric{ - "first_name": "electro", - "occupation": "robot", - "manufacturer": "westinghouse", - } - - m2 := m.Clone() - - if len(m) != len(m2) { - t.Errorf("expected the length of the cloned metric to be equal to the input metric") - } - - for ln, lv := range m2 { - expected := m[ln] - if expected != lv { - t.Errorf("expected label value %s but got %s for label name %s", expected, lv, ln) - } - } -} - -func TestMetricToString(t *testing.T) { - scenarios := []struct { - name string - input Metric - expected string - }{ - { - name: "valid metric without __name__ label", - input: Metric{ - "first_name": "electro", - "occupation": "robot", - "manufacturer": "westinghouse", - }, - expected: `{first_name="electro", manufacturer="westinghouse", occupation="robot"}`, - }, - { - name: "valid metric with __name__ label", - input: Metric{ - "__name__": "electro", - "occupation": "robot", - "manufacturer": "westinghouse", - }, - expected: `electro{manufacturer="westinghouse", occupation="robot"}`, - }, - { - name: "empty metric with __name__ label", - input: Metric{ - "__name__": "fooname", - }, - expected: "fooname", - }, - { - name: "empty metric", - input: Metric{}, - expected: "{}", - }, - } - - for _, scenario := range scenarios { - t.Run(scenario.name, func(t *testing.T) { - actual := scenario.input.String() - if actual != scenario.expected { - t.Errorf("expected string output %s but got %s", actual, scenario.expected) - } - }) - } -} diff --git 
a/vendor/github.com/prometheus/common/model/signature_test.go b/vendor/github.com/prometheus/common/model/signature_test.go index 0c7970586..d59c8a8c3 100644 --- a/vendor/github.com/prometheus/common/model/signature_test.go +++ b/vendor/github.com/prometheus/common/model/signature_test.go @@ -14,7 +14,6 @@ package model import ( - "fmt" "runtime" "sync" "testing" @@ -281,17 +280,13 @@ func benchmarkMetricToFastFingerprintConc(b *testing.B, ls LabelSet, e Fingerpri var start, end sync.WaitGroup start.Add(1) end.Add(concLevel) - errc := make(chan error, 1) for i := 0; i < concLevel; i++ { go func() { start.Wait() for j := b.N / concLevel; j >= 0; j-- { if a := labelSetToFastFingerprint(ls); a != e { - select { - case errc <- fmt.Errorf("expected signature of %d for %s, got %d", e, ls, a): - default: - } + b.Fatalf("expected signature of %d for %s, got %d", e, ls, a) } } end.Done() @@ -300,12 +295,6 @@ func benchmarkMetricToFastFingerprintConc(b *testing.B, ls LabelSet, e Fingerpri b.ResetTimer() start.Done() end.Wait() - - select { - case err := <-errc: - b.Fatal(err) - default: - } } func BenchmarkMetricToFastFingerprintTripleConc1(b *testing.B) { diff --git a/vendor/github.com/prometheus/common/model/silence_test.go b/vendor/github.com/prometheus/common/model/silence_test.go index f9aae4fc3..8eaaf0744 100644 --- a/vendor/github.com/prometheus/common/model/silence_test.go +++ b/vendor/github.com/prometheus/common/model/silence_test.go @@ -206,16 +206,6 @@ func TestSilenceValidate(t *testing.T) { }, err: "creator information missing", }, - { - sil: &Silence{ - Matchers: []*Matcher{}, - StartsAt: ts, - EndsAt: ts, - CreatedAt: ts, - Comment: "comment", - }, - err: "at least one matcher required", - }, } for i, c := range cases { diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go index 7b0064fdb..46259b1f1 100644 --- a/vendor/github.com/prometheus/common/model/time.go +++ 
b/vendor/github.com/prometheus/common/model/time.go @@ -150,13 +150,7 @@ func (t *Time) UnmarshalJSON(b []byte) error { return err } - // If the value was something like -0.1 the negative is lost in the - // parsing because of the leading zero, this ensures that we capture it. - if len(p[0]) > 0 && p[0][0] == '-' && v+va > 0 { - *t = Time(v+va) * -1 - } else { - *t = Time(v + va) - } + *t = Time(v + va) default: return fmt.Errorf("invalid time %q", string(b)) diff --git a/vendor/github.com/prometheus/common/model/time_test.go b/vendor/github.com/prometheus/common/model/time_test.go index 315bc4e59..3efdd65ff 100644 --- a/vendor/github.com/prometheus/common/model/time_test.go +++ b/vendor/github.com/prometheus/common/model/time_test.go @@ -14,7 +14,6 @@ package model import ( - "strconv" "testing" "time" ) @@ -131,37 +130,3 @@ func TestParseDuration(t *testing.T) { } } } - -func TestTimeJSON(t *testing.T) { - tests := []struct { - in Time - out string - }{ - {Time(1), `0.001`}, - {Time(-1), `-0.001`}, - } - - for i, test := range tests { - t.Run(strconv.Itoa(i), func(t *testing.T) { - b, err := test.in.MarshalJSON() - if err != nil { - t.Fatalf("Error marshaling time: %v", err) - } - - if string(b) != test.out { - t.Errorf("Mismatch in marshal expected=%s actual=%s", test.out, b) - } - - var tm Time - if err := tm.UnmarshalJSON(b); err != nil { - t.Fatalf("Error Unmarshaling time: %v", err) - } - - if !test.in.Equal(tm) { - t.Fatalf("Mismatch after Unmarshal expected=%v actual=%v", test.in, tm) - } - - }) - } - -} diff --git a/vendor/github.com/prometheus/common/promlog/log.go b/vendor/github.com/prometheus/common/promlog/log.go index 45fcbc408..059b2aef0 100644 --- a/vendor/github.com/prometheus/common/promlog/log.go +++ b/vendor/github.com/prometheus/common/promlog/log.go @@ -18,23 +18,12 @@ package promlog import ( "os" - "time" "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/pkg/errors" ) -var ( - // This timestamp format differs from 
RFC3339Nano by using .000 instead - // of .999999999 which changes the timestamp from 9 variable to 3 fixed - // decimals (.130 instead of .130987456). - timestampFormat = log.TimestampFormat( - func() time.Time { return time.Now().UTC() }, - "2006-01-02T15:04:05.000Z07:00", - ) -) - // AllowedLevel is a settable identifier for the minimum level a log entry // must be have. type AllowedLevel struct { @@ -101,6 +90,6 @@ func New(config *Config) log.Logger { } l = level.NewFilter(l, config.Level.o) - l = log.With(l, "ts", timestampFormat, "caller", log.DefaultCaller) + l = log.With(l, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) return l } diff --git a/vendor/github.com/prometheus/common/route/route.go b/vendor/github.com/prometheus/common/route/route.go index 1bd0a1edd..66c9d48f9 100644 --- a/vendor/github.com/prometheus/common/route/route.go +++ b/vendor/github.com/prometheus/common/route/route.go @@ -1,16 +1,3 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - package route import ( @@ -22,13 +9,9 @@ import ( type param string -// Param returns param p for the context, or the empty string when -// param does not exist in context. +// Param returns param p for the context. func Param(ctx context.Context, p string) string { - if v := ctx.Value(param(p)); v != nil { - return v.(string) - } - return "" + return ctx.Value(param(p)).(string) } // WithParam returns a new context with param p set to v. 
diff --git a/vendor/github.com/prometheus/common/route/route_test.go b/vendor/github.com/prometheus/common/route/route_test.go index 4d9020383..127974c72 100644 --- a/vendor/github.com/prometheus/common/route/route_test.go +++ b/vendor/github.com/prometheus/common/route/route_test.go @@ -1,16 +1,3 @@ -// Copyright 2015 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - package route import ( @@ -93,23 +80,6 @@ func TestContextWithValue(t *testing.T) { router.ServeHTTP(nil, r) } -func TestContextWithoutValue(t *testing.T) { - router := New() - router.Get("/test", func(w http.ResponseWriter, r *http.Request) { - want := "" - got := Param(r.Context(), "foo") - if want != got { - t.Fatalf("Unexpected context value: want %q, got %q", want, got) - } - }) - - r, err := http.NewRequest("GET", "http://localhost:9090/test", nil) - if err != nil { - t.Fatalf("Error building test request: %s", err) - } - router.ServeHTTP(nil, r) -} - func TestInstrumentation(t *testing.T) { var got string cases := []struct { diff --git a/vendor/github.com/prometheus/common/server/static_file_server.go b/vendor/github.com/prometheus/common/server/static_file_server.go deleted file mode 100644 index d71878e4a..000000000 --- a/vendor/github.com/prometheus/common/server/static_file_server.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in 
compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package server - -import ( - "net/http" - "path/filepath" -) - -var mimeTypes = map[string]string{ - ".js": "application/javascript", - ".css": "text/css", - ".png": "image/png", - ".jpg": "image/jpeg", - ".gif": "image/gif", -} - -func StaticFileServer(root http.FileSystem) http.Handler { - return http.HandlerFunc( - func(w http.ResponseWriter, r *http.Request) { - fileExt := filepath.Ext(r.URL.Path) - - if t, ok := mimeTypes[fileExt]; ok { - w.Header().Set("Content-Type", t) - } - - http.FileServer(root).ServeHTTP(w, r) - }, - ) -} diff --git a/vendor/github.com/prometheus/common/server/static_file_server_test.go b/vendor/github.com/prometheus/common/server/static_file_server_test.go deleted file mode 100644 index a75936b5a..000000000 --- a/vendor/github.com/prometheus/common/server/static_file_server_test.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2019 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package server - -import ( - "net/http" - "net/http/httptest" - "testing" -) - -type dummyFileSystem struct{} - -func (fs dummyFileSystem) Open(path string) (http.File, error) { - return http.Dir(".").Open(".") -} - -func TestServeHttp(t *testing.T) { - cases := []struct { - name string - path string - contentType string - }{ - { - name: "normal file", - path: "index.html", - contentType: "", - }, - { - name: "javascript", - path: "test.js", - contentType: "application/javascript", - }, - { - name: "css", - path: "test.css", - contentType: "text/css", - }, - { - name: "png", - path: "test.png", - contentType: "image/png", - }, - { - name: "jpg", - path: "test.jpg", - contentType: "image/jpeg", - }, - { - name: "gif", - path: "test.gif", - contentType: "image/gif", - }, - } - - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - rr := httptest.NewRecorder() - req, err := http.NewRequest("GET", "http://localhost/"+c.path, nil) - - if err != nil { - t.Fatal(err) - } - - s := StaticFileServer(dummyFileSystem{}) - s.ServeHTTP(rr, req) - - if rr.Header().Get("Content-Type") != c.contentType { - t.Fatalf("Unexpected Content-Type: %s", rr.Header().Get("Content-Type")) - } - }) - } -} diff --git a/vendor/k8s.io/component-base/Godeps/Godeps.json b/vendor/k8s.io/component-base/Godeps/Godeps.json index 6c5309f3f..c107ef725 100644 --- a/vendor/k8s.io/component-base/Godeps/Godeps.json +++ b/vendor/k8s.io/component-base/Godeps/Godeps.json @@ -50,17 +50,9 @@ "ImportPath": "github.com/PuerkitoBio/urlesc", "Rev": "5bd2802263f2" }, - { - "ImportPath": "github.com/alecthomas/template", - "Rev": "a0175ee3bccc" - }, - { - "ImportPath": "github.com/alecthomas/units", - "Rev": "2efee857e7cf" - }, { "ImportPath": "github.com/beorn7/perks", - "Rev": "v1.0.0" + "Rev": "3a771d992973" }, { "ImportPath": "github.com/blang/semver", @@ -102,14 +94,6 @@ "ImportPath": "github.com/ghodss/yaml", "Rev": "73d445a93680" }, - { - "ImportPath": "github.com/go-kit/kit", - "Rev": "v0.8.0" - 
}, - { - "ImportPath": "github.com/go-logfmt/logfmt", - "Rev": "v0.3.0" - }, { "ImportPath": "github.com/go-logr/logr", "Rev": "v0.1.0" @@ -130,10 +114,6 @@ "ImportPath": "github.com/go-openapi/swag", "Rev": "1d0bd113de87" }, - { - "ImportPath": "github.com/go-stack/stack", - "Rev": "v1.8.0" - }, { "ImportPath": "github.com/gogo/protobuf", "Rev": "65acae22fc9d" @@ -214,10 +194,6 @@ "ImportPath": "github.com/jstemmer/go-junit-report", "Rev": "af01ea7f8024" }, - { - "ImportPath": "github.com/julienschmidt/httprouter", - "Rev": "v1.2.0" - }, { "ImportPath": "github.com/kisielk/errcheck", "Rev": "v1.2.0" @@ -226,14 +202,6 @@ "ImportPath": "github.com/kisielk/gotool", "Rev": "v1.0.0" }, - { - "ImportPath": "github.com/konsorten/go-windows-terminal-sequences", - "Rev": "v1.0.1" - }, - { - "ImportPath": "github.com/kr/logfmt", - "Rev": "b84e30acd515" - }, { "ImportPath": "github.com/kr/pretty", "Rev": "v0.1.0" @@ -266,10 +234,6 @@ "ImportPath": "github.com/munnerz/goautoneg", "Rev": "a547fc61f48d" }, - { - "ImportPath": "github.com/mwitkow/go-conntrack", - "Rev": "cc309e4a2223" - }, { "ImportPath": "github.com/mxk/go-flowrate", "Rev": "cca7078d478f" @@ -286,33 +250,25 @@ "ImportPath": "github.com/peterbourgon/diskv", "Rev": "v2.0.1" }, - { - "ImportPath": "github.com/pkg/errors", - "Rev": "v0.8.0" - }, { "ImportPath": "github.com/pmezard/go-difflib", "Rev": "v1.0.0" }, { "ImportPath": "github.com/prometheus/client_golang", - "Rev": "v0.9.4" + "Rev": "v0.9.2" }, { "ImportPath": "github.com/prometheus/client_model", - "Rev": "fd36f4220a90" + "Rev": "5c3871d89910" }, { "ImportPath": "github.com/prometheus/common", - "Rev": "v0.4.1" + "Rev": "4724e9255275" }, { "ImportPath": "github.com/prometheus/procfs", - "Rev": "v0.0.2" - }, - { - "ImportPath": "github.com/sirupsen/logrus", - "Rev": "v1.2.0" + "Rev": "1dc9a6cbc91a" }, { "ImportPath": "github.com/spf13/afero", @@ -324,7 +280,7 @@ }, { "ImportPath": "github.com/stretchr/objx", - "Rev": "v0.1.1" + "Rev": "v0.1.0" }, { 
"ImportPath": "github.com/stretchr/testify", @@ -390,10 +346,6 @@ "ImportPath": "google.golang.org/grpc", "Rev": "v1.19.0" }, - { - "ImportPath": "gopkg.in/alecthomas/kingpin.v2", - "Rev": "v2.2.6" - }, { "ImportPath": "gopkg.in/check.v1", "Rev": "788fd7840127" @@ -420,15 +372,15 @@ }, { "ImportPath": "k8s.io/api", - "Rev": "d58b53da08f5" + "Rev": "95b840bb6a1f" }, { "ImportPath": "k8s.io/apimachinery", - "Rev": "62598f38f24e" + "Rev": "27d36303b655" }, { "ImportPath": "k8s.io/client-go", - "Rev": "07054768d98d" + "Rev": "1fbdaa4c8d90" }, { "ImportPath": "k8s.io/gengo", @@ -436,7 +388,7 @@ }, { "ImportPath": "k8s.io/klog", - "Rev": "v1.0.0" + "Rev": "v0.4.0" }, { "ImportPath": "k8s.io/kube-openapi", @@ -444,7 +396,7 @@ }, { "ImportPath": "k8s.io/utils", - "Rev": "5008bf6f8cd6" + "Rev": "581e00157fb1" }, { "ImportPath": "sigs.k8s.io/structured-merge-diff", diff --git a/vendor/k8s.io/component-base/go.mod b/vendor/k8s.io/component-base/go.mod index db0bc17f1..fe2010c12 100644 --- a/vendor/k8s.io/component-base/go.mod +++ b/vendor/k8s.io/component-base/go.mod @@ -6,16 +6,16 @@ go 1.12 require ( github.com/blang/semver v3.5.0+incompatible - github.com/prometheus/client_golang v0.9.4 - github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 - github.com/prometheus/common v0.4.1 - github.com/prometheus/procfs v0.0.2 + github.com/prometheus/client_golang v0.9.2 + github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 + github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 + github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a github.com/spf13/pflag v1.0.3 github.com/stretchr/testify v1.3.0 - k8s.io/apimachinery v0.0.0-20190925235427-62598f38f24e - k8s.io/client-go v0.0.0-20190925235746-07054768d98d - k8s.io/klog v1.0.0 - k8s.io/utils v0.0.0-20190920012459-5008bf6f8cd6 + k8s.io/apimachinery v0.0.0-20190913080033-27d36303b655 + k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90 + k8s.io/klog v0.4.0 + k8s.io/utils 
v0.0.0-20190801114015-581e00157fb1 ) replace ( @@ -26,7 +26,7 @@ replace ( golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 golang.org/x/text => golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db golang.org/x/time => golang.org/x/time v0.0.0-20161028155119-f51c12702a4d - k8s.io/api => k8s.io/api v0.0.0-20190925180651-d58b53da08f5 - k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20190925235427-62598f38f24e - k8s.io/client-go => k8s.io/client-go v0.0.0-20190925235746-07054768d98d + k8s.io/api => k8s.io/api v0.0.0-20190918155943-95b840bb6a1f + k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20190913080033-27d36303b655 + k8s.io/client-go => k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90 ) diff --git a/vendor/k8s.io/component-base/go.sum b/vendor/k8s.io/component-base/go.sum index 69849177c..d55f317c6 100644 --- a/vendor/k8s.io/component-base/go.sum +++ b/vendor/k8s.io/component-base/go.sum @@ -12,12 +12,8 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= 
github.com/blang/semver v3.5.0+incompatible h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= @@ -34,15 +30,11 @@ github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -76,15 +68,11 @@ github.com/hpcloud/tail v1.0.0 
h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -101,7 +89,6 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= 
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -111,31 +98,23 @@ github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGV github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.4 h1:Y8E/JaaPbmFSW2V81Ab/d8yZFYQQGbni1b1jPcG9Y6A= -github.com/prometheus/client_golang v0.9.4/go.mod h1:oCXIBxdI62A4cR6aTRJCgetEjecSIYzOEaeAn4iYEpM= +github.com/prometheus/client_golang v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 h1:PnBWHBf+6L0jOqq0gIVUe6Yk0/QMZ640k6NvkxcBf+8= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a h1:9a8MnZMP0X2nLJdBg+pBmGgkJlSaKC2KaQmTCk1XDtE= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -147,7 +126,7 @@ golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -178,7 +157,6 @@ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoA google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= @@ -195,18 +173,18 @@ gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.0.0-20190925180651-d58b53da08f5/go.mod h1:blPYY5r6fKug8SVOnjDtFAlzZzInCRL9NNls66SFhFI= -k8s.io/apimachinery v0.0.0-20190925235427-62598f38f24e/go.mod h1:grJJH0hgilA2pYoUiJcPu2EDUal95NTq1vpxxvMLSu8= -k8s.io/client-go v0.0.0-20190925235746-07054768d98d/go.mod h1:KumMj5rt+3qCPy5LJipGocsmMx6RW8vdDAs8QNK6jvU= +k8s.io/api v0.0.0-20190918155943-95b840bb6a1f/go.mod h1:uWuOHnjmNrtQomJrvEBg0c0HRNyQ+8KTEERVsK0PW48= +k8s.io/apimachinery v0.0.0-20190913080033-27d36303b655/go.mod h1:nL6pwRT8NgfF8TT68DBI8uEePRt89cSvoXUVqbkWHq4= +k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90/go.mod h1:J69/JveO6XESwVgG53q3Uz5OSfgsv4uxpScmmyYOOlk= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= -k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/klog v0.4.0 h1:lCJCxf/LIowc2IGS9TPjWDyXY4nOmdGdfcwwDQCOURQ= +k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf h1:EYm5AW/UUDbnmnI+gK0TJDVK9qPLhM+sRHYanNKw0EQ= k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= -k8s.io/utils v0.0.0-20190920012459-5008bf6f8cd6 h1:rfepARh/ECp66dk9TTmT//1PBkHffjnxhdOrgH4m+eA= -k8s.io/utils v0.0.0-20190920012459-5008bf6f8cd6/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/utils v0.0.0-20190801114015-581e00157fb1 h1:+ySTxfHnfzZb9ys375PXNlLhkJPLKgHajBU0N62BDvE= +k8s.io/utils 
v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/vendor/k8s.io/component-base/metrics/counter.go b/vendor/k8s.io/component-base/metrics/counter.go index ccc1f084b..17386ed5a 100644 --- a/vendor/k8s.io/component-base/metrics/counter.go +++ b/vendor/k8s.io/component-base/metrics/counter.go @@ -34,8 +34,10 @@ type Counter struct { // However, the object returned will not measure anything unless the collector is first // registered, since the metric is lazily instantiated. func NewCounter(opts *CounterOpts) *Counter { - opts.StabilityLevel.setDefaults() - + // todo: handle defaulting better + if opts.StabilityLevel == "" { + opts.StabilityLevel = ALPHA + } kc := &Counter{ CounterOpts: opts, lazyMetric: lazyMetric{}, @@ -84,8 +86,10 @@ type CounterVec struct { // However, the object returned will not measure anything unless the collector is first // registered, since the metric is lazily instantiated. func NewCounterVec(opts *CounterOpts, labels []string) *CounterVec { - opts.StabilityLevel.setDefaults() - + // todo: handle defaulting better + if opts.StabilityLevel == "" { + opts.StabilityLevel = ALPHA + } cv := &CounterVec{ CounterVec: noopCounterVec, CounterOpts: opts, @@ -139,7 +143,7 @@ func (v *CounterVec) WithLabelValues(lvs ...string) CounterMetric { // must match those of the VariableLabels in Desc). If that label map is // accessed for the first time, a new Counter is created IFF the counterVec has // been registered to a metrics registry. 
-func (v *CounterVec) With(labels map[string]string) CounterMetric { +func (v *CounterVec) With(labels prometheus.Labels) CounterMetric { if !v.IsCreated() { return noop // return no-op counter } @@ -153,7 +157,7 @@ func (v *CounterVec) With(labels map[string]string) CounterMetric { // with those of the VariableLabels in Desc. However, such inconsistent Labels // can never match an actual metric, so the method will always return false in // that case. -func (v *CounterVec) Delete(labels map[string]string) bool { +func (v *CounterVec) Delete(labels prometheus.Labels) bool { if !v.IsCreated() { return false // since we haven't created the metric, we haven't deleted a metric with the passed in values } diff --git a/vendor/k8s.io/component-base/metrics/gauge.go b/vendor/k8s.io/component-base/metrics/gauge.go index 665a24c41..82b982d9d 100644 --- a/vendor/k8s.io/component-base/metrics/gauge.go +++ b/vendor/k8s.io/component-base/metrics/gauge.go @@ -34,8 +34,10 @@ type Gauge struct { // However, the object returned will not measure anything unless the collector is first // registered, since the metric is lazily instantiated. func NewGauge(opts *GaugeOpts) *Gauge { - opts.StabilityLevel.setDefaults() - + // todo: handle defaulting better + if opts.StabilityLevel == "" { + opts.StabilityLevel = ALPHA + } kc := &Gauge{ GaugeOpts: opts, lazyMetric: lazyMetric{}, @@ -84,8 +86,10 @@ type GaugeVec struct { // However, the object returned will not measure anything unless the collector is first // registered, since the metric is lazily instantiated. func NewGaugeVec(opts *GaugeOpts, labels []string) *GaugeVec { - opts.StabilityLevel.setDefaults() - + // todo: handle defaulting better + if opts.StabilityLevel == "" { + opts.StabilityLevel = ALPHA + } cv := &GaugeVec{ GaugeVec: noopGaugeVec, GaugeOpts: opts, @@ -138,7 +142,7 @@ func (v *GaugeVec) WithLabelValues(lvs ...string) GaugeMetric { // must match those of the VariableLabels in Desc). 
If that label map is // accessed for the first time, a new GaugeMetric is created IFF the gaugeVec has // been registered to a metrics registry. -func (v *GaugeVec) With(labels map[string]string) GaugeMetric { +func (v *GaugeVec) With(labels prometheus.Labels) GaugeMetric { if !v.IsCreated() { return noop // return no-op gauge } @@ -152,7 +156,7 @@ func (v *GaugeVec) With(labels map[string]string) GaugeMetric { // with those of the VariableLabels in Desc. However, such inconsistent Labels // can never match an actual metric, so the method will always return false in // that case. -func (v *GaugeVec) Delete(labels map[string]string) bool { +func (v *GaugeVec) Delete(labels prometheus.Labels) bool { if !v.IsCreated() { return false // since we haven't created the metric, we haven't deleted a metric with the passed in values } diff --git a/vendor/k8s.io/component-base/metrics/histogram.go b/vendor/k8s.io/component-base/metrics/histogram.go index 1b2de88fc..38d2d4164 100644 --- a/vendor/k8s.io/component-base/metrics/histogram.go +++ b/vendor/k8s.io/component-base/metrics/histogram.go @@ -21,19 +21,6 @@ import ( "github.com/prometheus/client_golang/prometheus" ) -// DefBuckets is a wrapper for prometheus.DefBuckets -var DefBuckets = prometheus.DefBuckets - -// LinearBuckets is a wrapper for prometheus.LinearBuckets. -func LinearBuckets(start, width float64, count int) []float64 { - return prometheus.LinearBuckets(start, width, count) -} - -// ExponentialBuckets is a wrapper for prometheus.ExponentialBuckets. -func ExponentialBuckets(start, factor float64, count int) []float64 { - return prometheus.ExponentialBuckets(start, factor, count) -} - // Histogram is our internal representation for our wrapping struct around prometheus // histograms. Summary implements both kubeCollector and ObserverMetric type Histogram struct { @@ -46,8 +33,10 @@ type Histogram struct { // NewHistogram returns an object which is Histogram-like. 
However, nothing // will be measured until the histogram is registered somewhere. func NewHistogram(opts *HistogramOpts) *Histogram { - opts.StabilityLevel.setDefaults() - + // todo: handle defaulting better + if opts.StabilityLevel == "" { + opts.StabilityLevel = ALPHA + } h := &Histogram{ HistogramOpts: opts, lazyMetric: lazyMetric{}, @@ -96,8 +85,10 @@ type HistogramVec struct { // prometheus.HistogramVec object. However, the object returned will not measure // anything unless the collector is first registered, since the metric is lazily instantiated. func NewHistogramVec(opts *HistogramOpts, labels []string) *HistogramVec { - opts.StabilityLevel.setDefaults() - + // todo: handle defaulting better + if opts.StabilityLevel == "" { + opts.StabilityLevel = ALPHA + } v := &HistogramVec{ HistogramVec: noopHistogramVec, HistogramOpts: opts, @@ -146,7 +137,7 @@ func (v *HistogramVec) WithLabelValues(lvs ...string) ObserverMetric { // must match those of the VariableLabels in Desc). If that label map is // accessed for the first time, a new ObserverMetric is created IFF the HistogramVec has // been registered to a metrics registry. -func (v *HistogramVec) With(labels map[string]string) ObserverMetric { +func (v *HistogramVec) With(labels prometheus.Labels) ObserverMetric { if !v.IsCreated() { return noop } @@ -160,7 +151,7 @@ func (v *HistogramVec) With(labels map[string]string) ObserverMetric { // with those of the VariableLabels in Desc. However, such inconsistent Labels // can never match an actual metric, so the method will always return false in // that case. 
-func (v *HistogramVec) Delete(labels map[string]string) bool { +func (v *HistogramVec) Delete(labels prometheus.Labels) bool { if !v.IsCreated() { return false // since we haven't created the metric, we haven't deleted a metric with the passed in values } diff --git a/vendor/k8s.io/component-base/metrics/labels.go b/vendor/k8s.io/component-base/metrics/labels.go deleted file mode 100644 index 11af3ae42..000000000 --- a/vendor/k8s.io/component-base/metrics/labels.go +++ /dev/null @@ -1,22 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package metrics - -import "github.com/prometheus/client_golang/prometheus" - -// Labels represents a collection of label name -> value mappings. -type Labels prometheus.Labels diff --git a/vendor/k8s.io/component-base/metrics/opts.go b/vendor/k8s.io/component-base/metrics/opts.go index 0b5e2c381..4fd1048b1 100644 --- a/vendor/k8s.io/component-base/metrics/opts.go +++ b/vendor/k8s.io/component-base/metrics/opts.go @@ -34,7 +34,7 @@ type KubeOpts struct { Subsystem string Name string Help string - ConstLabels map[string]string + ConstLabels prometheus.Labels DeprecatedVersion string deprecateOnce sync.Once annotateOnce sync.Once @@ -53,16 +53,6 @@ const ( STABLE StabilityLevel = "STABLE" ) -// setDefaults takes 'ALPHA' in case of empty. 
-func (sl *StabilityLevel) setDefaults() { - switch *sl { - case "": - *sl = ALPHA - default: - // no-op, since we have a StabilityLevel already - } -} - // CounterOpts is an alias for Opts. See there for doc comments. type CounterOpts KubeOpts @@ -132,7 +122,7 @@ type HistogramOpts struct { Subsystem string Name string Help string - ConstLabels map[string]string + ConstLabels prometheus.Labels Buckets []float64 DeprecatedVersion string deprecateOnce sync.Once @@ -178,7 +168,7 @@ type SummaryOpts struct { Subsystem string Name string Help string - ConstLabels map[string]string + ConstLabels prometheus.Labels Objectives map[float64]float64 MaxAge time.Duration AgeBuckets uint32 diff --git a/vendor/k8s.io/component-base/metrics/opts_test.go b/vendor/k8s.io/component-base/metrics/opts_test.go deleted file mode 100644 index d009829a6..000000000 --- a/vendor/k8s.io/component-base/metrics/opts_test.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package metrics - -import ( - "testing" -) - -func TestDefaultStabilityLevel(t *testing.T) { - var tests = []struct { - name string - inputValue StabilityLevel - expectValue StabilityLevel - expectPanic bool - }{ - { - name: "empty should take ALPHA by default", - inputValue: "", - expectValue: ALPHA, - expectPanic: false, - }, - { - name: "ALPHA remain unchanged", - inputValue: ALPHA, - expectValue: ALPHA, - expectPanic: false, - }, - { - name: "STABLE remain unchanged", - inputValue: STABLE, - expectValue: STABLE, - expectPanic: false, - }, - } - - for _, tc := range tests { - tc := tc - t.Run(tc.name, func(t *testing.T) { - var stability = tc.inputValue - - stability.setDefaults() - if stability != tc.expectValue { - t.Errorf("Got %s, expected: %v ", stability, tc.expectValue) - } - }) - } -} diff --git a/vendor/k8s.io/component-base/metrics/prometheus/restclient/metrics.go b/vendor/k8s.io/component-base/metrics/prometheus/restclient/metrics.go index a511b112d..805ef722e 100644 --- a/vendor/k8s.io/component-base/metrics/prometheus/restclient/metrics.go +++ b/vendor/k8s.io/component-base/metrics/prometheus/restclient/metrics.go @@ -20,6 +20,8 @@ import ( "net/url" "time" + "github.com/prometheus/client_golang/prometheus" + "k8s.io/client-go/tools/metrics" k8smetrics "k8s.io/component-base/metrics" "k8s.io/component-base/metrics/legacyregistry" @@ -32,7 +34,7 @@ var ( &k8smetrics.HistogramOpts{ Name: "rest_client_request_duration_seconds", Help: "Request latency in seconds. Broken down by verb and URL.", - Buckets: k8smetrics.ExponentialBuckets(0.001, 2, 10), + Buckets: prometheus.ExponentialBuckets(0.001, 2, 10), }, []string{"verb", "url"}, ) @@ -42,7 +44,7 @@ var ( &k8smetrics.HistogramOpts{ Name: "rest_client_request_latency_seconds", Help: "(Deprecated) Request latency in seconds. 
Broken down by verb and URL.", - Buckets: k8smetrics.ExponentialBuckets(0.001, 2, 10), + Buckets: prometheus.ExponentialBuckets(0.001, 2, 10), }, []string{"verb", "url"}, ) diff --git a/vendor/k8s.io/component-base/metrics/prometheus/version/metrics.go b/vendor/k8s.io/component-base/metrics/prometheus/version/metrics.go deleted file mode 100644 index 408812bab..000000000 --- a/vendor/k8s.io/component-base/metrics/prometheus/version/metrics.go +++ /dev/null @@ -1,41 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package version - -import ( - "k8s.io/component-base/metrics" - "k8s.io/component-base/metrics/legacyregistry" - "k8s.io/component-base/version" -) - -var ( - buildInfo = metrics.NewGaugeVec( - &metrics.GaugeOpts{ - Name: "kubernetes_build_info", - Help: "A metric with a constant '1' value labeled by major, minor, git version, git commit, git tree state, build date, Go version, and compiler from which Kubernetes was built, and platform on which it is running.", - StabilityLevel: metrics.ALPHA, - }, - []string{"major", "minor", "gitVersion", "gitCommit", "gitTreeState", "buildDate", "goVersion", "compiler", "platform"}, - ) -) - -// RegisterBuildInfo registers the build and version info in a metadata metric in prometheus -func init() { - info := version.Get() - legacyregistry.MustRegister(buildInfo) - buildInfo.WithLabelValues(info.Major, info.Minor, info.GitVersion, info.GitCommit, info.GitTreeState, info.BuildDate, info.GoVersion, info.Compiler, info.Platform).Set(1) -} diff --git a/vendor/k8s.io/component-base/metrics/prometheus/workqueue/metrics.go b/vendor/k8s.io/component-base/metrics/prometheus/workqueue/metrics.go index a0192acb0..bd9e6d11f 100644 --- a/vendor/k8s.io/component-base/metrics/prometheus/workqueue/metrics.go +++ b/vendor/k8s.io/component-base/metrics/prometheus/workqueue/metrics.go @@ -17,6 +17,8 @@ limitations under the License. 
package workqueue import ( + "github.com/prometheus/client_golang/prometheus" + "k8s.io/client-go/util/workqueue" k8smetrics "k8s.io/component-base/metrics" "k8s.io/component-base/metrics/legacyregistry" @@ -54,14 +56,14 @@ var ( Subsystem: WorkQueueSubsystem, Name: QueueLatencyKey, Help: "How long in seconds an item stays in workqueue before being requested.", - Buckets: k8smetrics.ExponentialBuckets(10e-9, 10, 10), + Buckets: prometheus.ExponentialBuckets(10e-9, 10, 10), }, []string{"name"}) workDuration = k8smetrics.NewHistogramVec(&k8smetrics.HistogramOpts{ Subsystem: WorkQueueSubsystem, Name: WorkDurationKey, Help: "How long in seconds processing an item from workqueue takes.", - Buckets: k8smetrics.ExponentialBuckets(10e-9, 10, 10), + Buckets: prometheus.ExponentialBuckets(10e-9, 10, 10), }, []string{"name"}) unfinished = k8smetrics.NewGaugeVec(&k8smetrics.GaugeOpts{ diff --git a/vendor/k8s.io/component-base/metrics/registry.go b/vendor/k8s.io/component-base/metrics/registry.go index c8aeb34c8..701f15199 100644 --- a/vendor/k8s.io/component-base/metrics/registry.go +++ b/vendor/k8s.io/component-base/metrics/registry.go @@ -148,9 +148,14 @@ func newKubeRegistry(v apimachineryversion.Info) *kubeRegistry { return r } +func registerMetadataMetrics(r *kubeRegistry) { + RegisterBuildInfo(r) +} + // NewKubeRegistry creates a new vanilla Registry without any Collectors // pre-registered. 
func NewKubeRegistry() KubeRegistry { r := newKubeRegistry(version.Get()) + registerMetadataMetrics(r) return r } diff --git a/vendor/k8s.io/component-base/metrics/summary.go b/vendor/k8s.io/component-base/metrics/summary.go index cad4ce633..ff5737822 100644 --- a/vendor/k8s.io/component-base/metrics/summary.go +++ b/vendor/k8s.io/component-base/metrics/summary.go @@ -37,8 +37,10 @@ type Summary struct { // // DEPRECATED: as per the metrics overhaul KEP func NewSummary(opts *SummaryOpts) *Summary { - opts.StabilityLevel.setDefaults() - + // todo: handle defaulting better + if opts.StabilityLevel == "" { + opts.StabilityLevel = ALPHA + } s := &Summary{ SummaryOpts: opts, lazyMetric: lazyMetric{}, @@ -91,8 +93,10 @@ type SummaryVec struct { // // DEPRECATED: as per the metrics overhaul KEP func NewSummaryVec(opts *SummaryOpts, labels []string) *SummaryVec { - opts.StabilityLevel.setDefaults() - + // todo: handle defaulting better + if opts.StabilityLevel == "" { + opts.StabilityLevel = ALPHA + } v := &SummaryVec{ SummaryOpts: opts, originalLabels: labels, @@ -140,7 +144,7 @@ func (v *SummaryVec) WithLabelValues(lvs ...string) ObserverMetric { // must match those of the VariableLabels in Desc). If that label map is // accessed for the first time, a new ObserverMetric is created IFF the summaryVec has // been registered to a metrics registry. -func (v *SummaryVec) With(labels map[string]string) ObserverMetric { +func (v *SummaryVec) With(labels prometheus.Labels) ObserverMetric { if !v.IsCreated() { return noop } @@ -154,7 +158,7 @@ func (v *SummaryVec) With(labels map[string]string) ObserverMetric { // with those of the VariableLabels in Desc. However, such inconsistent Labels // can never match an actual metric, so the method will always return false in // that case. 
-func (v *SummaryVec) Delete(labels map[string]string) bool { +func (v *SummaryVec) Delete(labels prometheus.Labels) bool { if !v.IsCreated() { return false // since we haven't created the metric, we haven't deleted a metric with the passed in values } diff --git a/vendor/k8s.io/component-base/version/base.go b/vendor/k8s.io/component-base/version/base.go index e13678c30..c84c6d2d1 100644 --- a/vendor/k8s.io/component-base/version/base.go +++ b/vendor/k8s.io/component-base/version/base.go @@ -32,7 +32,7 @@ package version // build/mark_new_version.sh to reflect the new version, and then a // git annotated tag (using format vX.Y where X == Major version and Y // == Minor version) is created to point to the commit that updates -// component-base/version/base.go +// pkg/version/base.go var ( // TODO: Deprecate gitMajor and gitMinor, use only gitVersion // instead. First step in deprecation, keep the fields but make diff --git a/vendor/k8s.io/component-base/version/def.bzl b/vendor/k8s.io/component-base/version/def.bzl index 77edcbc89..ac7571b28 100644 --- a/vendor/k8s.io/component-base/version/def.bzl +++ b/vendor/k8s.io/component-base/version/def.bzl @@ -16,8 +16,8 @@ def version_x_defs(): # This should match the list of packages in kube::version::ldflag stamp_pkgs = [ + "k8s.io/kubernetes/pkg/version", "k8s.io/kubernetes/vendor/k8s.io/component-base/version", - "k8s.io/kubernetes/vendor/k8s.io/client-go/pkg/version", ] # This should match the list of vars in kube::version::ldflags diff --git a/vendor/k8s.io/component-base/version/verflag/verflag.go b/vendor/k8s.io/component-base/version/verflag/verflag.go deleted file mode 100644 index f27e101ca..000000000 --- a/vendor/k8s.io/component-base/version/verflag/verflag.go +++ /dev/null @@ -1,110 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package verflag defines utility functions to handle command line flags -// related to version of Kubernetes. -package verflag - -import ( - "fmt" - "os" - "strconv" - - flag "github.com/spf13/pflag" - - "k8s.io/component-base/version" -) - -type versionValue int - -const ( - VersionFalse versionValue = 0 - VersionTrue versionValue = 1 - VersionRaw versionValue = 2 -) - -const strRawVersion string = "raw" - -func (v *versionValue) IsBoolFlag() bool { - return true -} - -func (v *versionValue) Get() interface{} { - return versionValue(*v) -} - -func (v *versionValue) Set(s string) error { - if s == strRawVersion { - *v = VersionRaw - return nil - } - boolVal, err := strconv.ParseBool(s) - if boolVal { - *v = VersionTrue - } else { - *v = VersionFalse - } - return err -} - -func (v *versionValue) String() string { - if *v == VersionRaw { - return strRawVersion - } - return fmt.Sprintf("%v", bool(*v == VersionTrue)) -} - -// The type of the flag as required by the pflag.Value interface -func (v *versionValue) Type() string { - return "version" -} - -func VersionVar(p *versionValue, name string, value versionValue, usage string) { - *p = value - flag.Var(p, name, usage) - // "--version" will be treated as "--version=true" - flag.Lookup(name).NoOptDefVal = "true" -} - -func Version(name string, value versionValue, usage string) *versionValue { - p := new(versionValue) - VersionVar(p, name, value, usage) - return p -} - -const versionFlagName = "version" - -var ( - versionFlag = Version(versionFlagName, VersionFalse, "Print version information and quit") -) - -// 
AddFlags registers this package's flags on arbitrary FlagSets, such that they point to the -// same value as the global flags. -func AddFlags(fs *flag.FlagSet) { - fs.AddFlag(flag.Lookup(versionFlagName)) -} - -// PrintAndExitIfRequested will check if the -version flag was passed -// and, if so, print the version and exit. -func PrintAndExitIfRequested() { - if *versionFlag == VersionRaw { - fmt.Printf("%#v\n", version.Get()) - os.Exit(0) - } else if *versionFlag == VersionTrue { - fmt.Printf("Kubernetes %s\n", version.Get()) - os.Exit(0) - } -} diff --git a/vendor/k8s.io/utils/.travis.yml b/vendor/k8s.io/utils/.travis.yml index a0b8687bb..66c7e0ee6 100644 --- a/vendor/k8s.io/utils/.travis.yml +++ b/vendor/k8s.io/utils/.travis.yml @@ -1,6 +1,8 @@ language: go dist: xenial go: + - 1.9.x + - 1.10.x - 1.11.x - 1.12.x go_import_path: k8s.io/utils diff --git a/vendor/k8s.io/utils/Makefile b/vendor/k8s.io/utils/Makefile index b15a1508f..6168b50af 100644 --- a/vendor/k8s.io/utils/Makefile +++ b/vendor/k8s.io/utils/Makefile @@ -13,20 +13,24 @@ # limitations under the License. .PHONY: verify -verify: verify-fmt verify-lint vet - GO111MODULE=on go test -v -race ./... +verify: depend verify-fmt verify-lint vet + go test -v -race ./... + +.PHONY: depend +depend: + go get -t -v ./... .PHONY: verify-fmt verify-fmt: - GO111MODULE=on ./hack/verify-gofmt.sh + ./hack/verify-gofmt.sh .PHONY: verify-lint verify-lint: - GO111MODULE=on ./hack/verify-golint.sh + ./hack/verify-golint.sh .PHONY: vet vet: - GO111MODULE=on ./hack/verify-govet.sh + ./hack/verify-govet.sh .PHONY: update-fmt update-fmt: diff --git a/vendor/k8s.io/utils/net/net.go b/vendor/k8s.io/utils/net/net.go index 2b010c930..13449ac1c 100644 --- a/vendor/k8s.io/utils/net/net.go +++ b/vendor/k8s.io/utils/net/net.go @@ -17,11 +17,8 @@ limitations under the License. package net import ( - "errors" "fmt" - "math/big" "net" - "strconv" ) // ParseCIDRs parses a list of cidrs and return error if any is invalid. 
@@ -135,49 +132,3 @@ func IsIPv6CIDR(cidr *net.IPNet) bool { ip := cidr.IP return IsIPv6(ip) } - -// ParsePort parses a string representing an IP port. If the string is not a -// valid port number, this returns an error. -func ParsePort(port string, allowZero bool) (int, error) { - portInt, err := strconv.ParseUint(port, 10, 16) - if err != nil { - return 0, err - } - if portInt == 0 && !allowZero { - return 0, errors.New("0 is not a valid port number") - } - return int(portInt), nil -} - -// BigForIP creates a big.Int based on the provided net.IP -func BigForIP(ip net.IP) *big.Int { - b := ip.To4() - if b == nil { - b = ip.To16() - } - return big.NewInt(0).SetBytes(b) -} - -// AddIPOffset adds the provided integer offset to a base big.Int representing a -// net.IP -func AddIPOffset(base *big.Int, offset int) net.IP { - return net.IP(big.NewInt(0).Add(base, big.NewInt(int64(offset))).Bytes()) -} - -// RangeSize returns the size of a range in valid addresses. -func RangeSize(subnet *net.IPNet) int64 { - ones, bits := subnet.Mask.Size() - if bits == 32 && (bits-ones) >= 31 || bits == 128 && (bits-ones) >= 127 { - return 0 - } - return int64(1) << uint(bits-ones) -} - -// GetIndexedIP returns a net.IP that is subnet.IP + index in the contiguous IP space. -func GetIndexedIP(subnet *net.IPNet, index int) (net.IP, error) { - ip := AddIPOffset(BigForIP(subnet.IP), index) - if !subnet.Contains(ip) { - return nil, fmt.Errorf("can't generate IP with index %d from subnet. subnet too small. 
subnet: %q", index, subnet) - } - return ip, nil -} diff --git a/vendor/k8s.io/utils/net/net_test.go b/vendor/k8s.io/utils/net/net_test.go index 71f7567ab..471e48bbe 100644 --- a/vendor/k8s.io/utils/net/net_test.go +++ b/vendor/k8s.io/utils/net/net_test.go @@ -447,161 +447,3 @@ func TestIsIPv6CIDR(t *testing.T) { } } } - -func TestParsePort(t *testing.T) { - var tests = []struct { - name string - port string - allowZero bool - expectedPort int - expectedError bool - }{ - { - name: "valid port: 1", - port: "1", - expectedPort: 1, - }, - { - name: "valid port: 1234", - port: "1234", - expectedPort: 1234, - }, - { - name: "valid port: 65535", - port: "65535", - expectedPort: 65535, - }, - { - name: "invalid port: not a number", - port: "a", - expectedError: true, - allowZero: false, - }, - { - name: "invalid port: too small", - port: "0", - expectedError: true, - }, - { - name: "invalid port: negative", - port: "-10", - expectedError: true, - }, - { - name: "invalid port: too big", - port: "65536", - expectedError: true, - }, - { - name: "zero port: allowed", - port: "0", - allowZero: true, - }, - { - name: "zero port: not allowed", - port: "0", - expectedError: true, - }, - } - - for _, rt := range tests { - t.Run(rt.name, func(t *testing.T) { - actualPort, actualError := ParsePort(rt.port, rt.allowZero) - - if actualError != nil && !rt.expectedError { - t.Errorf("%s unexpected failure: %v", rt.name, actualError) - return - } - if actualError == nil && rt.expectedError { - t.Errorf("%s passed when expected to fail", rt.name) - return - } - if actualPort != rt.expectedPort { - t.Errorf("%s returned wrong port: got %d, expected %d", rt.name, actualPort, rt.expectedPort) - } - }) - } -} - -func TestRangeSize(t *testing.T) { - testCases := []struct { - name string - cidr string - addrs int64 - }{ - { - name: "supported IPv4 cidr", - cidr: "192.168.1.0/24", - addrs: 256, - }, - { - name: "unsupported IPv4 cidr", - cidr: "192.168.1.0/1", - addrs: 0, - }, - { - name: 
"unsupported IPv6 mask", - cidr: "2001:db8::/1", - addrs: 0, - }, - } - - for _, tc := range testCases { - _, cidr, err := net.ParseCIDR(tc.cidr) - if err != nil { - t.Errorf("failed to parse cidr for test %s, unexpected error: '%s'", tc.name, err) - } - if size := RangeSize(cidr); size != tc.addrs { - t.Errorf("test %s failed. %s should have a range size of %d, got %d", - tc.name, tc.cidr, tc.addrs, size) - } - } -} - -func TestGetIndexedIP(t *testing.T) { - testCases := []struct { - cidr string - index int - expectError bool - expectedIP string - }{ - { - cidr: "192.168.1.0/24", - index: 20, - expectError: false, - expectedIP: "192.168.1.20", - }, - { - cidr: "192.168.1.0/30", - index: 10, - expectError: true, - }, - { - cidr: "192.168.1.0/24", - index: 255, - expectError: false, - expectedIP: "192.168.1.255", - }, - } - - for _, tc := range testCases { - _, subnet, err := net.ParseCIDR(tc.cidr) - if err != nil { - t.Errorf("failed to parse cidr %s, unexpected error: '%s'", tc.cidr, err) - } - - ip, err := GetIndexedIP(subnet, tc.index) - if err == nil && tc.expectError || err != nil && !tc.expectError { - t.Errorf("expectedError is %v and err is %s", tc.expectError, err) - continue - } - - if err == nil { - ipString := ip.String() - if ipString != tc.expectedIP { - t.Errorf("expected %s but instead got %s", tc.expectedIP, ipString) - } - } - - } -}