From 8daf490eae2e40d602ca2c96276653f3077971b3 Mon Sep 17 00:00:00 2001 From: Stephen Benjamin Date: Tue, 29 Mar 2022 06:29:46 -0400 Subject: [PATCH 1/2] Revert "Bump client_golang to v1.11.1 (#326)" This reverts commit 2a06c6cb54dba5c75efd590b41fe803e41994ef9. --- go.mod | 2 +- go.sum | 2 - .../prometheus/promhttp/instrument_client.go | 28 +---- .../prometheus/promhttp/instrument_server.go | 111 ++++-------------- .../prometheus/promhttp/option.go | 31 ----- vendor/modules.txt | 2 +- 6 files changed, 34 insertions(+), 142 deletions(-) delete mode 100644 vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go diff --git a/go.mod b/go.mod index dc99392ee..fe005528f 100644 --- a/go.mod +++ b/go.mod @@ -22,7 +22,7 @@ require ( github.com/operator-framework/api v0.10.7 github.com/operator-framework/operator-lifecycle-manager v3.11.0+incompatible github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.11.1 + github.com/prometheus/client_golang v1.11.0 github.com/sirupsen/logrus v1.8.1 github.com/spf13/cobra v1.2.1 github.com/spf13/pflag v1.0.5 diff --git a/go.sum b/go.sum index b0f58c153..b0d1917d0 100644 --- a/go.sum +++ b/go.sum @@ -642,8 +642,6 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.11.1 h1:+4eQaD7vAZ6DsfsxB15hbE0odUjGI5ARs9yskGu1v4s= -github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go index 861b4d21c..83c49b66a 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go @@ -49,10 +49,7 @@ func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripp // http.RoundTripper to observe the request result with the provided CounterVec. // The CounterVec must have zero, one, or two non-const non-curried labels. For // those, the only allowed label names are "code" and "method". The function -// panics otherwise. For the "method" label a predefined default label value set -// is used to filter given values. Values besides predefined values will count -// as `unknown` method.`WithExtraMethods` can be used to add more -// methods to the set. Partitioning of the CounterVec happens by HTTP status code +// panics otherwise. Partitioning of the CounterVec happens by HTTP status code // and/or HTTP method if the respective instance label names are present in the // CounterVec. For unpartitioned counting, use a CounterVec with zero labels. // @@ -60,18 +57,13 @@ func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripp // is not incremented. 
// // See the example for ExampleInstrumentRoundTripperDuration for example usage. -func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper, opts ...Option) RoundTripperFunc { - rtOpts := &option{} - for _, o := range opts { - o(rtOpts) - } - +func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc { code, method := checkLabels(counter) return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { resp, err := next.RoundTrip(r) if err == nil { - counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)).Inc() + counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc() } return resp, err }) @@ -81,10 +73,7 @@ func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.Rou // http.RoundTripper to observe the request duration with the provided // ObserverVec. The ObserverVec must have zero, one, or two non-const // non-curried labels. For those, the only allowed label names are "code" and -// "method". The function panics otherwise. For the "method" label a predefined -// default label value set is used to filter given values. Values besides -// predefined values will count as `unknown` method. `WithExtraMethods` -// can be used to add more methods to the set. The Observe method of the Observer +// "method". The function panics otherwise. The Observe method of the Observer // in the ObserverVec is called with the request duration in // seconds. Partitioning happens by HTTP status code and/or HTTP method if the // respective instance label names are present in the ObserverVec. For @@ -96,19 +85,14 @@ func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.Rou // // Note that this method is only guaranteed to never observe negative durations // if used with Go1.9+. -func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper, opts ...Option) RoundTripperFunc { - rtOpts := &option{} - for _, o := range opts { - o(rtOpts) - } - +func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc { code, method := checkLabels(obs) return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { start := time.Now() resp, err := next.RoundTrip(r) if err == nil { - obs.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)).Observe(time.Since(start).Seconds()) + obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds()) } return resp, err }) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go index a23f0edc6..ab037db86 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go @@ -45,10 +45,7 @@ func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handl // http.Handler to observe the request duration with the provided ObserverVec. // The ObserverVec must have valid metric and label names and must have zero, // one, or two non-const non-curried labels. For those, the only allowed label -// names are "code" and "method". The function panics otherwise. For the "method" -// label a predefined default label value set is used to filter given values. -// Values besides predefined values will count as `unknown` method. 
-//`WithExtraMethods` can be used to add more methods to the set. The Observe +// names are "code" and "method". The function panics otherwise. The Observe // method of the Observer in the ObserverVec is called with the request duration // in seconds. Partitioning happens by HTTP status code and/or HTTP method if // the respective instance label names are present in the ObserverVec. For @@ -61,12 +58,7 @@ func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handl // // Note that this method is only guaranteed to never observe negative durations // if used with Go1.9+. -func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc { - mwOpts := &option{} - for _, o := range opts { - o(mwOpts) - } - +func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { code, method := checkLabels(obs) if code { @@ -75,14 +67,14 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op d := newDelegator(w, nil) next.ServeHTTP(d, r) - obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(time.Since(now).Seconds()) + obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds()) }) } return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { now := time.Now() next.ServeHTTP(w, r) - obs.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Observe(time.Since(now).Seconds()) + obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds()) }) } @@ -90,10 +82,7 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op // to observe the request result with the provided CounterVec. The CounterVec // must have valid metric and label names and must have zero, one, or two // non-const non-curried labels. For those, the only allowed label names are -// "code" and "method". The function panics otherwise. For the "method" -// label a predefined default label value set is used to filter given values. -// Values besides predefined values will count as `unknown` method. -// `WithExtraMethods` can be used to add more methods to the set. Partitioning of the +// "code" and "method". The function panics otherwise. Partitioning of the // CounterVec happens by HTTP status code and/or HTTP method if the respective // instance label names are present in the CounterVec. For unpartitioned // counting, use a CounterVec with zero labels. @@ -103,25 +92,20 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, op // If the wrapped Handler panics, the Counter is not incremented. // // See the example for InstrumentHandlerDuration for example usage. 
-func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler, opts ...Option) http.HandlerFunc { - mwOpts := &option{} - for _, o := range opts { - o(mwOpts) - } - +func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc { code, method := checkLabels(counter) if code { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { d := newDelegator(w, nil) next.ServeHTTP(d, r) - counter.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Inc() + counter.With(labels(code, method, r.Method, d.Status())).Inc() }) } return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { next.ServeHTTP(w, r) - counter.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Inc() + counter.With(labels(code, method, r.Method, 0)).Inc() }) } @@ -130,10 +114,7 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler, // until the response headers are written. The ObserverVec must have valid // metric and label names and must have zero, one, or two non-const non-curried // labels. For those, the only allowed label names are "code" and "method". The -// function panics otherwise. For the "method" label a predefined default label -// value set is used to filter given values. Values besides predefined values -// will count as `unknown` method.`WithExtraMethods` can be used to add more -// methods to the set. The Observe method of the Observer in the +// function panics otherwise. The Observe method of the Observer in the // ObserverVec is called with the request duration in seconds. Partitioning // happens by HTTP status code and/or HTTP method if the respective instance // label names are present in the ObserverVec. For unpartitioned observations, @@ -147,18 +128,13 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler, // if used with Go1.9+. // // See the example for InstrumentHandlerDuration for example usage. -func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc { - mwOpts := &option{} - for _, o := range opts { - o(mwOpts) - } - +func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { code, method := checkLabels(obs) return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { now := time.Now() d := newDelegator(w, func(status int) { - obs.With(labels(code, method, r.Method, status, mwOpts.extraMethods...)).Observe(time.Since(now).Seconds()) + obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds()) }) next.ServeHTTP(d, r) }) @@ -168,11 +144,8 @@ func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Ha // http.Handler to observe the request size with the provided ObserverVec. The // ObserverVec must have valid metric and label names and must have zero, one, // or two non-const non-curried labels. For those, the only allowed label names -// are "code" and "method". The function panics otherwise. For the "method" -// label a predefined default label value set is used to filter given values. -// Values besides predefined values will count as `unknown` method. -// `WithExtraMethods` can be used to add more methods to the set. The Observe -// method of the Observer in the ObserverVec is called with the request size in +// are "code" and "method". The function panics otherwise. 
The Observe method of +// the Observer in the ObserverVec is called with the request size in // bytes. Partitioning happens by HTTP status code and/or HTTP method if the // respective instance label names are present in the ObserverVec. For // unpartitioned observations, use an ObserverVec with zero labels. Note that @@ -183,12 +156,7 @@ func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Ha // If the wrapped Handler panics, no values are reported. // // See the example for InstrumentHandlerDuration for example usage. -func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc { - mwOpts := &option{} - for _, o := range opts { - o(mwOpts) - } - +func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { code, method := checkLabels(obs) if code { @@ -196,14 +164,14 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler, d := newDelegator(w, nil) next.ServeHTTP(d, r) size := computeApproximateRequestSize(r) - obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(float64(size)) + obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size)) }) } return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { next.ServeHTTP(w, r) size := computeApproximateRequestSize(r) - obs.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Observe(float64(size)) + obs.With(labels(code, method, r.Method, 0)).Observe(float64(size)) }) } @@ -211,11 +179,8 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler, // http.Handler to observe the response size with the provided ObserverVec. The // ObserverVec must have valid metric and label names and must have zero, one, // or two non-const non-curried labels. For those, the only allowed label names -// are "code" and "method". The function panics otherwise. For the "method" -// label a predefined default label value set is used to filter given values. -// Values besides predefined values will count as `unknown` method. -// `WithExtraMethods` can be used to add more methods to the set. The Observe -// method of the Observer in the ObserverVec is called with the response size in +// are "code" and "method". The function panics otherwise. The Observe method of +// the Observer in the ObserverVec is called with the response size in // bytes. Partitioning happens by HTTP status code and/or HTTP method if the // respective instance label names are present in the ObserverVec. For // unpartitioned observations, use an ObserverVec with zero labels. Note that @@ -226,18 +191,12 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler, // If the wrapped Handler panics, no values are reported. // // See the example for InstrumentHandlerDuration for example usage. 
-func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.Handler { - mwOpts := &option{} - for _, o := range opts { - o(mwOpts) - } - +func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler { code, method := checkLabels(obs) - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { d := newDelegator(w, nil) next.ServeHTTP(d, r) - obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(float64(d.Written())) + obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written())) }) } @@ -331,7 +290,7 @@ func isLabelCurried(c prometheus.Collector, label string) bool { // unnecessary allocations on each request. var emptyLabels = prometheus.Labels{} -func labels(code, method bool, reqMethod string, status int, extraMethods ...string) prometheus.Labels { +func labels(code, method bool, reqMethod string, status int) prometheus.Labels { if !(code || method) { return emptyLabels } @@ -341,7 +300,7 @@ func labels(code, method bool, reqMethod string, status int, extraMethods ...str labels["code"] = sanitizeCode(status) } if method { - labels["method"] = sanitizeMethod(reqMethod, extraMethods...) + labels["method"] = sanitizeMethod(reqMethod) } return labels @@ -371,12 +330,7 @@ func computeApproximateRequestSize(r *http.Request) int { return s } -// If the wrapped http.Handler has a known method, it will be sanitized and returned. -// Otherwise, "unknown" will be returned. The known method list can be extended -// as needed by using extraMethods parameter. -func sanitizeMethod(m string, extraMethods ...string) string { - // See https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods for - // the methods chosen as default. +func sanitizeMethod(m string) string { switch m { case "GET", "get": return "get" @@ -394,25 +348,15 @@ func sanitizeMethod(m string, extraMethods ...string) string { return "options" case "NOTIFY", "notify": return "notify" - case "TRACE", "trace": - return "trace" - case "PATCH", "patch": - return "patch" default: - for _, method := range extraMethods { - if strings.EqualFold(m, method) { - return strings.ToLower(m) - } - } - return "unknown" + return strings.ToLower(m) } } // If the wrapped http.Handler has not set a status code, i.e. the value is -// currently 0, sanitizeCode will return 200, for consistency with behavior in +// currently 0, santizeCode will return 200, for consistency with behavior in // the stdlib. func sanitizeCode(s int) string { - // See for accepted codes https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml switch s { case 100: return "100" @@ -509,9 +453,6 @@ func sanitizeCode(s int) string { return "511" default: - if s >= 100 && s <= 599 { - return strconv.Itoa(s) - } - return "unknown" + return strconv.Itoa(s) } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go deleted file mode 100644 index 35e41bd1e..000000000 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2022 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package promhttp - -// Option are used to configure a middleware or round tripper.. -type Option func(*option) - -type option struct { - extraMethods []string -} - -// WithExtraMethods adds additional HTTP methods to the list of allowed methods. -// See https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods for the default list. -// -// See the example for ExampleInstrumentHandlerWithExtraMethods for example usage. -func WithExtraMethods(methods ...string) Option { - return func(o *option) { - o.extraMethods = methods - } -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 17acf1106..de770952a 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -329,7 +329,7 @@ github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators/ # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors -# github.com/prometheus/client_golang v1.11.1 +# github.com/prometheus/client_golang v1.11.0 ## explicit; go 1.13 github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/collectors From 126fdf80a549cd5bcf49be112dd09ac075c5a506 Mon Sep 17 00:00:00 2001 From: Stephen Benjamin Date: Tue, 29 Mar 2022 06:29:46 -0400 Subject: [PATCH 2/2] Revert "Performance addon operator code base move to NTO (#322)" This reverts commit 7e304f086e888828558de32e4122f14fa8568e41. --- Makefile | 51 +- assets/pao/assets.go | 19 - assets/pao/configs/99-low-latency-hooks.json | 11 - assets/pao/configs/99-netdev-rps.rules | 1 - assets/pao/configs/99-runtimes.conf | 20 - assets/pao/scripts/hugepages-allocation.sh | 26 - assets/pao/scripts/low-latency-hooks.sh | 35 - assets/pao/scripts/set-rps-mask.sh | 36 - assets/pao/tuned/openshift-node-performance | 132 -- cmd/cluster-node-tuning-operator/main.go | 57 +- cmd/performance-profile-creator/README.md | 99 - cmd/performance-profile-creator/cmd/root.go | 573 ----- cmd/performance-profile-creator/main.go | 30 - ...ance.openshift.io_performanceprofiles.yaml | 679 ------ examples/pao/crd/kustomization.yaml | 6 - examples/pao/default/kustomization.yaml | 5 - examples/pao/rbac/kustomization.yaml | 3 - examples/pao/rbac/role.yaml | 100 - examples/pao/rbac/role_binding.yaml | 24 - examples/pao/samples/kustomization.yaml | 6 - .../performance_v1_performanceprofile.yaml | 25 - ...rformance_v1alpha1_performanceprofile.yaml | 25 - .../performance_v2_performanceprofile.yaml | 26 - go.mod | 142 +- go.sum | 658 +++--- hack/boilerplate.go.txt | 15 - hack/build-latency-test-bin.sh | 10 - hack/build-test-bin.sh | 18 - hack/clean-deploy.sh | 61 - hack/deploy.sh | 53 - hack/docs-generate.sh | 13 - hack/label-worker-cnf.sh | 13 - hack/lint.sh | 17 - hack/release-note.sh | 40 - hack/run-functests.sh | 31 - hack/run-latency-testing.sh | 25 - hack/run-perf-profile-creator-functests.sh | 25 - hack/run-perf-profile-creator.sh | 81 - hack/run-render-command-functests.sh | 25 - hack/show-cluster-version.sh | 9 - hack/unittests.sh | 31 - hack/verify-generated.sh | 7 - hack/wait-for-mcp.sh | 65 - manifests/20-performance-profile.crd.yaml | 490 ---- manifests/40-rbac.yaml | 16 +- manifests/45-webhook-configuration.yaml | 55 
- manifests/50-operator-ibm-cloud-managed.yaml | 11 - manifests/50-operator.yaml | 12 - pkg/apis/pao/performance_suite_test.go | 13 - pkg/apis/pao/performance_test.go | 99 - pkg/apis/pao/v1/groupversion_info.go | 36 - .../pao/v1/performanceprofile_conversion.go | 4 - pkg/apis/pao/v1/performanceprofile_types.go | 198 -- pkg/apis/pao/v1/performanceprofile_webhook.go | 12 - pkg/apis/pao/v1/zz_generated.deepcopy.go | 363 --- pkg/apis/pao/v1alpha1/groupversion_info.go | 36 - .../v1alpha1/performanceprofile_conversion.go | 221 -- .../pao/v1alpha1/performanceprofile_types.go | 163 -- .../pao/v1alpha1/zz_generated.deepcopy.go | 296 --- pkg/apis/pao/v2/groupversion_info.go | 36 - .../pao/v2/performanceprofile_conversion.go | 293 --- pkg/apis/pao/v2/performanceprofile_types.go | 198 -- .../pao/v2/performanceprofile_validation.go | 291 --- ...erformanceprofile_validation_suite_test.go | 13 - .../v2/performanceprofile_validation_test.go | 322 --- pkg/apis/pao/v2/performanceprofile_webhook.go | 23 - pkg/apis/pao/v2/zz_generated.deepcopy.go | 363 --- pkg/pao/cmd/render/render.go | 165 -- .../performanceprofile/components/consts.go | 31 - .../components/kubeletconfig/kubeletconfig.go | 167 -- .../kubeletconfig/kubeletconfig_suite_test.go | 13 - .../kubeletconfig/kubeletconfig_test.go | 169 -- .../components/machineconfig/machineconfig.go | 354 --- .../machineconfig/machineconfig_suite_test.go | 13 - .../machineconfig/machineconfig_test.go | 77 - .../components/manifestset/manifestset.go | 79 - .../components/profile/profile.go | 57 - .../components/profile/profile_suite_test.go | 13 - .../components/profile/profile_test.go | 67 - .../components/runtimeclass/runtimeclass.go | 27 - .../components/tuned/tuned.go | 204 -- .../components/tuned/tuned_suite_test.go | 13 - .../components/tuned/tuned_test.go | 305 --- .../performanceprofile/components/utils.go | 141 -- .../components/utils_suite_test.go | 13 - .../components/utils_test.go | 101 - .../performanceprofile_controller.go | 696 ------ ...erformanceprofile_controller_suite_test.go | 30 - .../performanceprofile_controller_test.go | 884 ------- pkg/pao/controller/resources.go | 331 --- pkg/pao/controller/status.go | 296 --- pkg/pao/profilecreator/helper.go | 18 - pkg/pao/profilecreator/mcp.go | 197 -- pkg/pao/profilecreator/profilecreator.go | 556 ----- .../profilecreator_suite_test.go | 13 - pkg/pao/profilecreator/profilecreator_test.go | 844 ------- pkg/pao/utils/testing/testing.go | 100 - .../base/performance/kustomization.yaml | 6 - .../base/performance/machine_config_pool.yaml | 17 - .../ci-cluster/performance/kustomization.yaml | 6 - .../performance/kustomization.yaml | 9 - .../performance/performance_profile.yaml | 27 - .../performance/kustomization.yaml | 8 - .../performance/performance_profile.yaml | 29 - .../performance/kustomization.yaml | 5 - .../performance/machine_config_pool.yaml | 17 - .../performance_profile_creator_suite_test.go | 22 - .../1_performance-profile_creator/ppc.go | 263 --- .../1_render_command/render_suite_test.go | 59 - .../1_render_command/render_test.go | 104 - test/e2e/pao/functests/0_config/config.go | 197 -- .../test_suite_performance_config_test.go | 31 - .../functests/1_performance/cpu_management.go | 682 ------ .../pao/functests/1_performance/hugepages.go | 213 -- .../pao/functests/1_performance/netqueues.go | 364 --- .../functests/1_performance/performance.go | 1333 ----------- .../pao/functests/1_performance/rt-kernel.go | 74 - .../test_suite_performance_test.go | 51 - .../1_performance/topology_manager.go | 47 - 
.../functests/2_performance_update/kubelet.go | 196 -- .../test_suite_performance_update_test.go | 51 - .../2_performance_update/updating_profile.go | 589 ----- .../functests/3_performance_status/status.go | 224 -- .../test_suite_performance_status_test.go | 50 - test/e2e/pao/functests/4_latency/latency.go | 511 ----- .../4_latency/test_suite_latency_test.go | 53 - .../5_latency_testing_suite_test.go | 72 - .../5_latency_testing/latency_testing.go | 276 --- test/e2e/pao/functests/README.txt | 5 - test/e2e/pao/functests/test.go | 1 - test/e2e/pao/functests/utils/clean/clean.go | 70 - .../e2e/pao/functests/utils/client/clients.go | 122 - .../pao/functests/utils/cluster/cluster.go | 31 - test/e2e/pao/functests/utils/consts.go | 99 - .../functests/utils/daemonset/daemonset.go | 48 - .../functests/utils/discovery/discovery.go | 87 - test/e2e/pao/functests/utils/events/events.go | 19 - test/e2e/pao/functests/utils/images/images.go | 27 - .../e2e/pao/functests/utils/images/prepull.go | 94 - .../e2e/pao/functests/utils/junit/reporter.go | 18 - test/e2e/pao/functests/utils/log/log.go | 52 - test/e2e/pao/functests/utils/mcps/mcps.go | 235 -- .../functests/utils/namespaces/namespaces.go | 49 - test/e2e/pao/functests/utils/nodes/nodes.go | 342 --- test/e2e/pao/functests/utils/pods/pods.go | 219 -- .../pao/functests/utils/profiles/profiles.go | 147 -- test/e2e/pao/functests/utils/tuned/tuned.go | 55 - test/e2e/pao/functests/utils/utils.go | 59 - .../core/nodes/master1.yaml | 456 ---- .../core/nodes/master2.yaml | 457 ---- .../core/nodes/master3.yaml | 457 ---- .../core/nodes/worker1.yaml | 490 ---- .../core/nodes/worker2.yaml | 465 ---- .../machineconfigpools/master.yaml | 140 -- .../machineconfigpools/worker-cnf.yaml | 157 -- .../machineconfigpools/worker.yaml | 139 -- .../nodes/worker1/sysinfo.tgz | Bin 196952 -> 0 bytes .../nodes/worker2/sysinfo.tgz | Bin 36422 -> 0 bytes .../nodes/ocp47sno-master-0.demo.lab.yaml | 405 ---- .../machineconfigpools/master.yaml | 140 -- .../machineconfigpools/worker.yaml | 139 -- .../ocp47sno-master-0.demo.lab/sysinfo.tgz | Bin 27381 -> 0 bytes .../must-gather.bare-metal.json | 1 - .../ppc-expected-info/must-gather.sno.json | 1 - .../ppc-expected-profiles/profile1.json | 6 - .../ppc-expected-profiles/profile1.yaml | 16 - .../ppc-expected-profiles/profile2.json | 7 - .../ppc-expected-profiles/profile2.yaml | 18 - .../ppc-expected-profiles/profile3.json | 11 - .../ppc-expected-profiles/profile3.yaml | 21 - .../ppc-expected-profiles/profile4.json | 10 - .../ppc-expected-profiles/profile4.yaml | 21 - .../ppc-expected-profiles/profile5.json | 9 - .../ppc-expected-profiles/profile5.yaml | 19 - .../manual_kubeletconfig.yaml | 64 - .../manual_machineconfig.yaml | 100 - .../manual_runtimeclass.yaml | 14 - .../render-expected-output/manual_tuned.yaml | 67 - tools/docs-generator/docs-generator.go | 263 --- .../RHsyseng/operator-utils/LICENSE | 201 -- .../operator-utils/pkg/validation/schema.go | 68 - .../pkg/validation/schema_sync.go | 136 -- vendor/github.com/StackExchange/wmi/LICENSE | 20 - vendor/github.com/StackExchange/wmi/README.md | 6 - .../StackExchange/wmi/swbemservices.go | 260 --- vendor/github.com/StackExchange/wmi/wmi.go | 501 ---- .../{coreos => ajeddeloh}/go-json/README | 0 .../{coreos => ajeddeloh}/go-json/decode.go | 0 .../{coreos => ajeddeloh}/go-json/encode.go | 0 .../{coreos => ajeddeloh}/go-json/fold.go | 0 .../{coreos => ajeddeloh}/go-json/indent.go | 0 .../{coreos => ajeddeloh}/go-json/scanner.go | 0 .../{coreos => ajeddeloh}/go-json/stream.go | 0 
.../{coreos => ajeddeloh}/go-json/tags.go | 0 .../asaskevich/govalidator/.gitignore | 15 - .../asaskevich/govalidator/.travis.yml | 12 - .../asaskevich/govalidator/CODE_OF_CONDUCT.md | 43 - .../asaskevich/govalidator/CONTRIBUTING.md | 63 - .../github.com/asaskevich/govalidator/LICENSE | 21 - .../asaskevich/govalidator/README.md | 619 ----- .../asaskevich/govalidator/arrays.go | 87 - .../asaskevich/govalidator/converter.go | 81 - .../github.com/asaskevich/govalidator/doc.go | 3 - .../asaskevich/govalidator/error.go | 47 - .../asaskevich/govalidator/numerics.go | 100 - .../asaskevich/govalidator/patterns.go | 107 - .../asaskevich/govalidator/types.go | 655 ------ .../asaskevich/govalidator/utils.go | 270 --- .../asaskevich/govalidator/validator.go | 1627 ------------- .../asaskevich/govalidator/wercker.yml | 15 - vendor/github.com/blang/semver/v4/LICENSE | 22 - vendor/github.com/blang/semver/v4/json.go | 23 - vendor/github.com/blang/semver/v4/range.go | 416 ---- vendor/github.com/blang/semver/v4/semver.go | 476 ---- vendor/github.com/blang/semver/v4/sort.go | 28 - vendor/github.com/blang/semver/v4/sql.go | 30 - vendor/github.com/coreos/go-systemd/LICENSE | 191 -- vendor/github.com/coreos/go-systemd/NOTICE | 5 - .../coreos/go-systemd/unit/deserialize.go | 278 --- .../coreos/go-systemd/unit/escape.go | 116 - .../coreos/go-systemd/unit/option.go | 59 - .../coreos/go-systemd/unit/serialize.go | 75 - vendor/github.com/coreos/ignition/LICENSE | 202 -- vendor/github.com/coreos/ignition/NOTICE | 5 - .../ignition/config/shared/errors/errors.go | 109 - .../config/shared/validations/unit.go | 54 - .../coreos/ignition/config/v2_2/types/ca.go | 27 - .../ignition/config/v2_2/types/config.go | 91 - .../ignition/config/v2_2/types/directory.go | 37 - .../coreos/ignition/config/v2_2/types/disk.go | 128 -- .../coreos/ignition/config/v2_2/types/file.go | 69 - .../ignition/config/v2_2/types/filesystem.go | 144 -- .../ignition/config/v2_2/types/ignition.go | 52 - .../coreos/ignition/config/v2_2/types/mode.go | 26 - .../coreos/ignition/config/v2_2/types/node.go | 73 - .../ignition/config/v2_2/types/partition.go | 77 - .../ignition/config/v2_2/types/passwd.go | 67 - .../coreos/ignition/config/v2_2/types/path.go | 28 - .../coreos/ignition/config/v2_2/types/raid.go | 57 - .../ignition/config/v2_2/types/schema.go | 246 -- .../coreos/ignition/config/v2_2/types/unit.go | 131 -- .../coreos/ignition/config/v2_2/types/url.go | 53 - .../config/v2_2/types/verification.go | 77 - .../ignition/config/validate/report/report.go | 158 -- .../coreos/ignition/v2/config/merge/merge.go | 206 +- .../v2/config/shared/errors/errors.go | 2 +- .../ignition/v2/config/util/parsingErrors.go | 2 + .../coreos/ignition/v2/config/v3_2/config.go | 10 +- .../v2/config/v3_2/types/filesystem.go | 2 - .../v2/config/v3_2/types/partition.go | 4 +- .../github.com/coreos/vcontext/json/json.go | 2 +- .../github.com/coreos/vcontext/path/path.go | 7 +- vendor/github.com/ghodss/yaml/.travis.yml | 5 +- vendor/github.com/ghodss/yaml/yaml.go | 71 +- vendor/github.com/ghodss/yaml/yaml_go110.go | 14 - vendor/github.com/go-ole/go-ole/.travis.yml | 8 - vendor/github.com/go-ole/go-ole/ChangeLog.md | 49 - vendor/github.com/go-ole/go-ole/LICENSE | 21 - vendor/github.com/go-ole/go-ole/README.md | 46 - vendor/github.com/go-ole/go-ole/appveyor.yml | 54 - vendor/github.com/go-ole/go-ole/com.go | 344 --- vendor/github.com/go-ole/go-ole/com_func.go | 174 -- vendor/github.com/go-ole/go-ole/connect.go | 192 -- vendor/github.com/go-ole/go-ole/constants.go | 153 -- 
vendor/github.com/go-ole/go-ole/error.go | 51 - vendor/github.com/go-ole/go-ole/error_func.go | 8 - .../github.com/go-ole/go-ole/error_windows.go | 24 - vendor/github.com/go-ole/go-ole/guid.go | 284 --- .../go-ole/go-ole/iconnectionpoint.go | 20 - .../go-ole/go-ole/iconnectionpoint_func.go | 21 - .../go-ole/go-ole/iconnectionpoint_windows.go | 43 - .../go-ole/iconnectionpointcontainer.go | 17 - .../go-ole/iconnectionpointcontainer_func.go | 11 - .../iconnectionpointcontainer_windows.go | 25 - vendor/github.com/go-ole/go-ole/idispatch.go | 94 - .../go-ole/go-ole/idispatch_func.go | 19 - .../go-ole/go-ole/idispatch_windows.go | 200 -- .../github.com/go-ole/go-ole/ienumvariant.go | 19 - .../go-ole/go-ole/ienumvariant_func.go | 19 - .../go-ole/go-ole/ienumvariant_windows.go | 63 - .../github.com/go-ole/go-ole/iinspectable.go | 18 - .../go-ole/go-ole/iinspectable_func.go | 15 - .../go-ole/go-ole/iinspectable_windows.go | 72 - .../go-ole/go-ole/iprovideclassinfo.go | 21 - .../go-ole/go-ole/iprovideclassinfo_func.go | 7 - .../go-ole/iprovideclassinfo_windows.go | 21 - vendor/github.com/go-ole/go-ole/itypeinfo.go | 34 - .../go-ole/go-ole/itypeinfo_func.go | 7 - .../go-ole/go-ole/itypeinfo_windows.go | 21 - vendor/github.com/go-ole/go-ole/iunknown.go | 57 - .../github.com/go-ole/go-ole/iunknown_func.go | 19 - .../go-ole/go-ole/iunknown_windows.go | 58 - vendor/github.com/go-ole/go-ole/ole.go | 157 -- .../go-ole/go-ole/oleutil/connection.go | 100 - .../go-ole/go-ole/oleutil/connection_func.go | 10 - .../go-ole/oleutil/connection_windows.go | 58 - .../go-ole/go-ole/oleutil/go-get.go | 6 - .../go-ole/go-ole/oleutil/oleutil.go | 127 - vendor/github.com/go-ole/go-ole/safearray.go | 27 - .../go-ole/go-ole/safearray_func.go | 211 -- .../go-ole/go-ole/safearray_windows.go | 337 --- .../go-ole/go-ole/safearrayconversion.go | 140 -- .../go-ole/go-ole/safearrayslices.go | 33 - vendor/github.com/go-ole/go-ole/utility.go | 101 - vendor/github.com/go-ole/go-ole/variables.go | 16 - vendor/github.com/go-ole/go-ole/variant.go | 105 - .../github.com/go-ole/go-ole/variant_386.go | 11 - .../github.com/go-ole/go-ole/variant_amd64.go | 12 - .../go-ole/go-ole/variant_date_386.go | 22 - .../go-ole/go-ole/variant_date_amd64.go | 20 - .../go-ole/go-ole/variant_ppc64le.go | 12 - .../github.com/go-ole/go-ole/variant_s390x.go | 12 - vendor/github.com/go-ole/go-ole/vt_string.go | 58 - vendor/github.com/go-ole/go-ole/winrt.go | 99 - vendor/github.com/go-ole/go-ole/winrt_doc.go | 36 - .../go-openapi/analysis/.codecov.yml | 5 - .../go-openapi/analysis/.gitattributes | 2 - .../github.com/go-openapi/analysis/.gitignore | 5 - .../go-openapi/analysis/.golangci.yml | 53 - .../go-openapi/analysis/CODE_OF_CONDUCT.md | 74 - vendor/github.com/go-openapi/analysis/LICENSE | 202 -- .../github.com/go-openapi/analysis/README.md | 31 - .../go-openapi/analysis/analyzer.go | 1064 --------- .../go-openapi/analysis/appveyor.yml | 32 - .../github.com/go-openapi/analysis/debug.go | 23 - vendor/github.com/go-openapi/analysis/doc.go | 43 - .../github.com/go-openapi/analysis/fixer.go | 79 - .../github.com/go-openapi/analysis/flatten.go | 802 ------- .../go-openapi/analysis/flatten_name.go | 293 --- .../go-openapi/analysis/flatten_options.go | 78 - .../analysis/internal/debug/debug.go | 41 - .../internal/flatten/normalize/normalize.go | 87 - .../internal/flatten/operations/operations.go | 90 - .../internal/flatten/replace/replace.go | 434 ---- .../flatten/schutils/flatten_schema.go | 29 - .../analysis/internal/flatten/sortref/keys.go | 201 -- 
.../internal/flatten/sortref/sort_ref.go | 141 -- .../github.com/go-openapi/analysis/mixin.go | 515 ----- .../github.com/go-openapi/analysis/schema.go | 256 --- .../github.com/go-openapi/errors/.gitignore | 2 - .../go-openapi/errors/.golangci.yml | 41 - .../github.com/go-openapi/errors/.travis.yml | 30 - .../go-openapi/errors/CODE_OF_CONDUCT.md | 74 - vendor/github.com/go-openapi/errors/LICENSE | 202 -- vendor/github.com/go-openapi/errors/README.md | 8 - vendor/github.com/go-openapi/errors/api.go | 164 -- vendor/github.com/go-openapi/errors/auth.go | 22 - vendor/github.com/go-openapi/errors/doc.go | 28 - .../github.com/go-openapi/errors/headers.go | 85 - .../go-openapi/errors/middleware.go | 51 - .../github.com/go-openapi/errors/parsing.go | 59 - vendor/github.com/go-openapi/errors/schema.go | 588 ----- .../github.com/go-openapi/loads/.editorconfig | 26 - vendor/github.com/go-openapi/loads/.gitignore | 4 - .../github.com/go-openapi/loads/.golangci.yml | 44 - .../github.com/go-openapi/loads/.travis.yml | 25 - .../go-openapi/loads/CODE_OF_CONDUCT.md | 74 - vendor/github.com/go-openapi/loads/LICENSE | 202 -- vendor/github.com/go-openapi/loads/README.md | 6 - vendor/github.com/go-openapi/loads/doc.go | 21 - vendor/github.com/go-openapi/loads/loaders.go | 134 -- vendor/github.com/go-openapi/loads/options.go | 61 - vendor/github.com/go-openapi/loads/spec.go | 266 --- .../github.com/go-openapi/spec/.editorconfig | 26 - vendor/github.com/go-openapi/spec/.gitignore | 2 - .../github.com/go-openapi/spec/.golangci.yml | 42 - .../go-openapi/spec/CODE_OF_CONDUCT.md | 74 - vendor/github.com/go-openapi/spec/LICENSE | 202 -- vendor/github.com/go-openapi/spec/README.md | 34 - .../github.com/go-openapi/spec/appveyor.yml | 32 - vendor/github.com/go-openapi/spec/bindata.go | 297 --- vendor/github.com/go-openapi/spec/cache.go | 98 - .../go-openapi/spec/contact_info.go | 57 - vendor/github.com/go-openapi/spec/debug.go | 49 - vendor/github.com/go-openapi/spec/errors.go | 19 - vendor/github.com/go-openapi/spec/expander.go | 594 ----- .../go-openapi/spec/external_docs.go | 24 - vendor/github.com/go-openapi/spec/header.go | 203 -- vendor/github.com/go-openapi/spec/info.go | 165 -- vendor/github.com/go-openapi/spec/items.go | 234 -- vendor/github.com/go-openapi/spec/license.go | 56 - .../github.com/go-openapi/spec/normalizer.go | 203 -- .../go-openapi/spec/normalizer_nonwindows.go | 43 - .../go-openapi/spec/normalizer_windows.go | 154 -- .../github.com/go-openapi/spec/operation.go | 397 ---- .../github.com/go-openapi/spec/parameter.go | 326 --- .../github.com/go-openapi/spec/path_item.go | 87 - vendor/github.com/go-openapi/spec/paths.go | 97 - .../github.com/go-openapi/spec/properties.go | 91 - vendor/github.com/go-openapi/spec/ref.go | 193 -- vendor/github.com/go-openapi/spec/resolver.go | 127 - vendor/github.com/go-openapi/spec/response.go | 152 -- .../github.com/go-openapi/spec/responses.go | 127 - vendor/github.com/go-openapi/spec/schema.go | 646 ------ .../go-openapi/spec/schema_loader.go | 338 --- .../go-openapi/spec/security_scheme.go | 170 -- vendor/github.com/go-openapi/spec/spec.go | 78 - vendor/github.com/go-openapi/spec/swagger.go | 448 ---- vendor/github.com/go-openapi/spec/tag.go | 75 - .../github.com/go-openapi/spec/validations.go | 215 -- .../github.com/go-openapi/spec/xml_object.go | 68 - .../go-openapi/strfmt/.editorconfig | 26 - .../go-openapi/strfmt/.gitattributes | 2 - .../github.com/go-openapi/strfmt/.gitignore | 2 - .../go-openapi/strfmt/.golangci.yml | 49 - 
.../go-openapi/strfmt/CODE_OF_CONDUCT.md | 74 - vendor/github.com/go-openapi/strfmt/LICENSE | 202 -- vendor/github.com/go-openapi/strfmt/README.md | 88 - vendor/github.com/go-openapi/strfmt/bson.go | 165 -- vendor/github.com/go-openapi/strfmt/date.go | 187 -- .../github.com/go-openapi/strfmt/default.go | 2035 ----------------- vendor/github.com/go-openapi/strfmt/doc.go | 18 - .../github.com/go-openapi/strfmt/duration.go | 211 -- vendor/github.com/go-openapi/strfmt/format.go | 326 --- vendor/github.com/go-openapi/strfmt/time.go | 294 --- vendor/github.com/go-openapi/strfmt/ulid.go | 225 -- .../github.com/go-openapi/swag/.gitattributes | 2 - .../github.com/go-openapi/swag/.golangci.yml | 11 - .../go-openapi/{spec => swag}/.travis.yml | 16 +- vendor/github.com/go-openapi/swag/file.go | 33 - .../github.com/go-openapi/swag/post_go18.go | 1 - .../github.com/go-openapi/swag/post_go19.go | 1 - vendor/github.com/go-openapi/swag/pre_go18.go | 1 - vendor/github.com/go-openapi/swag/pre_go19.go | 1 - .../go-openapi/validate/.editorconfig | 26 - .../go-openapi/validate/.gitattributes | 2 - .../github.com/go-openapi/validate/.gitignore | 5 - .../go-openapi/validate/.golangci.yml | 50 - .../go-openapi/validate/CODE_OF_CONDUCT.md | 74 - vendor/github.com/go-openapi/validate/LICENSE | 202 -- .../github.com/go-openapi/validate/README.md | 38 - .../go-openapi/validate/appveyor.yml | 32 - .../github.com/go-openapi/validate/context.go | 56 - .../github.com/go-openapi/validate/debug.go | 47 - .../go-openapi/validate/default_validator.go | 281 --- vendor/github.com/go-openapi/validate/doc.go | 85 - .../go-openapi/validate/example_validator.go | 270 --- .../github.com/go-openapi/validate/formats.go | 69 - .../github.com/go-openapi/validate/helpers.go | 324 --- .../go-openapi/validate/object_validator.go | 279 --- .../github.com/go-openapi/validate/options.go | 43 - .../github.com/go-openapi/validate/result.go | 486 ---- vendor/github.com/go-openapi/validate/rexp.go | 71 - .../github.com/go-openapi/validate/schema.go | 260 --- .../go-openapi/validate/schema_messages.go | 78 - .../go-openapi/validate/schema_option.go | 54 - .../go-openapi/validate/schema_props.go | 240 -- .../go-openapi/validate/slice_validator.go | 105 - vendor/github.com/go-openapi/validate/spec.go | 795 ------- .../go-openapi/validate/spec_messages.go | 360 --- vendor/github.com/go-openapi/validate/type.go | 177 -- .../go-openapi/validate/update-fixtures.sh | 15 - .../go-openapi/validate/validator.go | 645 ------ .../github.com/go-openapi/validate/values.go | 446 ---- vendor/github.com/go-stack/stack/.travis.yml | 15 - vendor/github.com/go-stack/stack/LICENSE.md | 21 - vendor/github.com/go-stack/stack/README.md | 38 - vendor/github.com/go-stack/stack/stack.go | 400 ---- vendor/github.com/google/gofuzz/.travis.yml | 11 +- .../github.com/google/gofuzz/CONTRIBUTING.md | 2 +- vendor/github.com/google/gofuzz/README.md | 18 - .../google/gofuzz/bytesource/bytesource.go | 81 - vendor/github.com/google/gofuzz/fuzz.go | 137 +- vendor/github.com/google/uuid/hash.go | 4 +- vendor/github.com/google/uuid/sql.go | 2 +- vendor/github.com/google/uuid/uuid.go | 10 +- vendor/github.com/google/uuid/version4.go | 8 - .../jaypipes/ghw/.get-go-packages.sh | 3 - vendor/github.com/jaypipes/ghw/.gitignore | 3 - vendor/github.com/jaypipes/ghw/.travis.yml | 25 - vendor/github.com/jaypipes/ghw/COPYING | 176 -- vendor/github.com/jaypipes/ghw/Dockerfile | 26 - vendor/github.com/jaypipes/ghw/Makefile | 39 - vendor/github.com/jaypipes/ghw/README.md | 1361 ----------- 
vendor/github.com/jaypipes/ghw/SNAPSHOT.md | 45 - vendor/github.com/jaypipes/ghw/alias.go | 151 -- vendor/github.com/jaypipes/ghw/doc.go | 314 --- vendor/github.com/jaypipes/ghw/host.go | 139 -- .../jaypipes/ghw/pkg/baseboard/baseboard.go | 84 - .../ghw/pkg/baseboard/baseboard_linux.go | 20 - .../ghw/pkg/baseboard/baseboard_stub.go | 17 - .../ghw/pkg/baseboard/baseboard_windows.go | 37 - .../github.com/jaypipes/ghw/pkg/bios/bios.go | 77 - .../jaypipes/ghw/pkg/bios/bios_linux.go | 16 - .../jaypipes/ghw/pkg/bios/bios_stub.go | 17 - .../jaypipes/ghw/pkg/bios/bios_windows.go | 32 - .../jaypipes/ghw/pkg/block/block.go | 250 -- .../jaypipes/ghw/pkg/block/block_darwin.go | 283 --- .../jaypipes/ghw/pkg/block/block_linux.go | 470 ---- .../jaypipes/ghw/pkg/block/block_stub.go | 17 - .../jaypipes/ghw/pkg/block/block_windows.go | 220 -- .../jaypipes/ghw/pkg/chassis/chassis.go | 121 - .../jaypipes/ghw/pkg/chassis/chassis_linux.go | 26 - .../jaypipes/ghw/pkg/chassis/chassis_stub.go | 17 - .../ghw/pkg/chassis/chassis_windows.go | 43 - .../jaypipes/ghw/pkg/context/context.go | 133 -- vendor/github.com/jaypipes/ghw/pkg/cpu/cpu.go | 169 -- .../jaypipes/ghw/pkg/cpu/cpu_linux.go | 220 -- .../jaypipes/ghw/pkg/cpu/cpu_stub.go | 17 - .../jaypipes/ghw/pkg/cpu/cpu_windows.go | 55 - vendor/github.com/jaypipes/ghw/pkg/gpu/gpu.go | 95 - .../jaypipes/ghw/pkg/gpu/gpu_linux.go | 152 -- .../jaypipes/ghw/pkg/gpu/gpu_stub.go | 17 - .../jaypipes/ghw/pkg/gpu/gpu_windows.go | 131 -- .../jaypipes/ghw/pkg/linuxdmi/dmi_linux.go | 29 - .../jaypipes/ghw/pkg/linuxpath/path_linux.go | 115 - .../jaypipes/ghw/pkg/marshal/marshal.go | 47 - .../jaypipes/ghw/pkg/memory/memory.go | 80 - .../jaypipes/ghw/pkg/memory/memory_cache.go | 101 - .../ghw/pkg/memory/memory_cache_linux.go | 188 -- .../jaypipes/ghw/pkg/memory/memory_linux.go | 237 -- .../jaypipes/ghw/pkg/memory/memory_stub.go | 17 - .../jaypipes/ghw/pkg/memory/memory_windows.go | 72 - vendor/github.com/jaypipes/ghw/pkg/net/net.go | 83 - .../jaypipes/ghw/pkg/net/net_linux.go | 222 -- .../jaypipes/ghw/pkg/net/net_stub.go | 17 - .../jaypipes/ghw/pkg/net/net_windows.go | 65 - .../jaypipes/ghw/pkg/option/option.go | 246 -- .../jaypipes/ghw/pkg/pci/address/address.go | 55 - vendor/github.com/jaypipes/ghw/pkg/pci/pci.go | 206 -- .../jaypipes/ghw/pkg/pci/pci_linux.go | 378 --- .../jaypipes/ghw/pkg/pci/pci_stub.go | 30 - .../jaypipes/ghw/pkg/product/product.go | 100 - .../jaypipes/ghw/pkg/product/product_linux.go | 23 - .../jaypipes/ghw/pkg/product/product_stub.go | 17 - .../ghw/pkg/product/product_windows.go | 45 - .../jaypipes/ghw/pkg/snapshot/clonetree.go | 263 --- .../ghw/pkg/snapshot/clonetree_block.go | 221 -- .../ghw/pkg/snapshot/clonetree_gpu.go | 33 - .../ghw/pkg/snapshot/clonetree_net.go | 31 - .../ghw/pkg/snapshot/clonetree_pci.go | 148 -- .../jaypipes/ghw/pkg/snapshot/pack.go | 112 - .../jaypipes/ghw/pkg/snapshot/testdata.tar.gz | Bin 485 -> 0 bytes .../jaypipes/ghw/pkg/snapshot/trace.go | 19 - .../jaypipes/ghw/pkg/snapshot/unpack.go | 129 -- .../jaypipes/ghw/pkg/topology/topology.go | 128 -- .../ghw/pkg/topology/topology_linux.go | 100 - .../ghw/pkg/topology/topology_stub.go | 17 - .../ghw/pkg/topology/topology_windows.go | 156 -- .../jaypipes/ghw/pkg/unitutil/unit.go | 37 - .../github.com/jaypipes/ghw/pkg/util/util.go | 53 - .../jaypipes/pcidb/.get-go-packages.sh | 3 - vendor/github.com/jaypipes/pcidb/.gitignore | 2 - vendor/github.com/jaypipes/pcidb/.travis.yml | 48 - vendor/github.com/jaypipes/pcidb/COPYING | 176 -- vendor/github.com/jaypipes/pcidb/Gopkg.lock | 17 - 
vendor/github.com/jaypipes/pcidb/Gopkg.toml | 34 - vendor/github.com/jaypipes/pcidb/LICENSE | 201 -- vendor/github.com/jaypipes/pcidb/Makefile | 46 - vendor/github.com/jaypipes/pcidb/README.md | 413 ---- vendor/github.com/jaypipes/pcidb/context.go | 86 - vendor/github.com/jaypipes/pcidb/discover.go | 111 - vendor/github.com/jaypipes/pcidb/main.go | 198 -- vendor/github.com/jaypipes/pcidb/parse.go | 163 -- .../github.com/mitchellh/go-homedir/LICENSE | 21 - .../github.com/mitchellh/go-homedir/README.md | 14 - .../mitchellh/go-homedir/homedir.go | 167 -- .../mitchellh/mapstructure/CHANGELOG.md | 73 - .../github.com/mitchellh/mapstructure/LICENSE | 21 - .../mitchellh/mapstructure/README.md | 46 - .../mitchellh/mapstructure/decode_hooks.go | 256 --- .../mitchellh/mapstructure/error.go | 50 - .../mitchellh/mapstructure/mapstructure.go | 1462 ------------ .../moby/spdystream/CONTRIBUTING.md | 13 - vendor/github.com/moby/spdystream/LICENSE | 202 -- vendor/github.com/moby/spdystream/MAINTAINERS | 40 - vendor/github.com/moby/spdystream/NOTICE | 5 - vendor/github.com/moby/spdystream/README.md | 77 - .../github.com/moby/spdystream/connection.go | 972 -------- vendor/github.com/moby/spdystream/handlers.go | 52 - vendor/github.com/moby/spdystream/priority.go | 114 - .../moby/spdystream/spdy/dictionary.go | 203 -- .../github.com/moby/spdystream/spdy/read.go | 364 --- .../github.com/moby/spdystream/spdy/types.go | 291 --- .../github.com/moby/spdystream/spdy/write.go | 334 --- vendor/github.com/moby/spdystream/stream.go | 343 --- vendor/github.com/moby/spdystream/utils.go | 32 - vendor/github.com/oklog/ulid/.gitignore | 29 - vendor/github.com/oklog/ulid/.travis.yml | 16 - vendor/github.com/oklog/ulid/AUTHORS.md | 2 - vendor/github.com/oklog/ulid/CHANGELOG.md | 33 - vendor/github.com/oklog/ulid/CONTRIBUTING.md | 17 - vendor/github.com/oklog/ulid/Gopkg.lock | 15 - vendor/github.com/oklog/ulid/Gopkg.toml | 26 - vendor/github.com/oklog/ulid/LICENSE | 201 -- vendor/github.com/oklog/ulid/README.md | 150 -- vendor/github.com/oklog/ulid/ulid.go | 614 ----- .../onsi/ginkgo/extensions/table/table.go | 110 - .../ginkgo/extensions/table/table_entry.go | 129 -- .../onsi/gomega/gstruct/elements.go | 231 -- .../gomega/gstruct/errors/nested_types.go | 72 - .../github.com/onsi/gomega/gstruct/fields.go | 165 -- .../github.com/onsi/gomega/gstruct/ignore.go | 39 - vendor/github.com/onsi/gomega/gstruct/keys.go | 126 - .../github.com/onsi/gomega/gstruct/pointer.go | 58 - .../github.com/onsi/gomega/gstruct/types.go | 15 - ...config-operator_01_infrastructure.crd.yaml | 3 +- .../openshift/api/config/v1/types_feature.go | 6 - .../api/config/v1/types_infrastructure.go | 7 +- .../openshift/custom-resource-status/LICENSE | 201 -- .../conditions/v1/conditions.go | 82 - .../conditions/v1/doc.go | 9 - .../conditions/v1/types.go | 51 - .../conditions/v1/zz_generated.deepcopy.go | 23 - .../v1/types.go | 30 +- .../v1/zz_generated.deepcopy.go | 27 - .../clientset/versioned/clientset.go | 34 +- ...achineconfiguration.openshift.io_client.go | 20 +- .../github.com/operator-framework/api/LICENSE | 201 -- .../api/pkg/lib/version/version.go | 67 - .../api/pkg/operators/doc.go | 4 - .../api/pkg/operators/register.go | 31 - .../api/pkg/operators/v1/doc.go | 4 - .../api/pkg/operators/v1/groupversion_info.go | 28 - .../api/pkg/operators/v1/operator_types.go | 88 - .../operators/v1/operatorcondition_types.go | 49 - .../pkg/operators/v1/operatorgroup_types.go | 158 -- .../pkg/operators/v1/zz_generated.deepcopy.go | 430 ---- 
.../operators/v1alpha1/catalogsource_types.go | 253 -- .../v1alpha1/clusterserviceversion.go | 208 -- .../v1alpha1/clusterserviceversion_types.go | 713 ------ .../api/pkg/operators/v1alpha1/doc.go | 6 - .../operators/v1alpha1/installplan_types.go | 384 ---- .../api/pkg/operators/v1alpha1/register.go | 55 - .../operators/v1alpha1/subscription_types.go | 328 --- .../v1alpha1/zz_generated.deepcopy.go | 1551 ------------- .../operator-lifecycle-manager/LICENSE | 201 -- .../pkg/api/apis/operators/doc.go | 5 - .../pkg/api/apis/operators/register.go | 3 - .../operators/v1alpha1/catalogsource_types.go | 54 - .../v1alpha1/clusterserviceversion.go | 57 - .../v1alpha1/clusterserviceversion_types.go | 284 --- .../pkg/api/apis/operators/v1alpha1/doc.go | 3 - .../operators/v1alpha1/installplan_types.go | 285 --- .../api/apis/operators/v1alpha1/register.go | 48 - .../operators/v1alpha1/subscription_types.go | 81 - .../v1alpha1/zz_generated.deepcopy.go | 893 -------- vendor/github.com/sirupsen/logrus/.gitignore | 4 - .../github.com/sirupsen/logrus/.golangci.yml | 40 - vendor/github.com/sirupsen/logrus/.travis.yml | 15 - .../github.com/sirupsen/logrus/CHANGELOG.md | 259 --- vendor/github.com/sirupsen/logrus/LICENSE | 21 - vendor/github.com/sirupsen/logrus/README.md | 513 ----- vendor/github.com/sirupsen/logrus/alt_exit.go | 76 - .../github.com/sirupsen/logrus/appveyor.yml | 14 - .../github.com/sirupsen/logrus/buffer_pool.go | 52 - vendor/github.com/sirupsen/logrus/doc.go | 26 - vendor/github.com/sirupsen/logrus/entry.go | 431 ---- vendor/github.com/sirupsen/logrus/exported.go | 270 --- .../github.com/sirupsen/logrus/formatter.go | 78 - vendor/github.com/sirupsen/logrus/hooks.go | 34 - .../sirupsen/logrus/json_formatter.go | 128 -- vendor/github.com/sirupsen/logrus/logger.go | 404 ---- vendor/github.com/sirupsen/logrus/logrus.go | 186 -- .../logrus/terminal_check_appengine.go | 11 - .../sirupsen/logrus/terminal_check_bsd.go | 13 - .../sirupsen/logrus/terminal_check_js.go | 7 - .../logrus/terminal_check_no_terminal.go | 11 - .../logrus/terminal_check_notappengine.go | 17 - .../sirupsen/logrus/terminal_check_solaris.go | 11 - .../sirupsen/logrus/terminal_check_unix.go | 13 - .../sirupsen/logrus/terminal_check_windows.go | 27 - .../sirupsen/logrus/text_formatter.go | 339 --- vendor/github.com/sirupsen/logrus/writer.go | 70 - vendor/go.mongodb.org/mongo-driver/LICENSE | 201 -- .../go.mongodb.org/mongo-driver/bson/bson.go | 52 - .../mongo-driver/bson/bson_1_8.go | 81 - .../bson/bsoncodec/array_codec.go | 50 - .../mongo-driver/bson/bsoncodec/bsoncodec.go | 216 -- .../bson/bsoncodec/byte_slice_codec.go | 111 - .../bson/bsoncodec/cond_addr_codec.go | 63 - .../bson/bsoncodec/default_value_decoders.go | 1717 -------------- .../bson/bsoncodec/default_value_encoders.go | 773 ------- .../mongo-driver/bson/bsoncodec/doc.go | 84 - .../bson/bsoncodec/empty_interface_codec.go | 140 -- .../mongo-driver/bson/bsoncodec/map_codec.go | 297 --- .../mongo-driver/bson/bsoncodec/mode.go | 65 - .../bson/bsoncodec/pointer_codec.go | 109 - .../mongo-driver/bson/bsoncodec/proxy.go | 14 - .../mongo-driver/bson/bsoncodec/registry.go | 474 ---- .../bson/bsoncodec/slice_codec.go | 199 -- .../bson/bsoncodec/string_codec.go | 119 - .../bson/bsoncodec/struct_codec.go | 671 ------ .../bson/bsoncodec/struct_tag_parser.go | 139 -- .../mongo-driver/bson/bsoncodec/time_codec.go | 127 - .../mongo-driver/bson/bsoncodec/types.go | 82 - .../mongo-driver/bson/bsoncodec/uint_codec.go | 173 -- .../bsonoptions/byte_slice_codec_options.go | 38 - 
.../empty_interface_codec_options.go | 38 - .../bson/bsonoptions/map_codec_options.go | 67 - .../bson/bsonoptions/slice_codec_options.go | 38 - .../bson/bsonoptions/string_codec_options.go | 41 - .../bson/bsonoptions/struct_codec_options.go | 87 - .../bson/bsonoptions/time_codec_options.go | 38 - .../bson/bsonoptions/uint_codec_options.go | 38 - .../mongo-driver/bson/bsonrw/copier.go | 445 ---- .../mongo-driver/bson/bsonrw/doc.go | 9 - .../bson/bsonrw/extjson_parser.go | 806 ------- .../bson/bsonrw/extjson_reader.go | 644 ------ .../bson/bsonrw/extjson_tables.go | 223 -- .../bson/bsonrw/extjson_wrappers.go | 492 ---- .../bson/bsonrw/extjson_writer.go | 737 ------ .../mongo-driver/bson/bsonrw/json_scanner.go | 528 ----- .../mongo-driver/bson/bsonrw/mode.go | 108 - .../mongo-driver/bson/bsonrw/reader.go | 63 - .../mongo-driver/bson/bsonrw/value_reader.go | 877 ------- .../mongo-driver/bson/bsonrw/value_writer.go | 606 ----- .../mongo-driver/bson/bsonrw/writer.go | 102 - .../mongo-driver/bson/bsontype/bsontype.go | 95 - .../mongo-driver/bson/decoder.go | 118 - .../go.mongodb.org/mongo-driver/bson/doc.go | 138 -- .../mongo-driver/bson/encoder.go | 99 - .../mongo-driver/bson/marshal.go | 223 -- .../mongo-driver/bson/primitive/decimal.go | 425 ---- .../mongo-driver/bson/primitive/objectid.go | 184 -- .../mongo-driver/bson/primitive/primitive.go | 217 -- .../mongo-driver/bson/primitive_codecs.go | 111 - .../go.mongodb.org/mongo-driver/bson/raw.go | 92 - .../mongo-driver/bson/raw_element.go | 51 - .../mongo-driver/bson/raw_value.go | 309 --- .../mongo-driver/bson/registry.go | 24 - .../go.mongodb.org/mongo-driver/bson/types.go | 85 - .../mongo-driver/bson/unmarshal.go | 101 - .../mongo-driver/x/bsonx/bsoncore/array.go | 164 -- .../x/bsonx/bsoncore/bson_arraybuilder.go | 201 -- .../x/bsonx/bsoncore/bson_documentbuilder.go | 189 -- .../mongo-driver/x/bsonx/bsoncore/bsoncore.go | 862 ------- .../mongo-driver/x/bsonx/bsoncore/document.go | 410 ---- .../x/bsonx/bsoncore/document_sequence.go | 183 -- .../mongo-driver/x/bsonx/bsoncore/element.go | 152 -- .../mongo-driver/x/bsonx/bsoncore/tables.go | 223 -- .../mongo-driver/x/bsonx/bsoncore/value.go | 980 -------- .../x/net/http2/client_conn_pool.go | 47 +- vendor/golang.org/x/net/http2/errors.go | 12 - vendor/golang.org/x/net/http2/frame.go | 62 +- .../golang.org/x/net/http2/hpack/huffman.go | 38 +- vendor/golang.org/x/net/http2/pipe.go | 11 - vendor/golang.org/x/net/http2/server.go | 111 +- vendor/golang.org/x/net/http2/transport.go | 1446 +++++------- vendor/golang.org/x/net/http2/writesched.go | 4 +- .../x/net/http2/writesched_random.go | 6 +- vendor/golang.org/x/net/idna/go118.go | 14 - vendor/golang.org/x/net/idna/idna10.0.0.go | 6 +- vendor/golang.org/x/net/idna/idna9.0.0.go | 4 +- vendor/golang.org/x/net/idna/pre_go118.go | 12 - vendor/golang.org/x/net/idna/punycode.go | 36 +- vendor/howett.net/plist/.gitlab-ci.yml | 39 - vendor/howett.net/plist/LICENSE | 58 - vendor/howett.net/plist/README.md | 21 - vendor/howett.net/plist/bplist.go | 26 - vendor/howett.net/plist/bplist_generator.go | 303 --- vendor/howett.net/plist/bplist_parser.go | 353 --- vendor/howett.net/plist/decode.go | 119 - vendor/howett.net/plist/doc.go | 5 - vendor/howett.net/plist/encode.go | 126 - vendor/howett.net/plist/fuzz.go | 17 - vendor/howett.net/plist/marshal.go | 186 -- vendor/howett.net/plist/must.go | 50 - vendor/howett.net/plist/plist.go | 85 - vendor/howett.net/plist/plist_types.go | 139 -- vendor/howett.net/plist/text_generator.go | 226 -- 
vendor/howett.net/plist/text_parser.go | 515 ----- vendor/howett.net/plist/text_tables.go | 43 - vendor/howett.net/plist/typeinfo.go | 170 -- vendor/howett.net/plist/unmarshal.go | 317 --- vendor/howett.net/plist/util.go | 25 - vendor/howett.net/plist/xml_generator.go | 185 -- vendor/howett.net/plist/xml_parser.go | 216 -- vendor/howett.net/plist/zerocopy.go | 20 - vendor/howett.net/plist/zerocopy_appengine.go | 7 - .../k8s.io/api/autoscaling/v2beta1/types.go | 2 +- .../zz_generated.prerelease-lifecycle.go | 2 +- .../k8s.io/api/autoscaling/v2beta2/types.go | 1 - .../zz_generated.prerelease-lifecycle.go | 10 - .../k8s.io/api/flowcontrol/v1alpha1/types.go | 8 +- .../zz_generated.prerelease-lifecycle.go | 8 +- .../k8s.io/api/flowcontrol/v1beta1/types.go | 4 - .../zz_generated.prerelease-lifecycle.go | 28 - .../client/clientset/clientset/scheme/doc.go | 20 - .../clientset/clientset/scheme/register.go | 58 - .../apimachinery/pkg/util/httpstream/doc.go | 19 - .../pkg/util/httpstream/httpstream.go | 159 -- .../pkg/util/httpstream/spdy/connection.go | 204 -- .../pkg/util/httpstream/spdy/roundtripper.go | 372 --- .../pkg/util/httpstream/spdy/upgrade.go | 120 - .../k8s.io/apimachinery/pkg/util/rand/rand.go | 127 - .../pkg/util/remotecommand/constants.go | 53 - .../third_party/forked/golang/netutil/addr.go | 27 - vendor/k8s.io/client-go/tools/pager/pager.go | 5 +- .../client-go/tools/remotecommand/doc.go | 20 - .../tools/remotecommand/errorstream.go | 55 - .../client-go/tools/remotecommand/reader.go | 41 - .../tools/remotecommand/remotecommand.go | 142 -- .../client-go/tools/remotecommand/resize.go | 33 - .../client-go/tools/remotecommand/v1.go | 160 -- .../client-go/tools/remotecommand/v2.go | 200 -- .../client-go/tools/remotecommand/v3.go | 111 - .../client-go/tools/remotecommand/v4.go | 119 - .../k8s.io/client-go/transport/spdy/spdy.go | 105 - vendor/k8s.io/client-go/util/exec/exec.go | 52 - vendor/k8s.io/kubelet/LICENSE | 202 -- vendor/k8s.io/kubelet/config/v1beta1/doc.go | 21 - .../k8s.io/kubelet/config/v1beta1/register.go | 44 - vendor/k8s.io/kubelet/config/v1beta1/types.go | 1183 ---------- .../config/v1beta1/zz_generated.deepcopy.go | 521 ----- vendor/k8s.io/kubernetes/LICENSE | 202 -- .../kubernetes/pkg/kubelet/cm/cpuset/OWNERS | 6 - .../pkg/kubelet/cm/cpuset/cpuset.go | 344 --- vendor/kubevirt.io/qe-tools/LICENSE | 201 -- .../qe-tools/pkg/ginkgo-reporters/README.md | 36 - .../pkg/ginkgo-reporters/junit_reporter.go | 36 - .../pkg/ginkgo-reporters/polarion_reporter.go | 261 --- .../qe-tools/pkg/polarion-xml/polarion_xml.go | 105 - vendor/modules.txt | 249 +- .../pkg/cache/internal/informers_map.go | 58 +- .../pkg/client/fake/client.go | 765 ------- .../controller-runtime/pkg/client/fake/doc.go | 39 - .../structured-merge-diff/v4/typed/merge.go | 176 +- version/version.go | 5 - 819 files changed, 1364 insertions(+), 116231 deletions(-) delete mode 100644 assets/pao/assets.go delete mode 100644 assets/pao/configs/99-low-latency-hooks.json delete mode 100644 assets/pao/configs/99-netdev-rps.rules delete mode 100644 assets/pao/configs/99-runtimes.conf delete mode 100755 assets/pao/scripts/hugepages-allocation.sh delete mode 100644 assets/pao/scripts/low-latency-hooks.sh delete mode 100644 assets/pao/scripts/set-rps-mask.sh delete mode 100644 assets/pao/tuned/openshift-node-performance delete mode 100644 cmd/performance-profile-creator/README.md delete mode 100644 cmd/performance-profile-creator/cmd/root.go delete mode 100644 cmd/performance-profile-creator/main.go delete mode 100644 
examples/pao/crd/bases/performance.openshift.io_performanceprofiles.yaml delete mode 100644 examples/pao/crd/kustomization.yaml delete mode 100644 examples/pao/default/kustomization.yaml delete mode 100644 examples/pao/rbac/kustomization.yaml delete mode 100644 examples/pao/rbac/role.yaml delete mode 100644 examples/pao/rbac/role_binding.yaml delete mode 100644 examples/pao/samples/kustomization.yaml delete mode 100644 examples/pao/samples/performance_v1_performanceprofile.yaml delete mode 100644 examples/pao/samples/performance_v1alpha1_performanceprofile.yaml delete mode 100644 examples/pao/samples/performance_v2_performanceprofile.yaml delete mode 100644 hack/boilerplate.go.txt delete mode 100755 hack/build-latency-test-bin.sh delete mode 100755 hack/build-test-bin.sh delete mode 100755 hack/clean-deploy.sh delete mode 100755 hack/deploy.sh delete mode 100755 hack/docs-generate.sh delete mode 100755 hack/label-worker-cnf.sh delete mode 100755 hack/lint.sh delete mode 100755 hack/release-note.sh delete mode 100755 hack/run-functests.sh delete mode 100755 hack/run-latency-testing.sh delete mode 100755 hack/run-perf-profile-creator-functests.sh delete mode 100755 hack/run-perf-profile-creator.sh delete mode 100755 hack/run-render-command-functests.sh delete mode 100755 hack/show-cluster-version.sh delete mode 100755 hack/unittests.sh delete mode 100755 hack/verify-generated.sh delete mode 100755 hack/wait-for-mcp.sh delete mode 100644 manifests/20-performance-profile.crd.yaml delete mode 100644 manifests/45-webhook-configuration.yaml delete mode 100644 pkg/apis/pao/performance_suite_test.go delete mode 100644 pkg/apis/pao/performance_test.go delete mode 100644 pkg/apis/pao/v1/groupversion_info.go delete mode 100644 pkg/apis/pao/v1/performanceprofile_conversion.go delete mode 100644 pkg/apis/pao/v1/performanceprofile_types.go delete mode 100644 pkg/apis/pao/v1/performanceprofile_webhook.go delete mode 100644 pkg/apis/pao/v1/zz_generated.deepcopy.go delete mode 100644 pkg/apis/pao/v1alpha1/groupversion_info.go delete mode 100644 pkg/apis/pao/v1alpha1/performanceprofile_conversion.go delete mode 100644 pkg/apis/pao/v1alpha1/performanceprofile_types.go delete mode 100644 pkg/apis/pao/v1alpha1/zz_generated.deepcopy.go delete mode 100644 pkg/apis/pao/v2/groupversion_info.go delete mode 100644 pkg/apis/pao/v2/performanceprofile_conversion.go delete mode 100644 pkg/apis/pao/v2/performanceprofile_types.go delete mode 100644 pkg/apis/pao/v2/performanceprofile_validation.go delete mode 100644 pkg/apis/pao/v2/performanceprofile_validation_suite_test.go delete mode 100644 pkg/apis/pao/v2/performanceprofile_validation_test.go delete mode 100644 pkg/apis/pao/v2/performanceprofile_webhook.go delete mode 100644 pkg/apis/pao/v2/zz_generated.deepcopy.go delete mode 100644 pkg/pao/cmd/render/render.go delete mode 100644 pkg/pao/controller/performanceprofile/components/consts.go delete mode 100644 pkg/pao/controller/performanceprofile/components/kubeletconfig/kubeletconfig.go delete mode 100644 pkg/pao/controller/performanceprofile/components/kubeletconfig/kubeletconfig_suite_test.go delete mode 100644 pkg/pao/controller/performanceprofile/components/kubeletconfig/kubeletconfig_test.go delete mode 100644 pkg/pao/controller/performanceprofile/components/machineconfig/machineconfig.go delete mode 100644 pkg/pao/controller/performanceprofile/components/machineconfig/machineconfig_suite_test.go delete mode 100644 pkg/pao/controller/performanceprofile/components/machineconfig/machineconfig_test.go delete mode 
100644 pkg/pao/controller/performanceprofile/components/manifestset/manifestset.go delete mode 100644 pkg/pao/controller/performanceprofile/components/profile/profile.go delete mode 100644 pkg/pao/controller/performanceprofile/components/profile/profile_suite_test.go delete mode 100644 pkg/pao/controller/performanceprofile/components/profile/profile_test.go delete mode 100644 pkg/pao/controller/performanceprofile/components/runtimeclass/runtimeclass.go delete mode 100644 pkg/pao/controller/performanceprofile/components/tuned/tuned.go delete mode 100644 pkg/pao/controller/performanceprofile/components/tuned/tuned_suite_test.go delete mode 100644 pkg/pao/controller/performanceprofile/components/tuned/tuned_test.go delete mode 100644 pkg/pao/controller/performanceprofile/components/utils.go delete mode 100644 pkg/pao/controller/performanceprofile/components/utils_suite_test.go delete mode 100644 pkg/pao/controller/performanceprofile/components/utils_test.go delete mode 100644 pkg/pao/controller/performanceprofile_controller.go delete mode 100644 pkg/pao/controller/performanceprofile_controller_suite_test.go delete mode 100644 pkg/pao/controller/performanceprofile_controller_test.go delete mode 100644 pkg/pao/controller/resources.go delete mode 100644 pkg/pao/controller/status.go delete mode 100644 pkg/pao/profilecreator/helper.go delete mode 100644 pkg/pao/profilecreator/mcp.go delete mode 100644 pkg/pao/profilecreator/profilecreator.go delete mode 100644 pkg/pao/profilecreator/profilecreator_suite_test.go delete mode 100644 pkg/pao/profilecreator/profilecreator_test.go delete mode 100644 pkg/pao/utils/testing/testing.go delete mode 100644 test/e2e/pao/cluster-setup/base/performance/kustomization.yaml delete mode 100644 test/e2e/pao/cluster-setup/base/performance/machine_config_pool.yaml delete mode 100644 test/e2e/pao/cluster-setup/ci-cluster/performance/kustomization.yaml delete mode 100644 test/e2e/pao/cluster-setup/ci-upgrade-test-cluster/performance/kustomization.yaml delete mode 100644 test/e2e/pao/cluster-setup/ci-upgrade-test-cluster/performance/performance_profile.yaml delete mode 100644 test/e2e/pao/cluster-setup/manual-cluster/performance/kustomization.yaml delete mode 100644 test/e2e/pao/cluster-setup/manual-cluster/performance/performance_profile.yaml delete mode 100644 test/e2e/pao/cluster-setup/mcp-only-cluster/performance/kustomization.yaml delete mode 100644 test/e2e/pao/cluster-setup/mcp-only-cluster/performance/machine_config_pool.yaml delete mode 100644 test/e2e/pao/functests-performance-profile-creator/1_performance-profile_creator/performance_profile_creator_suite_test.go delete mode 100644 test/e2e/pao/functests-performance-profile-creator/1_performance-profile_creator/ppc.go delete mode 100644 test/e2e/pao/functests-render-command/1_render_command/render_suite_test.go delete mode 100644 test/e2e/pao/functests-render-command/1_render_command/render_test.go delete mode 100644 test/e2e/pao/functests/0_config/config.go delete mode 100644 test/e2e/pao/functests/0_config/test_suite_performance_config_test.go delete mode 100644 test/e2e/pao/functests/1_performance/cpu_management.go delete mode 100644 test/e2e/pao/functests/1_performance/hugepages.go delete mode 100644 test/e2e/pao/functests/1_performance/netqueues.go delete mode 100644 test/e2e/pao/functests/1_performance/performance.go delete mode 100644 test/e2e/pao/functests/1_performance/rt-kernel.go delete mode 100644 test/e2e/pao/functests/1_performance/test_suite_performance_test.go delete mode 100644 
test/e2e/pao/functests/1_performance/topology_manager.go delete mode 100644 test/e2e/pao/functests/2_performance_update/kubelet.go delete mode 100644 test/e2e/pao/functests/2_performance_update/test_suite_performance_update_test.go delete mode 100644 test/e2e/pao/functests/2_performance_update/updating_profile.go delete mode 100644 test/e2e/pao/functests/3_performance_status/status.go delete mode 100644 test/e2e/pao/functests/3_performance_status/test_suite_performance_status_test.go delete mode 100644 test/e2e/pao/functests/4_latency/latency.go delete mode 100644 test/e2e/pao/functests/4_latency/test_suite_latency_test.go delete mode 100644 test/e2e/pao/functests/5_latency_testing/5_latency_testing_suite_test.go delete mode 100644 test/e2e/pao/functests/5_latency_testing/latency_testing.go delete mode 100644 test/e2e/pao/functests/README.txt delete mode 100644 test/e2e/pao/functests/test.go delete mode 100644 test/e2e/pao/functests/utils/clean/clean.go delete mode 100644 test/e2e/pao/functests/utils/client/clients.go delete mode 100644 test/e2e/pao/functests/utils/cluster/cluster.go delete mode 100644 test/e2e/pao/functests/utils/consts.go delete mode 100644 test/e2e/pao/functests/utils/daemonset/daemonset.go delete mode 100644 test/e2e/pao/functests/utils/discovery/discovery.go delete mode 100644 test/e2e/pao/functests/utils/events/events.go delete mode 100644 test/e2e/pao/functests/utils/images/images.go delete mode 100644 test/e2e/pao/functests/utils/images/prepull.go delete mode 100644 test/e2e/pao/functests/utils/junit/reporter.go delete mode 100644 test/e2e/pao/functests/utils/log/log.go delete mode 100644 test/e2e/pao/functests/utils/mcps/mcps.go delete mode 100644 test/e2e/pao/functests/utils/namespaces/namespaces.go delete mode 100644 test/e2e/pao/functests/utils/nodes/nodes.go delete mode 100644 test/e2e/pao/functests/utils/pods/pods.go delete mode 100644 test/e2e/pao/functests/utils/profiles/profiles.go delete mode 100644 test/e2e/pao/functests/utils/tuned/tuned.go delete mode 100644 test/e2e/pao/functests/utils/utils.go delete mode 100755 test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/cluster-scoped-resources/core/nodes/master1.yaml delete mode 100755 test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/cluster-scoped-resources/core/nodes/master2.yaml delete mode 100755 test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/cluster-scoped-resources/core/nodes/master3.yaml delete mode 100755 test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/cluster-scoped-resources/core/nodes/worker1.yaml delete mode 100755 test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/cluster-scoped-resources/core/nodes/worker2.yaml delete mode 100755 
test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/cluster-scoped-resources/machineconfiguration.openshift.io/machineconfigpools/master.yaml delete mode 100755 test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/cluster-scoped-resources/machineconfiguration.openshift.io/machineconfigpools/worker-cnf.yaml delete mode 100755 test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/cluster-scoped-resources/machineconfiguration.openshift.io/machineconfigpools/worker.yaml delete mode 100644 test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/nodes/worker1/sysinfo.tgz delete mode 100644 test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/nodes/worker2/sysinfo.tgz delete mode 100755 test/e2e/pao/testdata/must-gather/must-gather.sno/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-0f7ae9cea65387b0497d6f9c28781e40f457b0f79838d65881ffb1aa779450a5/cluster-scoped-resources/core/nodes/ocp47sno-master-0.demo.lab.yaml delete mode 100755 test/e2e/pao/testdata/must-gather/must-gather.sno/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-0f7ae9cea65387b0497d6f9c28781e40f457b0f79838d65881ffb1aa779450a5/cluster-scoped-resources/machineconfiguration.openshift.io/machineconfigpools/master.yaml delete mode 100755 test/e2e/pao/testdata/must-gather/must-gather.sno/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-0f7ae9cea65387b0497d6f9c28781e40f457b0f79838d65881ffb1aa779450a5/cluster-scoped-resources/machineconfiguration.openshift.io/machineconfigpools/worker.yaml delete mode 100644 test/e2e/pao/testdata/must-gather/must-gather.sno/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-0f7ae9cea65387b0497d6f9c28781e40f457b0f79838d65881ffb1aa779450a5/nodes/ocp47sno-master-0.demo.lab/sysinfo.tgz delete mode 100644 test/e2e/pao/testdata/ppc-expected-info/must-gather.bare-metal.json delete mode 100644 test/e2e/pao/testdata/ppc-expected-info/must-gather.sno.json delete mode 100644 test/e2e/pao/testdata/ppc-expected-profiles/profile1.json delete mode 100644 test/e2e/pao/testdata/ppc-expected-profiles/profile1.yaml delete mode 100644 test/e2e/pao/testdata/ppc-expected-profiles/profile2.json delete mode 100644 test/e2e/pao/testdata/ppc-expected-profiles/profile2.yaml delete mode 100644 test/e2e/pao/testdata/ppc-expected-profiles/profile3.json delete mode 100644 test/e2e/pao/testdata/ppc-expected-profiles/profile3.yaml delete mode 100644 test/e2e/pao/testdata/ppc-expected-profiles/profile4.json delete mode 100644 test/e2e/pao/testdata/ppc-expected-profiles/profile4.yaml delete mode 100644 test/e2e/pao/testdata/ppc-expected-profiles/profile5.json delete mode 100644 test/e2e/pao/testdata/ppc-expected-profiles/profile5.yaml delete mode 100644 test/e2e/pao/testdata/render-expected-output/manual_kubeletconfig.yaml delete mode 100644 
test/e2e/pao/testdata/render-expected-output/manual_machineconfig.yaml delete mode 100644 test/e2e/pao/testdata/render-expected-output/manual_runtimeclass.yaml delete mode 100644 test/e2e/pao/testdata/render-expected-output/manual_tuned.yaml delete mode 100644 tools/docs-generator/docs-generator.go delete mode 100644 vendor/github.com/RHsyseng/operator-utils/LICENSE delete mode 100644 vendor/github.com/RHsyseng/operator-utils/pkg/validation/schema.go delete mode 100644 vendor/github.com/RHsyseng/operator-utils/pkg/validation/schema_sync.go delete mode 100644 vendor/github.com/StackExchange/wmi/LICENSE delete mode 100644 vendor/github.com/StackExchange/wmi/README.md delete mode 100644 vendor/github.com/StackExchange/wmi/swbemservices.go delete mode 100644 vendor/github.com/StackExchange/wmi/wmi.go rename vendor/github.com/{coreos => ajeddeloh}/go-json/README (100%) rename vendor/github.com/{coreos => ajeddeloh}/go-json/decode.go (100%) rename vendor/github.com/{coreos => ajeddeloh}/go-json/encode.go (100%) rename vendor/github.com/{coreos => ajeddeloh}/go-json/fold.go (100%) rename vendor/github.com/{coreos => ajeddeloh}/go-json/indent.go (100%) rename vendor/github.com/{coreos => ajeddeloh}/go-json/scanner.go (100%) rename vendor/github.com/{coreos => ajeddeloh}/go-json/stream.go (100%) rename vendor/github.com/{coreos => ajeddeloh}/go-json/tags.go (100%) delete mode 100644 vendor/github.com/asaskevich/govalidator/.gitignore delete mode 100644 vendor/github.com/asaskevich/govalidator/.travis.yml delete mode 100644 vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md delete mode 100644 vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md delete mode 100644 vendor/github.com/asaskevich/govalidator/LICENSE delete mode 100644 vendor/github.com/asaskevich/govalidator/README.md delete mode 100644 vendor/github.com/asaskevich/govalidator/arrays.go delete mode 100644 vendor/github.com/asaskevich/govalidator/converter.go delete mode 100644 vendor/github.com/asaskevich/govalidator/doc.go delete mode 100644 vendor/github.com/asaskevich/govalidator/error.go delete mode 100644 vendor/github.com/asaskevich/govalidator/numerics.go delete mode 100644 vendor/github.com/asaskevich/govalidator/patterns.go delete mode 100644 vendor/github.com/asaskevich/govalidator/types.go delete mode 100644 vendor/github.com/asaskevich/govalidator/utils.go delete mode 100644 vendor/github.com/asaskevich/govalidator/validator.go delete mode 100644 vendor/github.com/asaskevich/govalidator/wercker.yml delete mode 100644 vendor/github.com/blang/semver/v4/LICENSE delete mode 100644 vendor/github.com/blang/semver/v4/json.go delete mode 100644 vendor/github.com/blang/semver/v4/range.go delete mode 100644 vendor/github.com/blang/semver/v4/semver.go delete mode 100644 vendor/github.com/blang/semver/v4/sort.go delete mode 100644 vendor/github.com/blang/semver/v4/sql.go delete mode 100644 vendor/github.com/coreos/go-systemd/LICENSE delete mode 100644 vendor/github.com/coreos/go-systemd/NOTICE delete mode 100644 vendor/github.com/coreos/go-systemd/unit/deserialize.go delete mode 100644 vendor/github.com/coreos/go-systemd/unit/escape.go delete mode 100644 vendor/github.com/coreos/go-systemd/unit/option.go delete mode 100644 vendor/github.com/coreos/go-systemd/unit/serialize.go delete mode 100644 vendor/github.com/coreos/ignition/LICENSE delete mode 100644 vendor/github.com/coreos/ignition/NOTICE delete mode 100644 vendor/github.com/coreos/ignition/config/shared/errors/errors.go delete mode 100644 
vendor/github.com/coreos/ignition/config/shared/validations/unit.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_2/types/ca.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_2/types/config.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_2/types/directory.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_2/types/disk.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_2/types/file.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_2/types/filesystem.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_2/types/ignition.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_2/types/mode.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_2/types/node.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_2/types/partition.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_2/types/passwd.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_2/types/path.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_2/types/raid.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_2/types/schema.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_2/types/unit.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_2/types/url.go delete mode 100644 vendor/github.com/coreos/ignition/config/v2_2/types/verification.go delete mode 100644 vendor/github.com/coreos/ignition/config/validate/report/report.go delete mode 100644 vendor/github.com/ghodss/yaml/yaml_go110.go delete mode 100644 vendor/github.com/go-ole/go-ole/.travis.yml delete mode 100644 vendor/github.com/go-ole/go-ole/ChangeLog.md delete mode 100644 vendor/github.com/go-ole/go-ole/LICENSE delete mode 100644 vendor/github.com/go-ole/go-ole/README.md delete mode 100644 vendor/github.com/go-ole/go-ole/appveyor.yml delete mode 100644 vendor/github.com/go-ole/go-ole/com.go delete mode 100644 vendor/github.com/go-ole/go-ole/com_func.go delete mode 100644 vendor/github.com/go-ole/go-ole/connect.go delete mode 100644 vendor/github.com/go-ole/go-ole/constants.go delete mode 100644 vendor/github.com/go-ole/go-ole/error.go delete mode 100644 vendor/github.com/go-ole/go-ole/error_func.go delete mode 100644 vendor/github.com/go-ole/go-ole/error_windows.go delete mode 100644 vendor/github.com/go-ole/go-ole/guid.go delete mode 100644 vendor/github.com/go-ole/go-ole/iconnectionpoint.go delete mode 100644 vendor/github.com/go-ole/go-ole/iconnectionpoint_func.go delete mode 100644 vendor/github.com/go-ole/go-ole/iconnectionpoint_windows.go delete mode 100644 vendor/github.com/go-ole/go-ole/iconnectionpointcontainer.go delete mode 100644 vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_func.go delete mode 100644 vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_windows.go delete mode 100644 vendor/github.com/go-ole/go-ole/idispatch.go delete mode 100644 vendor/github.com/go-ole/go-ole/idispatch_func.go delete mode 100644 vendor/github.com/go-ole/go-ole/idispatch_windows.go delete mode 100644 vendor/github.com/go-ole/go-ole/ienumvariant.go delete mode 100644 vendor/github.com/go-ole/go-ole/ienumvariant_func.go delete mode 100644 vendor/github.com/go-ole/go-ole/ienumvariant_windows.go delete mode 100644 vendor/github.com/go-ole/go-ole/iinspectable.go delete mode 100644 vendor/github.com/go-ole/go-ole/iinspectable_func.go delete mode 100644 vendor/github.com/go-ole/go-ole/iinspectable_windows.go delete mode 100644 
vendor/github.com/go-ole/go-ole/iprovideclassinfo.go delete mode 100644 vendor/github.com/go-ole/go-ole/iprovideclassinfo_func.go delete mode 100644 vendor/github.com/go-ole/go-ole/iprovideclassinfo_windows.go delete mode 100644 vendor/github.com/go-ole/go-ole/itypeinfo.go delete mode 100644 vendor/github.com/go-ole/go-ole/itypeinfo_func.go delete mode 100644 vendor/github.com/go-ole/go-ole/itypeinfo_windows.go delete mode 100644 vendor/github.com/go-ole/go-ole/iunknown.go delete mode 100644 vendor/github.com/go-ole/go-ole/iunknown_func.go delete mode 100644 vendor/github.com/go-ole/go-ole/iunknown_windows.go delete mode 100644 vendor/github.com/go-ole/go-ole/ole.go delete mode 100644 vendor/github.com/go-ole/go-ole/oleutil/connection.go delete mode 100644 vendor/github.com/go-ole/go-ole/oleutil/connection_func.go delete mode 100644 vendor/github.com/go-ole/go-ole/oleutil/connection_windows.go delete mode 100644 vendor/github.com/go-ole/go-ole/oleutil/go-get.go delete mode 100644 vendor/github.com/go-ole/go-ole/oleutil/oleutil.go delete mode 100644 vendor/github.com/go-ole/go-ole/safearray.go delete mode 100644 vendor/github.com/go-ole/go-ole/safearray_func.go delete mode 100644 vendor/github.com/go-ole/go-ole/safearray_windows.go delete mode 100644 vendor/github.com/go-ole/go-ole/safearrayconversion.go delete mode 100644 vendor/github.com/go-ole/go-ole/safearrayslices.go delete mode 100644 vendor/github.com/go-ole/go-ole/utility.go delete mode 100644 vendor/github.com/go-ole/go-ole/variables.go delete mode 100644 vendor/github.com/go-ole/go-ole/variant.go delete mode 100644 vendor/github.com/go-ole/go-ole/variant_386.go delete mode 100644 vendor/github.com/go-ole/go-ole/variant_amd64.go delete mode 100644 vendor/github.com/go-ole/go-ole/variant_date_386.go delete mode 100644 vendor/github.com/go-ole/go-ole/variant_date_amd64.go delete mode 100644 vendor/github.com/go-ole/go-ole/variant_ppc64le.go delete mode 100644 vendor/github.com/go-ole/go-ole/variant_s390x.go delete mode 100644 vendor/github.com/go-ole/go-ole/vt_string.go delete mode 100644 vendor/github.com/go-ole/go-ole/winrt.go delete mode 100644 vendor/github.com/go-ole/go-ole/winrt_doc.go delete mode 100644 vendor/github.com/go-openapi/analysis/.codecov.yml delete mode 100644 vendor/github.com/go-openapi/analysis/.gitattributes delete mode 100644 vendor/github.com/go-openapi/analysis/.gitignore delete mode 100644 vendor/github.com/go-openapi/analysis/.golangci.yml delete mode 100644 vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md delete mode 100644 vendor/github.com/go-openapi/analysis/LICENSE delete mode 100644 vendor/github.com/go-openapi/analysis/README.md delete mode 100644 vendor/github.com/go-openapi/analysis/analyzer.go delete mode 100644 vendor/github.com/go-openapi/analysis/appveyor.yml delete mode 100644 vendor/github.com/go-openapi/analysis/debug.go delete mode 100644 vendor/github.com/go-openapi/analysis/doc.go delete mode 100644 vendor/github.com/go-openapi/analysis/fixer.go delete mode 100644 vendor/github.com/go-openapi/analysis/flatten.go delete mode 100644 vendor/github.com/go-openapi/analysis/flatten_name.go delete mode 100644 vendor/github.com/go-openapi/analysis/flatten_options.go delete mode 100644 vendor/github.com/go-openapi/analysis/internal/debug/debug.go delete mode 100644 vendor/github.com/go-openapi/analysis/internal/flatten/normalize/normalize.go delete mode 100644 vendor/github.com/go-openapi/analysis/internal/flatten/operations/operations.go delete mode 100644 
vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go delete mode 100644 vendor/github.com/go-openapi/analysis/internal/flatten/schutils/flatten_schema.go delete mode 100644 vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go delete mode 100644 vendor/github.com/go-openapi/analysis/internal/flatten/sortref/sort_ref.go delete mode 100644 vendor/github.com/go-openapi/analysis/mixin.go delete mode 100644 vendor/github.com/go-openapi/analysis/schema.go delete mode 100644 vendor/github.com/go-openapi/errors/.gitignore delete mode 100644 vendor/github.com/go-openapi/errors/.golangci.yml delete mode 100644 vendor/github.com/go-openapi/errors/.travis.yml delete mode 100644 vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md delete mode 100644 vendor/github.com/go-openapi/errors/LICENSE delete mode 100644 vendor/github.com/go-openapi/errors/README.md delete mode 100644 vendor/github.com/go-openapi/errors/api.go delete mode 100644 vendor/github.com/go-openapi/errors/auth.go delete mode 100644 vendor/github.com/go-openapi/errors/doc.go delete mode 100644 vendor/github.com/go-openapi/errors/headers.go delete mode 100644 vendor/github.com/go-openapi/errors/middleware.go delete mode 100644 vendor/github.com/go-openapi/errors/parsing.go delete mode 100644 vendor/github.com/go-openapi/errors/schema.go delete mode 100644 vendor/github.com/go-openapi/loads/.editorconfig delete mode 100644 vendor/github.com/go-openapi/loads/.gitignore delete mode 100644 vendor/github.com/go-openapi/loads/.golangci.yml delete mode 100644 vendor/github.com/go-openapi/loads/.travis.yml delete mode 100644 vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md delete mode 100644 vendor/github.com/go-openapi/loads/LICENSE delete mode 100644 vendor/github.com/go-openapi/loads/README.md delete mode 100644 vendor/github.com/go-openapi/loads/doc.go delete mode 100644 vendor/github.com/go-openapi/loads/loaders.go delete mode 100644 vendor/github.com/go-openapi/loads/options.go delete mode 100644 vendor/github.com/go-openapi/loads/spec.go delete mode 100644 vendor/github.com/go-openapi/spec/.editorconfig delete mode 100644 vendor/github.com/go-openapi/spec/.gitignore delete mode 100644 vendor/github.com/go-openapi/spec/.golangci.yml delete mode 100644 vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md delete mode 100644 vendor/github.com/go-openapi/spec/LICENSE delete mode 100644 vendor/github.com/go-openapi/spec/README.md delete mode 100644 vendor/github.com/go-openapi/spec/appveyor.yml delete mode 100644 vendor/github.com/go-openapi/spec/bindata.go delete mode 100644 vendor/github.com/go-openapi/spec/cache.go delete mode 100644 vendor/github.com/go-openapi/spec/contact_info.go delete mode 100644 vendor/github.com/go-openapi/spec/debug.go delete mode 100644 vendor/github.com/go-openapi/spec/errors.go delete mode 100644 vendor/github.com/go-openapi/spec/expander.go delete mode 100644 vendor/github.com/go-openapi/spec/external_docs.go delete mode 100644 vendor/github.com/go-openapi/spec/header.go delete mode 100644 vendor/github.com/go-openapi/spec/info.go delete mode 100644 vendor/github.com/go-openapi/spec/items.go delete mode 100644 vendor/github.com/go-openapi/spec/license.go delete mode 100644 vendor/github.com/go-openapi/spec/normalizer.go delete mode 100644 vendor/github.com/go-openapi/spec/normalizer_nonwindows.go delete mode 100644 vendor/github.com/go-openapi/spec/normalizer_windows.go delete mode 100644 vendor/github.com/go-openapi/spec/operation.go delete mode 100644 
vendor/github.com/go-openapi/spec/parameter.go delete mode 100644 vendor/github.com/go-openapi/spec/path_item.go delete mode 100644 vendor/github.com/go-openapi/spec/paths.go delete mode 100644 vendor/github.com/go-openapi/spec/properties.go delete mode 100644 vendor/github.com/go-openapi/spec/ref.go delete mode 100644 vendor/github.com/go-openapi/spec/resolver.go delete mode 100644 vendor/github.com/go-openapi/spec/response.go delete mode 100644 vendor/github.com/go-openapi/spec/responses.go delete mode 100644 vendor/github.com/go-openapi/spec/schema.go delete mode 100644 vendor/github.com/go-openapi/spec/schema_loader.go delete mode 100644 vendor/github.com/go-openapi/spec/security_scheme.go delete mode 100644 vendor/github.com/go-openapi/spec/spec.go delete mode 100644 vendor/github.com/go-openapi/spec/swagger.go delete mode 100644 vendor/github.com/go-openapi/spec/tag.go delete mode 100644 vendor/github.com/go-openapi/spec/validations.go delete mode 100644 vendor/github.com/go-openapi/spec/xml_object.go delete mode 100644 vendor/github.com/go-openapi/strfmt/.editorconfig delete mode 100644 vendor/github.com/go-openapi/strfmt/.gitattributes delete mode 100644 vendor/github.com/go-openapi/strfmt/.gitignore delete mode 100644 vendor/github.com/go-openapi/strfmt/.golangci.yml delete mode 100644 vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md delete mode 100644 vendor/github.com/go-openapi/strfmt/LICENSE delete mode 100644 vendor/github.com/go-openapi/strfmt/README.md delete mode 100644 vendor/github.com/go-openapi/strfmt/bson.go delete mode 100644 vendor/github.com/go-openapi/strfmt/date.go delete mode 100644 vendor/github.com/go-openapi/strfmt/default.go delete mode 100644 vendor/github.com/go-openapi/strfmt/doc.go delete mode 100644 vendor/github.com/go-openapi/strfmt/duration.go delete mode 100644 vendor/github.com/go-openapi/strfmt/format.go delete mode 100644 vendor/github.com/go-openapi/strfmt/time.go delete mode 100644 vendor/github.com/go-openapi/strfmt/ulid.go delete mode 100644 vendor/github.com/go-openapi/swag/.gitattributes rename vendor/github.com/go-openapi/{spec => swag}/.travis.yml (86%) delete mode 100644 vendor/github.com/go-openapi/swag/file.go delete mode 100644 vendor/github.com/go-openapi/validate/.editorconfig delete mode 100644 vendor/github.com/go-openapi/validate/.gitattributes delete mode 100644 vendor/github.com/go-openapi/validate/.gitignore delete mode 100644 vendor/github.com/go-openapi/validate/.golangci.yml delete mode 100644 vendor/github.com/go-openapi/validate/CODE_OF_CONDUCT.md delete mode 100644 vendor/github.com/go-openapi/validate/LICENSE delete mode 100644 vendor/github.com/go-openapi/validate/README.md delete mode 100644 vendor/github.com/go-openapi/validate/appveyor.yml delete mode 100644 vendor/github.com/go-openapi/validate/context.go delete mode 100644 vendor/github.com/go-openapi/validate/debug.go delete mode 100644 vendor/github.com/go-openapi/validate/default_validator.go delete mode 100644 vendor/github.com/go-openapi/validate/doc.go delete mode 100644 vendor/github.com/go-openapi/validate/example_validator.go delete mode 100644 vendor/github.com/go-openapi/validate/formats.go delete mode 100644 vendor/github.com/go-openapi/validate/helpers.go delete mode 100644 vendor/github.com/go-openapi/validate/object_validator.go delete mode 100644 vendor/github.com/go-openapi/validate/options.go delete mode 100644 vendor/github.com/go-openapi/validate/result.go delete mode 100644 vendor/github.com/go-openapi/validate/rexp.go delete mode 100644 
vendor/github.com/go-openapi/validate/schema.go delete mode 100644 vendor/github.com/go-openapi/validate/schema_messages.go delete mode 100644 vendor/github.com/go-openapi/validate/schema_option.go delete mode 100644 vendor/github.com/go-openapi/validate/schema_props.go delete mode 100644 vendor/github.com/go-openapi/validate/slice_validator.go delete mode 100644 vendor/github.com/go-openapi/validate/spec.go delete mode 100644 vendor/github.com/go-openapi/validate/spec_messages.go delete mode 100644 vendor/github.com/go-openapi/validate/type.go delete mode 100644 vendor/github.com/go-openapi/validate/update-fixtures.sh delete mode 100644 vendor/github.com/go-openapi/validate/validator.go delete mode 100644 vendor/github.com/go-openapi/validate/values.go delete mode 100644 vendor/github.com/go-stack/stack/.travis.yml delete mode 100644 vendor/github.com/go-stack/stack/LICENSE.md delete mode 100644 vendor/github.com/go-stack/stack/README.md delete mode 100644 vendor/github.com/go-stack/stack/stack.go delete mode 100644 vendor/github.com/google/gofuzz/bytesource/bytesource.go delete mode 100644 vendor/github.com/jaypipes/ghw/.get-go-packages.sh delete mode 100644 vendor/github.com/jaypipes/ghw/.gitignore delete mode 100644 vendor/github.com/jaypipes/ghw/.travis.yml delete mode 100644 vendor/github.com/jaypipes/ghw/COPYING delete mode 100644 vendor/github.com/jaypipes/ghw/Dockerfile delete mode 100644 vendor/github.com/jaypipes/ghw/Makefile delete mode 100644 vendor/github.com/jaypipes/ghw/README.md delete mode 100644 vendor/github.com/jaypipes/ghw/SNAPSHOT.md delete mode 100644 vendor/github.com/jaypipes/ghw/alias.go delete mode 100644 vendor/github.com/jaypipes/ghw/doc.go delete mode 100644 vendor/github.com/jaypipes/ghw/host.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/baseboard/baseboard.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/baseboard/baseboard_linux.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/baseboard/baseboard_stub.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/baseboard/baseboard_windows.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/bios/bios.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/bios/bios_linux.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/bios/bios_stub.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/bios/bios_windows.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/block/block.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/block/block_darwin.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/block/block_linux.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/block/block_stub.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/block/block_windows.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/chassis/chassis.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/chassis/chassis_linux.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/chassis/chassis_stub.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/chassis/chassis_windows.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/context/context.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/cpu/cpu.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/cpu/cpu_linux.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/cpu/cpu_stub.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/cpu/cpu_windows.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/gpu/gpu.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/gpu/gpu_linux.go delete mode 
100644 vendor/github.com/jaypipes/ghw/pkg/gpu/gpu_stub.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/gpu/gpu_windows.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/linuxdmi/dmi_linux.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/linuxpath/path_linux.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/marshal/marshal.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/memory/memory.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/memory/memory_cache.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/memory/memory_cache_linux.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/memory/memory_linux.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/memory/memory_stub.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/memory/memory_windows.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/net/net.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/net/net_linux.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/net/net_stub.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/net/net_windows.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/option/option.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/pci/address/address.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/pci/pci.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/pci/pci_linux.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/pci/pci_stub.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/product/product.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/product/product_linux.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/product/product_stub.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/product/product_windows.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_block.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_gpu.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_net.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_pci.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/snapshot/pack.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/snapshot/testdata.tar.gz delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/snapshot/trace.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/snapshot/unpack.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/topology/topology.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/topology/topology_linux.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/topology/topology_stub.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/topology/topology_windows.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/unitutil/unit.go delete mode 100644 vendor/github.com/jaypipes/ghw/pkg/util/util.go delete mode 100644 vendor/github.com/jaypipes/pcidb/.get-go-packages.sh delete mode 100644 vendor/github.com/jaypipes/pcidb/.gitignore delete mode 100644 vendor/github.com/jaypipes/pcidb/.travis.yml delete mode 100644 vendor/github.com/jaypipes/pcidb/COPYING delete mode 100644 vendor/github.com/jaypipes/pcidb/Gopkg.lock delete mode 100644 vendor/github.com/jaypipes/pcidb/Gopkg.toml delete mode 100644 vendor/github.com/jaypipes/pcidb/LICENSE delete mode 100644 vendor/github.com/jaypipes/pcidb/Makefile delete mode 100644 vendor/github.com/jaypipes/pcidb/README.md delete mode 100644 vendor/github.com/jaypipes/pcidb/context.go delete 
mode 100644 vendor/github.com/jaypipes/pcidb/discover.go delete mode 100644 vendor/github.com/jaypipes/pcidb/main.go delete mode 100644 vendor/github.com/jaypipes/pcidb/parse.go delete mode 100644 vendor/github.com/mitchellh/go-homedir/LICENSE delete mode 100644 vendor/github.com/mitchellh/go-homedir/README.md delete mode 100644 vendor/github.com/mitchellh/go-homedir/homedir.go delete mode 100644 vendor/github.com/mitchellh/mapstructure/CHANGELOG.md delete mode 100644 vendor/github.com/mitchellh/mapstructure/LICENSE delete mode 100644 vendor/github.com/mitchellh/mapstructure/README.md delete mode 100644 vendor/github.com/mitchellh/mapstructure/decode_hooks.go delete mode 100644 vendor/github.com/mitchellh/mapstructure/error.go delete mode 100644 vendor/github.com/mitchellh/mapstructure/mapstructure.go delete mode 100644 vendor/github.com/moby/spdystream/CONTRIBUTING.md delete mode 100644 vendor/github.com/moby/spdystream/LICENSE delete mode 100644 vendor/github.com/moby/spdystream/MAINTAINERS delete mode 100644 vendor/github.com/moby/spdystream/NOTICE delete mode 100644 vendor/github.com/moby/spdystream/README.md delete mode 100644 vendor/github.com/moby/spdystream/connection.go delete mode 100644 vendor/github.com/moby/spdystream/handlers.go delete mode 100644 vendor/github.com/moby/spdystream/priority.go delete mode 100644 vendor/github.com/moby/spdystream/spdy/dictionary.go delete mode 100644 vendor/github.com/moby/spdystream/spdy/read.go delete mode 100644 vendor/github.com/moby/spdystream/spdy/types.go delete mode 100644 vendor/github.com/moby/spdystream/spdy/write.go delete mode 100644 vendor/github.com/moby/spdystream/stream.go delete mode 100644 vendor/github.com/moby/spdystream/utils.go delete mode 100644 vendor/github.com/oklog/ulid/.gitignore delete mode 100644 vendor/github.com/oklog/ulid/.travis.yml delete mode 100644 vendor/github.com/oklog/ulid/AUTHORS.md delete mode 100644 vendor/github.com/oklog/ulid/CHANGELOG.md delete mode 100644 vendor/github.com/oklog/ulid/CONTRIBUTING.md delete mode 100644 vendor/github.com/oklog/ulid/Gopkg.lock delete mode 100644 vendor/github.com/oklog/ulid/Gopkg.toml delete mode 100644 vendor/github.com/oklog/ulid/LICENSE delete mode 100644 vendor/github.com/oklog/ulid/README.md delete mode 100644 vendor/github.com/oklog/ulid/ulid.go delete mode 100644 vendor/github.com/onsi/ginkgo/extensions/table/table.go delete mode 100644 vendor/github.com/onsi/ginkgo/extensions/table/table_entry.go delete mode 100644 vendor/github.com/onsi/gomega/gstruct/elements.go delete mode 100644 vendor/github.com/onsi/gomega/gstruct/errors/nested_types.go delete mode 100644 vendor/github.com/onsi/gomega/gstruct/fields.go delete mode 100644 vendor/github.com/onsi/gomega/gstruct/ignore.go delete mode 100644 vendor/github.com/onsi/gomega/gstruct/keys.go delete mode 100644 vendor/github.com/onsi/gomega/gstruct/pointer.go delete mode 100644 vendor/github.com/onsi/gomega/gstruct/types.go delete mode 100644 vendor/github.com/openshift/custom-resource-status/LICENSE delete mode 100644 vendor/github.com/openshift/custom-resource-status/conditions/v1/conditions.go delete mode 100644 vendor/github.com/openshift/custom-resource-status/conditions/v1/doc.go delete mode 100644 vendor/github.com/openshift/custom-resource-status/conditions/v1/types.go delete mode 100644 vendor/github.com/openshift/custom-resource-status/conditions/v1/zz_generated.deepcopy.go delete mode 100644 vendor/github.com/operator-framework/api/LICENSE delete mode 100644 
vendor/github.com/operator-framework/api/pkg/lib/version/version.go delete mode 100644 vendor/github.com/operator-framework/api/pkg/operators/doc.go delete mode 100644 vendor/github.com/operator-framework/api/pkg/operators/register.go delete mode 100644 vendor/github.com/operator-framework/api/pkg/operators/v1/doc.go delete mode 100644 vendor/github.com/operator-framework/api/pkg/operators/v1/groupversion_info.go delete mode 100644 vendor/github.com/operator-framework/api/pkg/operators/v1/operator_types.go delete mode 100644 vendor/github.com/operator-framework/api/pkg/operators/v1/operatorcondition_types.go delete mode 100644 vendor/github.com/operator-framework/api/pkg/operators/v1/operatorgroup_types.go delete mode 100644 vendor/github.com/operator-framework/api/pkg/operators/v1/zz_generated.deepcopy.go delete mode 100644 vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/catalogsource_types.go delete mode 100644 vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion.go delete mode 100644 vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion_types.go delete mode 100644 vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/doc.go delete mode 100644 vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/installplan_types.go delete mode 100644 vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/register.go delete mode 100644 vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/subscription_types.go delete mode 100644 vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/zz_generated.deepcopy.go delete mode 100644 vendor/github.com/operator-framework/operator-lifecycle-manager/LICENSE delete mode 100644 vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators/doc.go delete mode 100644 vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators/register.go delete mode 100644 vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators/v1alpha1/catalogsource_types.go delete mode 100644 vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators/v1alpha1/clusterserviceversion.go delete mode 100644 vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators/v1alpha1/clusterserviceversion_types.go delete mode 100644 vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators/v1alpha1/doc.go delete mode 100644 vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators/v1alpha1/installplan_types.go delete mode 100644 vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators/v1alpha1/register.go delete mode 100644 vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators/v1alpha1/subscription_types.go delete mode 100644 vendor/github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators/v1alpha1/zz_generated.deepcopy.go delete mode 100644 vendor/github.com/sirupsen/logrus/.gitignore delete mode 100644 vendor/github.com/sirupsen/logrus/.golangci.yml delete mode 100644 vendor/github.com/sirupsen/logrus/.travis.yml delete mode 100644 vendor/github.com/sirupsen/logrus/CHANGELOG.md delete mode 100644 vendor/github.com/sirupsen/logrus/LICENSE delete mode 100644 vendor/github.com/sirupsen/logrus/README.md delete mode 100644 vendor/github.com/sirupsen/logrus/alt_exit.go delete mode 100644 
vendor/github.com/sirupsen/logrus/appveyor.yml delete mode 100644 vendor/github.com/sirupsen/logrus/buffer_pool.go delete mode 100644 vendor/github.com/sirupsen/logrus/doc.go delete mode 100644 vendor/github.com/sirupsen/logrus/entry.go delete mode 100644 vendor/github.com/sirupsen/logrus/exported.go delete mode 100644 vendor/github.com/sirupsen/logrus/formatter.go delete mode 100644 vendor/github.com/sirupsen/logrus/hooks.go delete mode 100644 vendor/github.com/sirupsen/logrus/json_formatter.go delete mode 100644 vendor/github.com/sirupsen/logrus/logger.go delete mode 100644 vendor/github.com/sirupsen/logrus/logrus.go delete mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_appengine.go delete mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_bsd.go delete mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_js.go delete mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go delete mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go delete mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_solaris.go delete mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_unix.go delete mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_windows.go delete mode 100644 vendor/github.com/sirupsen/logrus/text_formatter.go delete mode 100644 vendor/github.com/sirupsen/logrus/writer.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/LICENSE delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bson.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bson_1_8.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/array_codec.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/bsoncodec.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/byte_slice_codec.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/cond_addr_codec.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_decoders.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/default_value_encoders.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/doc.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/empty_interface_codec.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/map_codec.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/mode.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/pointer_codec.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/proxy.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/registry.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/slice_codec.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/string_codec.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_codec.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/struct_tag_parser.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/time_codec.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/types.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsoncodec/uint_codec.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/byte_slice_codec_options.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/empty_interface_codec_options.go delete mode 100644 
vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/map_codec_options.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/slice_codec_options.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/string_codec_options.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/struct_codec_options.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/time_codec_options.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonoptions/uint_codec_options.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/copier.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/doc.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_parser.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_reader.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_tables.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_wrappers.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/extjson_writer.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/json_scanner.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/mode.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/reader.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_reader.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/value_writer.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsonrw/writer.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/bsontype/bsontype.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/decoder.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/doc.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/encoder.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/marshal.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/primitive/decimal.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/primitive/objectid.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/primitive/primitive.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/primitive_codecs.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/raw.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/raw_element.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/raw_value.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/registry.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/types.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/bson/unmarshal.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/array.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bson_arraybuilder.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bson_documentbuilder.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/bsoncore.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/document_sequence.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/element.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/tables.go delete mode 100644 vendor/go.mongodb.org/mongo-driver/x/bsonx/bsoncore/value.go delete mode 100644 vendor/golang.org/x/net/idna/go118.go delete mode 100644 
vendor/golang.org/x/net/idna/pre_go118.go delete mode 100644 vendor/howett.net/plist/.gitlab-ci.yml delete mode 100644 vendor/howett.net/plist/LICENSE delete mode 100644 vendor/howett.net/plist/README.md delete mode 100644 vendor/howett.net/plist/bplist.go delete mode 100644 vendor/howett.net/plist/bplist_generator.go delete mode 100644 vendor/howett.net/plist/bplist_parser.go delete mode 100644 vendor/howett.net/plist/decode.go delete mode 100644 vendor/howett.net/plist/doc.go delete mode 100644 vendor/howett.net/plist/encode.go delete mode 100644 vendor/howett.net/plist/fuzz.go delete mode 100644 vendor/howett.net/plist/marshal.go delete mode 100644 vendor/howett.net/plist/must.go delete mode 100644 vendor/howett.net/plist/plist.go delete mode 100644 vendor/howett.net/plist/plist_types.go delete mode 100644 vendor/howett.net/plist/text_generator.go delete mode 100644 vendor/howett.net/plist/text_parser.go delete mode 100644 vendor/howett.net/plist/text_tables.go delete mode 100644 vendor/howett.net/plist/typeinfo.go delete mode 100644 vendor/howett.net/plist/unmarshal.go delete mode 100644 vendor/howett.net/plist/util.go delete mode 100644 vendor/howett.net/plist/xml_generator.go delete mode 100644 vendor/howett.net/plist/xml_parser.go delete mode 100644 vendor/howett.net/plist/zerocopy.go delete mode 100644 vendor/howett.net/plist/zerocopy_appengine.go delete mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/doc.go delete mode 100644 vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme/register.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/rand/rand.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go delete mode 100644 vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/addr.go delete mode 100644 vendor/k8s.io/client-go/tools/remotecommand/doc.go delete mode 100644 vendor/k8s.io/client-go/tools/remotecommand/errorstream.go delete mode 100644 vendor/k8s.io/client-go/tools/remotecommand/reader.go delete mode 100644 vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go delete mode 100644 vendor/k8s.io/client-go/tools/remotecommand/resize.go delete mode 100644 vendor/k8s.io/client-go/tools/remotecommand/v1.go delete mode 100644 vendor/k8s.io/client-go/tools/remotecommand/v2.go delete mode 100644 vendor/k8s.io/client-go/tools/remotecommand/v3.go delete mode 100644 vendor/k8s.io/client-go/tools/remotecommand/v4.go delete mode 100644 vendor/k8s.io/client-go/transport/spdy/spdy.go delete mode 100644 vendor/k8s.io/client-go/util/exec/exec.go delete mode 100644 vendor/k8s.io/kubelet/LICENSE delete mode 100644 vendor/k8s.io/kubelet/config/v1beta1/doc.go delete mode 100644 vendor/k8s.io/kubelet/config/v1beta1/register.go delete mode 100644 vendor/k8s.io/kubelet/config/v1beta1/types.go delete mode 100644 vendor/k8s.io/kubelet/config/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/kubernetes/LICENSE delete mode 100644 vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpuset/OWNERS delete mode 100644 vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpuset/cpuset.go delete mode 100644 
vendor/kubevirt.io/qe-tools/LICENSE delete mode 100644 vendor/kubevirt.io/qe-tools/pkg/ginkgo-reporters/README.md delete mode 100644 vendor/kubevirt.io/qe-tools/pkg/ginkgo-reporters/junit_reporter.go delete mode 100644 vendor/kubevirt.io/qe-tools/pkg/ginkgo-reporters/polarion_reporter.go delete mode 100644 vendor/kubevirt.io/qe-tools/pkg/polarion-xml/polarion_xml.go delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/client.go delete mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/client/fake/doc.go diff --git a/Makefile b/Makefile index 3957d7644..2196e02fe 100644 --- a/Makefile +++ b/Makefile @@ -18,10 +18,11 @@ TUNED_COMMIT:=682c47c0a9eb5596c2d396b6d0dae4e297414c50 TUNED_DIR:=daemon # API-related variables -API_TYPES_DIR:=pkg/apis -API_TYPES:=$(shell find $(API_TYPES_DIR) -name \*_types.go) +API_TYPES_DIR:=./pkg/apis/tuned/v1 +API_TYPES:=$(wildcard $(API_TYPES_DIR)/*_types.go) API_ZZ_GENERATED:=zz_generated.deepcopy -API_GO_HEADER_FILE:=$(API_TYPES_DIR)/header.go.txt +API_TYPES_GENERATED:=$(API_TYPES_DIR)/$(API_ZZ_GENERATED).go +API_GO_HEADER_FILE:=pkg/apis/header.go.txt # Container image-related variables IMAGE_BUILD_CMD=podman build --no-cache @@ -32,10 +33,6 @@ ORG=openshift TAG=$(shell git rev-parse --abbrev-ref HEAD) IMAGE=$(REGISTRY)/$(ORG)/origin-cluster-node-tuning-operator:$(TAG) -# PAO variables -CLUSTER ?= "ci" -PAO_CRD_APIS :=$(addprefix ./$(API_TYPES_DIR)/pao/,v2 v1 v1alpha1) - all: build # Do not put any includes above the "all" target. We want the default target to build @@ -62,25 +59,25 @@ $(BINDATA): $(GOBINDATA_BIN) $(ASSETS) pkg/generated: $(API_TYPES) $(GO) run k8s.io/code-generator/cmd/deepcopy-gen \ - --input-dirs $(PACKAGE)/$(API_TYPES_DIR)/tuned/v1,$(PACKAGE)/$(API_TYPES_DIR)/pao/v1alpha1,$(PACKAGE)/$(API_TYPES_DIR)/pao/v1,$(PACKAGE)/$(API_TYPES_DIR)/pao/v2 \ + --input-dirs $(PACKAGE)/pkg/apis/tuned/v1 \ -O $(API_ZZ_GENERATED) \ --go-header-file $(API_GO_HEADER_FILE) \ - --bounding-dirs $(PACKAGE)/$(API_TYPES_DIR) \ + --bounding-dirs $(PACKAGE)/pkg/apis \ --output-base tmp $(GO) run k8s.io/code-generator/cmd/client-gen \ --clientset-name versioned \ --input-base '' \ - --input $(PACKAGE)/$(API_TYPES_DIR)/tuned/v1 \ + --input $(PACKAGE)/pkg/apis/tuned/v1 \ --go-header-file $(API_GO_HEADER_FILE) \ --output-package $(PACKAGE)/pkg/generated/clientset \ --output-base tmp $(GO) run k8s.io/code-generator/cmd/lister-gen \ - --input-dirs $(PACKAGE)/$(API_TYPES_DIR)/tuned/v1 \ + --input-dirs $(PACKAGE)/pkg/apis/tuned/v1 \ --go-header-file $(API_GO_HEADER_FILE) \ --output-package $(PACKAGE)/pkg/generated/listers \ --output-base tmp $(GO) run k8s.io/code-generator/cmd/informer-gen \ - --input-dirs $(PACKAGE)/$(API_TYPES_DIR)/tuned/v1 \ + --input-dirs $(PACKAGE)/pkg/apis/tuned/v1 \ --versioned-clientset-package $(PACKAGE)/pkg/generated/clientset/versioned \ --listers-package $(PACKAGE)/pkg/generated/listers \ --go-header-file $(API_GO_HEADER_FILE) \ @@ -89,6 +86,7 @@ pkg/generated: $(API_TYPES) tar c tmp | tar x --strip-components=4 touch $@ + $(GOBINDATA_BIN): $(GO) build -o $(GOBINDATA_BIN) ./vendor/github.com/kevinburke/go-bindata/go-bindata @@ -132,8 +130,7 @@ local-image-push: # $2 - apis # $3 - manifests # $4 - output -$(call add-crd-gen,tuned,./$(API_TYPES_DIR)/tuned/v1,./manifests,./manifests) -$(call add-crd-gen,pao,$(PAO_CRD_APIS),./manifests,./manifests) +$(call add-crd-gen,tuned,$(API_TYPES_DIR),./manifests,./manifests) # This will include additional actions on the update and verify targets to ensure that profile patches are applied # to 
manifest files @@ -144,29 +141,3 @@ $(call add-crd-gen,pao,$(PAO_CRD_APIS),./manifests,./manifests) $(call add-profile-manifests,manifests,./profile-patches,./manifests) .PHONY: all build deepcopy crd-schema-gen test-e2e verify verify-gofmt clean local-image local-image-push - -# PAO - -.PHONY: cluster-deploy-pao -cluster-deploy-pao: - @echo "Deploying PAO artifacts" - CLUSTER=$(CLUSTER) hack/deploy.sh - -.PHONY: cluster-label-worker-cnf -cluster-label-worker-cnf: - @echo "Adding worker-cnf label to worker nodes" - hack/label-worker-cnf.sh - -.PHONY: pao-functests -pao-functests: cluster-label-worker-cnf pao-functests-only - -.PHONY: pao-functests-only -pao-functests-only: - @echo "Cluster Version" - hack/show-cluster-version.sh - hack/run-functests.sh - -.PHONY: cluster-clean-pao -cluster-clean-pao: - @echo "Cleaning up performance addons artifacts" - hack/clean-deploy.sh diff --git a/assets/pao/assets.go b/assets/pao/assets.go deleted file mode 100644 index d4367291b..000000000 --- a/assets/pao/assets.go +++ /dev/null @@ -1,19 +0,0 @@ -package assets - -import ( - "embed" -) - -var ( - // Configs contains all files that placed under the configs directory - //go:embed configs - Configs embed.FS - - // Scripts contains all files that placed under the scripts directory - //go:embed scripts - Scripts embed.FS - - // Tuned contains all files that placed under the tuned directory - //go:embed tuned - Tuned embed.FS -) diff --git a/assets/pao/configs/99-low-latency-hooks.json b/assets/pao/configs/99-low-latency-hooks.json deleted file mode 100644 index cbf6085c6..000000000 --- a/assets/pao/configs/99-low-latency-hooks.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "version": "1.0.0", - "hook": { - "path": "/usr/local/bin/low-latency-hooks.sh", - "args": ["low-latency-hooks.sh", "{{.RPSMask}}"] - }, - "when": { - "always": true - }, - "stages": ["prestart"] -} diff --git a/assets/pao/configs/99-netdev-rps.rules b/assets/pao/configs/99-netdev-rps.rules deleted file mode 100644 index 4e6d346af..000000000 --- a/assets/pao/configs/99-netdev-rps.rules +++ /dev/null @@ -1 +0,0 @@ -SUBSYSTEM=="net", ACTION=="add", ENV{DEVPATH}!="/devices/virtual/net/veth*", TAG+="systemd", ENV{SYSTEMD_WANTS}="update-rps@%k.service" diff --git a/assets/pao/configs/99-runtimes.conf b/assets/pao/configs/99-runtimes.conf deleted file mode 100644 index c44a5fff0..000000000 --- a/assets/pao/configs/99-runtimes.conf +++ /dev/null @@ -1,20 +0,0 @@ -{{if .ReservedCpus}} -[crio.runtime] -infra_ctr_cpuset = "{{.ReservedCpus}}" -{{end}} - -# We should copy paste the default runtime because this snippet will override the whole runtimes section -[crio.runtime.runtimes.runc] -runtime_path = "" -runtime_type = "oci" -runtime_root = "/run/runc" - -# The CRI-O will check the allowed_annotations under the runtime handler and apply high-performance hooks when one of -# high-performance annotations presents under it. -# We should provide the runtime_path because we need to inform that we want to re-use runc binary and we -# do not have high-performance binary under the $PATH that will point to it. 
-[crio.runtime.runtimes.high-performance] -runtime_path = "/bin/runc" -runtime_type = "oci" -runtime_root = "/run/runc" -allowed_annotations = ["cpu-load-balancing.crio.io", "cpu-quota.crio.io", "irq-load-balancing.crio.io"] diff --git a/assets/pao/scripts/hugepages-allocation.sh b/assets/pao/scripts/hugepages-allocation.sh deleted file mode 100755 index dd6a25a40..000000000 --- a/assets/pao/scripts/hugepages-allocation.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/usr/bin/env bash - -set -euo pipefail - -nodes_path="/sys/devices/system/node" -hugepages_file="${nodes_path}/node${NUMA_NODE}/hugepages/hugepages-${HUGEPAGES_SIZE}kB/nr_hugepages" - -if [ ! -f "${hugepages_file}" ]; then - echo "ERROR: ${hugepages_file} does not exist" - exit 1 -fi - -timeout=60 -sample=1 -current_time=0 -while [ "$(cat "${hugepages_file}")" -ne "${HUGEPAGES_COUNT}" ]; do - echo "${HUGEPAGES_COUNT}" >"${hugepages_file}" - - current_time=$((current_time + sample)) - if [ $current_time -gt $timeout ]; then - echo "ERROR: ${hugepages_file} does not have the expected number of hugepages ${HUGEPAGES_COUNT}" - exit 1 - fi - - sleep $sample -done diff --git a/assets/pao/scripts/low-latency-hooks.sh b/assets/pao/scripts/low-latency-hooks.sh deleted file mode 100644 index aff54afd1..000000000 --- a/assets/pao/scripts/low-latency-hooks.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env bash - -mask="${1}" -[ -n "${mask}" ] || { logger "${0}: The rps-mask parameter is missing" ; exit 0; } - -pid=$(jq '.pid' /dev/stdin 2>&1) -[[ $? -eq 0 && -n "${pid}" ]] || { logger "${0}: Failed to extract the pid: ${pid}"; exit 0; } - -ns=$(ip netns identify "${pid}" 2>&1) -[[ $? -eq 0 && -n "${ns}" ]] || { logger "${0} Failed to identify the namespace: ${ns}"; exit 0; } - -# Updates the container veth RPS mask on the node -netns_link_indexes=$(ip netns exec "${ns}" ip -j link | jq ".[] | select(.link_index != null) | .link_index") -for link_index in ${netns_link_indexes}; do - container_veth=$(ip -j link | jq ".[] | select(.ifindex == ${link_index}) | .ifname" | tr -d '"') - echo ${mask} > /sys/devices/virtual/net/${container_veth}/queues/rx-0/rps_cpus -done - -# Updates the RPS mask for the interface inside of the container network namespace -mode=$(ip netns exec "${ns}" [ -w /sys ] && echo "rw" || echo "ro" 2>&1) -[ $? -eq 0 ] || { logger "${0} Failed to determine if the /sys is writable: ${mode}"; exit 0; } - -if [ "${mode}" = "ro" ]; then - res=$(ip netns exec "${ns}" mount -o remount,rw /sys 2>&1) - [ $? -eq 0 ] || { logger "${0}: Failed to remount /sys as rw: ${res}"; exit 0; } -fi - -# /sys/class/net can't be used recursively to find the rps_cpus file, use /sys/devices instead -res=$(ip netns exec "${ns}" find /sys/devices -type f -name rps_cpus -exec sh -c "echo ${mask} | cat > {}" \; 2>&1) -[[ $? -eq 0 && -z "${res}" ]] || logger "${0}: Failed to apply the RPS mask: ${res}" - -if [ "${mode}" = "ro" ]; then - ip netns exec "${ns}" mount -o remount,ro /sys - [ $? 
-eq 0 ] || exit 1 # Error out so the pod will not start with a writable /sys -fi diff --git a/assets/pao/scripts/set-rps-mask.sh b/assets/pao/scripts/set-rps-mask.sh deleted file mode 100644 index e6bb827a3..000000000 --- a/assets/pao/scripts/set-rps-mask.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/env bash - -dev=$1 -[ -n "${dev}" ] || { echo "The device argument is missing" >&2 ; exit 1; } - -mask=$2 -[ -n "${mask}" ] || { echo "The mask argument is missing" >&2 ; exit 1; } - -dev_dir="/sys/class/net/${dev}" - -function find_dev_dir { - systemd_devs=$(systemctl list-units -t device | grep sys-subsystem-net-devices | cut -d' ' -f1) - - for systemd_dev in ${systemd_devs}; do - dev_sysfs=$(systemctl show "${systemd_dev}" -p SysFSPath --value) - - dev_orig_name="${dev_sysfs##*/}" - if [ "${dev_orig_name}" = "${dev}" ]; then - dev_name="${systemd_dev##*-}" - dev_name="${dev_name%%.device}" - if [ "${dev_name}" = "${dev}" ]; then # disregard the original device unit - continue - fi - - echo "${dev} device was renamed to $dev_name" - dev_dir="/sys/class/net/${dev_name}" - break - fi - done -} - -[ -d "${dev_dir}" ] || find_dev_dir # the net device was renamed, find the new name -[ -d "${dev_dir}" ] || { sleep 5; find_dev_dir; } # search failed, wait a little and try again -[ -d "${dev_dir}" ] || { echo "${dev_dir}" directory not found >&2 ; exit 0; } # the interface disappeared, not an error - -find "${dev_dir}"/queues -type f -name rps_cpus -exec sh -c "echo ${mask} | cat > {}" \; \ No newline at end of file diff --git a/assets/pao/tuned/openshift-node-performance b/assets/pao/tuned/openshift-node-performance deleted file mode 100644 index 331f08af7..000000000 --- a/assets/pao/tuned/openshift-node-performance +++ /dev/null @@ -1,132 +0,0 @@ -[main] -summary=Openshift node optimized for deterministic performance at the cost of increased power consumption, focused on low latency network performance. Based on Tuned 2.11 and Cluster node tuning (oc 4.5) -include=openshift-node,cpu-partitioning - -# Inheritance of base profiles legend: -# cpu-partitioning -> network-latency -> latency-performance -# https://github.com/redhat-performance/tuned/blob/master/profiles/latency-performance/tuned.conf -# https://github.com/redhat-performance/tuned/blob/master/profiles/network-latency/tuned.conf -# https://github.com/redhat-performance/tuned/blob/master/profiles/cpu-partitioning/tuned.conf - -# All values are mapped with a comment where a parent profile contains them. -# Different values will override the original values in parent profiles. - -[variables] -#> isolated_cores take a list of ranges; e.g. 
isolated_cores=2,4-7 -{{if .IsolatedCpus}} -isolated_cores={{.IsolatedCpus}} -{{end}} - -not_isolated_cores_expanded=${f:cpulist_invert:${isolated_cores_expanded}} - -[cpu] -#> latency-performance -#> (override) -force_latency=cstate.id:1|3 -governor=performance -energy_perf_bias=performance -min_perf_pct=100 - -[service] -service.stalld=start,enable - -[vm] -#> network-latency -transparent_hugepages=never - -{{if not .GloballyDisableIrqLoadBalancing}} -[irqbalance] -#> Override the value set by cpu-partitioning with an empty one -banned_cpus="" -{{end}} - -[scheduler] -runtime=0 -group.ksoftirqd=0:f:11:*:ksoftirqd.* -group.rcuc=0:f:11:*:rcuc.* -sched_rt_runtime_us=-1 -sched_min_granularity_ns=10000000 -sched_migration_cost_ns=5000000 -numa_balancing=0 -{{if not .GloballyDisableIrqLoadBalancing}} -default_irq_smp_affinity = ignore -{{end}} - -[sysctl] -#> cpu-partitioning #realtime -kernel.hung_task_timeout_secs = 600 -#> cpu-partitioning #realtime -kernel.nmi_watchdog = 0 -#> realtime -kernel.sched_rt_runtime_us = -1 -# cpu-partitioning and realtime for RHEL disable it (= 0) -# OCP is too dynamic when partitioning and needs to evacuate -#> scheduled timers when starting a guaranteed workload (= 1) -kernel.timer_migration = 1 -#> network-latency -kernel.numa_balancing=0 -net.core.busy_read=50 -net.core.busy_poll=50 -net.ipv4.tcp_fastopen=3 -#> cpu-partitioning #realtime -vm.stat_interval = 10 - -# ktune sysctl settings for rhel6 servers, maximizing i/o throughput -# -# Minimal preemption granularity for CPU-bound tasks: -# (default: 1 msec# (1 + ilog(ncpus)), units: nanoseconds) -#> latency-performance -kernel.sched_min_granularity_ns=10000000 - -# If a workload mostly uses anonymous memory and it hits this limit, the entire -# working set is buffered for I/O, and any more write buffering would require -# swapping, so it's time to throttle writes until I/O can catch up. Workloads -# that mostly use file mappings may be able to use even higher values. -# -# The generator of dirty data starts writeback at this percentage (system default -# is 20%) -#> latency-performance -vm.dirty_ratio=10 - -# Start background writeback (via writeback threads) at this percentage (system -# default is 10%) -#> latency-performance -vm.dirty_background_ratio=3 - -# The swappiness parameter controls the tendency of the kernel to move -# processes out of physical memory and onto the swap disk. -# 0 tells the kernel to avoid swapping processes out of physical memory -# for as long as possible -# 100 tells the kernel to aggressively swap processes out of physical memory -# and move them to swap cache -#> latency-performance -vm.swappiness=10 - -# The total time the scheduler will consider a migrated process -# "cache hot" and thus less likely to be re-migrated -# (system default is 500000, i.e. 
0.5 ms) -#> latency-performance -kernel.sched_migration_cost_ns=5000000 - -[selinux] -#> Custom (atomic host) -avc_cache_threshold=8192 - -{{if .NetDevices}} -{{.NetDevices}} -{{end}} - -[bootloader] -# set empty values to disable RHEL initrd setting in cpu-partitioning -initrd_remove_dir= -initrd_dst_img= -initrd_add_dir= -# overrides cpu-partitioning cmdline -cmdline_cpu_part=+nohz=on rcu_nocbs=${isolated_cores} tuned.non_isolcpus=${not_isolated_cpumask} intel_pstate=disable nosoftlockup -{{if .StaticIsolation}} -cmdline_realtime=+tsc=nowatchdog intel_iommu=on iommu=pt isolcpus=domain,managed_irq,${isolated_cores} systemd.cpu_affinity=${not_isolated_cores_expanded} -{{else}} -cmdline_realtime=+tsc=nowatchdog intel_iommu=on iommu=pt isolcpus=managed_irq,${isolated_cores} systemd.cpu_affinity=${not_isolated_cores_expanded} -{{end}} -cmdline_hugepages=+{{if .DefaultHugepagesSize}} default_hugepagesz={{.DefaultHugepagesSize}} {{end}} {{if .Hugepages}} {{.Hugepages}} {{end}} -cmdline_additionalArg=+{{if .AdditionalArgs}} {{.AdditionalArgs}} {{end}} diff --git a/cmd/cluster-node-tuning-operator/main.go b/cmd/cluster-node-tuning-operator/main.go index efde4a7a8..5e4e98a5e 100644 --- a/cmd/cluster-node-tuning-operator/main.go +++ b/cmd/cluster-node-tuning-operator/main.go @@ -7,20 +7,13 @@ import ( "runtime" apiconfigv1 "github.com/openshift/api/config/v1" - performancev1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v1" - performancev1alpha1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v1alpha1" - performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2" - paocontroller "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller" mcov1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" - olmv1 "github.com/operator-framework/api/pkg/operators/v1" - olmv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" apiruntime "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/klog/v2" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/cache" tunedv1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/tuned/v1" "github.com/openshift/cluster-node-tuning-operator/pkg/config" @@ -30,15 +23,14 @@ import ( "github.com/openshift/cluster-node-tuning-operator/pkg/tuned" "github.com/openshift/cluster-node-tuning-operator/pkg/util" "github.com/openshift/cluster-node-tuning-operator/version" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/cache" ) const ( operandFilename = "openshift-tuned" operatorFilename = "cluster-node-tuning-operator" - webhookPort = 4343 - webhookCertDir = "/apiserver.local.config/certificates" - webhookCertName = "apiserver.crt" - webhookKeyName = "apiserver.key" + metricsHost = "0.0.0.0" ) var ( @@ -50,11 +42,6 @@ func init() { utilruntime.Must(tunedv1.AddToScheme(scheme)) utilruntime.Must(mcov1.AddToScheme(scheme)) utilruntime.Must(apiconfigv1.Install(scheme)) - utilruntime.Must(performancev1alpha1.AddToScheme(scheme)) - utilruntime.Must(performancev1.AddToScheme(scheme)) - utilruntime.Must(performancev2.AddToScheme(scheme)) - utilruntime.Must(olmv1alpha1.AddToScheme(scheme)) - utilruntime.Must(olmv1.AddToScheme(scheme)) } func printVersion() { @@ -90,7 +77,6 @@ func main() { ntoNamespace := config.OperatorNamespace() namespaces := []string{ ntoNamespace, - 
"openshift-performance-addon-operator", metav1.NamespaceNone, } @@ -114,41 +100,12 @@ func main() { controller, err := operator.NewController() if err != nil { - klog.Fatalf("failed to create new controller: %v", err) - } - - if err := mgr.Add(controller); err != nil { - klog.Fatalf("failed to add new controller to the manager: %v", err) - } - - if err := mgr.Add(metrics.Server{}); err != nil { - klog.Fatalf("unable to add metrics server as runnable under the manager: %v", err) + klog.Fatal(err) } + mgr.Add(controller) + mgr.Add(metrics.Server{}) metrics.RegisterVersion(version.Version) - if err = (&paocontroller.PerformanceProfileReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - Recorder: mgr.GetEventRecorderFor("performance-profile-controller"), - }).SetupWithManager(mgr); err != nil { - klog.Exitf("unable to create PerformanceProfile controller: %v", err) - } - - // Configure webhook server. - webHookServer := mgr.GetWebhookServer() - webHookServer.Port = webhookPort - webHookServer.CertDir = webhookCertDir - webHookServer.CertName = webhookCertName - webHookServer.KeyName = webhookKeyName - - if err = (&performancev1.PerformanceProfile{}).SetupWebhookWithManager(mgr); err != nil { - klog.Exitf("unable to create PerformanceProfile v1 webhook: %v", err) - } - - if err = (&performancev2.PerformanceProfile{}).SetupWebhookWithManager(mgr); err != nil { - klog.Exitf("unable to create PerformanceProfile v2 webhook: %v", err) - } - if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { klog.Exitf("manager exited with non-zero code: %v", err) } diff --git a/cmd/performance-profile-creator/README.md b/cmd/performance-profile-creator/README.md deleted file mode 100644 index 874bd7f4e..000000000 --- a/cmd/performance-profile-creator/README.md +++ /dev/null @@ -1,99 +0,0 @@ -# Performance Profile Creator (PPC) -A tool to automate the process of creating Performance Profile using the user supplied profile parameters. - -## Software Components -1. A CLI tool part of the Performance Addon Operator image - -## Flow -1. PPC consumes a must-gather output. -1. PPC output is a bunch of YAML data (PAO profile + NTO tuned part). - -## Things to note before running Performance Profile Creator -1. Performance Profile Creator is present as an entrypoint (in /usr/local/bin/performance-profile-creator) in the Performance Addon Operator image. -1. It is assumed that we have a must-gather directory available where we run the tool. - 1. Option 1: Run must-gather tool like below and use its output dir when you run PPC. - ```bash - oc adm must-gather --image=quay.io/openshift-kni/performance-addon-operator-must-gather:4.9-snapshot --dest-dir= - ``` - 1. Option 2: Use an existing must-gather tarball decompressed to a directory. - -## Building Performance Profile Creator binary and image -Developers can build the Performance Profile Creator images from the source tree using make targets. - 1. Setup Environment variables - ```bash - export REGISTRY_NAMESPACE= - export IMAGE_TAG= #defaults to "latest" - export IMAGE_BUILD_CMD=podman - ``` -1. To build from Performance Profile Creator source: - ```bash - make create-performance-profile - ``` -1. To build the Performance addon Operator image from source: - ```bash - make operator-container - ``` -Alternatively, you can pull the latest master upstream image. In the following examples, TAG has the format major.minor-snapshot. 
For example, the TAG for OpenShift 4.11 will be 4.11-snapshot: - -```bash -podman pull quay.io/openshift-kni/performance-addon-operator:4.11-snapshot -``` - -## Running Performance Profile Creator -Depending on how the must-gather directory was set up the operator can now run the Performance Profile Creator tool with the required parameters. - -PPC Tool help output: -```bash -$ podman run --entrypoint performance-profile-creator quay.io/openshift-kni/performance-addon-operator:4.11-snapshot -h -A tool that automates creation of Performance Profiles - -Usage: - performance-profile-creator [flags] - -Flags: - --disable-ht Disable Hyperthreading - -h, --help help for performance-profile-creator - --info string Show cluster information; requires --must-gather-dir-path, ignore the other arguments. [Valid values: log, json] (default "log") - --mcp-name string MCP name corresponding to the target machines (required) - --must-gather-dir-path string Must gather directory path (default "must-gather") - --power-consumption-mode string The power consumption mode. [Valid values: default, low-latency, ultra-low-latency] (default "default") - --profile-name string Name of the performance profile to be created (default "performance") - --reserved-cpu-count int Number of reserved CPUs (required) - --rt-kernel Enable Real Time Kernel (required) - --split-reserved-cpus-across-numa Split the Reserved CPUs across NUMA nodes - --topology-manager-policy string Kubelet Topology Manager Policy of the performance profile to be created. [Valid values: single-numa-node, best-effort, restricted] (default "restricted") - --user-level-networking Run with User level Networking(DPDK) enabled -``` - -1. Option 1: Example of using must-gather output dir (obtained after running must gather manually) along with required arguments - ```bash - podman run --entrypoint performance-profile-creator -v /path/to/must-gather-output:/must-gather:z \ - quay.io/openshift-kni/performance-addon-operator:4.11-snapshot --must-gather-dir-path /must-gather \ - --reserved-cpu-count 20 --mcp-name worker-cnf --rt-kernel false > performance-profile.yaml - ``` -1. Option 2: Example of using an existing must-gather tarball which is decompressed to a directory along with required arguments - ```bash - podman run --entrypoint performance-profile-creator -v /path/to/decompressed-tarball:/must-gather:z \ - quay.io/openshift-kni/performance-addon-operator:4.11-snapshot --must-gather-dir-path /must-gather \ - --reserved-cpu-count 20 --mcp-name worker-cnf --rt-kernel false > performance-profile.yaml - ``` - -## Running Performance Profile Creator using Wrapper script - -1. Example of how the following wrapper script can be used to create a performance profle: - ```bash - ./hack/run-perf-profile-creator.sh -t must-gather.tar.gz -- --mcp-name=worker-cnf --reserved-cpu-count=20 \ - --rt-kernel=false --split-reserved-cpus-across-numa=true --topology-manager-policy=restricted \ - --power-consumption-mode=low-latency > performace-profile.yaml - ``` - -## Discovery mode - -To learn about the key details of the cluster you want to create a profile for, you may use the `discovery` (aka `info`) mode: -```bash - ./hack/run-perf-profile-creator.sh -t must-gather.tar.gz -- --info=log - -``` - -The `info` option requires a value which drives the output format. Please refer to the online help of the performance-profile-creator -tool to learn about the supported formats. 
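For orientation while reading the removed `createProfile` code in the `cmd/root.go` diff below, here is a minimal, self-contained sketch (editorial, not part of this patch) of how a PerformanceProfile object of the kind the PPC prints can be assembled and marshalled to YAML. It uses the `performancev2` API types that this revert deletes, so it only builds against a tree that still vendors `pkg/apis/pao/v2`; the profile name, CPU ranges, and node selector below are made-up illustrative values, and the real tool additionally strips `creationTimestamp`/`status` via its `MarshallObject` helper.

```go
package main

import (
	"fmt"

	performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/yaml"
)

func main() {
	// Illustrative CPU partitioning; the PPC derives these from the must-gather data.
	reserved := performancev2.CPUSet("0-3")
	isolated := performancev2.CPUSet("4-31")
	rtKernel := false

	profile := &performancev2.PerformanceProfile{
		TypeMeta: metav1.TypeMeta{
			Kind:       "PerformanceProfile",
			APIVersion: performancev2.GroupVersion.String(),
		},
		ObjectMeta: metav1.ObjectMeta{Name: "performance"},
		Spec: performancev2.PerformanceProfileSpec{
			CPU: &performancev2.CPU{
				Isolated: &isolated,
				Reserved: &reserved,
			},
			// Hypothetical selector; the PPC copies it from the targeted MCP.
			NodeSelector: map[string]string{"node-role.kubernetes.io/worker-cnf": ""},
			RealTimeKernel: &performancev2.RealTimeKernel{
				Enabled: &rtKernel,
			},
		},
	}

	// Plain marshal for illustration; unlike MarshallObject it keeps empty fields.
	out, err := yaml.Marshal(profile)
	if err != nil {
		panic(err)
	}
	fmt.Printf("---\n%s", out)
}
```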
diff --git a/cmd/performance-profile-creator/cmd/root.go b/cmd/performance-profile-creator/cmd/root.go deleted file mode 100644 index f67fcabb7..000000000 --- a/cmd/performance-profile-creator/cmd/root.go +++ /dev/null @@ -1,573 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright 2021 Red Hat, Inc. - */ - -package cmd - -import ( - "encoding/json" - "fmt" - "io" - "os" - "sort" - "strconv" - "strings" - - "github.com/openshift/cluster-node-tuning-operator/pkg/pao/profilecreator" - machineconfigv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" - log "github.com/sirupsen/logrus" - "sigs.k8s.io/yaml" - - "github.com/spf13/cobra" - - performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - kubeletconfig "k8s.io/kubelet/config/v1beta1" - "k8s.io/utils/pointer" -) - -const ( - infoModeJSON = "json" - infoModeLog = "log" -) - -var ( - validTMPolicyValues = []string{kubeletconfig.SingleNumaNodeTopologyManagerPolicy, kubeletconfig.BestEffortTopologyManagerPolicy, kubeletconfig.RestrictedTopologyManagerPolicy} - validInfoModes = []string{infoModeLog, infoModeJSON} -) - -// ProfileData collects and stores all the data needed for profile creation -type ProfileData struct { - isolatedCPUs, reservedCPUs string - nodeSelector *metav1.LabelSelector - mcpSelector map[string]string - performanceProfileName string - topologyPoilcy string - rtKernel bool - additionalKernelArgs []string - userLevelNetworking *bool - disableHT bool -} - -// ClusterData collects the cluster wide information, each mcp points to a list of ghw node handlers -type ClusterData map[*machineconfigv1.MachineConfigPool][]*profilecreator.GHWHandler - -func init() { - log.SetOutput(os.Stderr) - log.SetFormatter(&log.TextFormatter{ - DisableTimestamp: true, - }) -} - -// NewRootCommand returns entrypoint command to interact with all other commands -func NewRootCommand() *cobra.Command { - pcArgs := &ProfileCreatorArgs{ - UserLevelNetworking: pointer.BoolPtr(false), - } - - var requiredFlags []string = []string{ - "reserved-cpu-count", - "mcp-name", - "rt-kernel", - "must-gather-dir-path", - } - - root := &cobra.Command{ - Use: "performance-profile-creator", - Short: "A tool that automates creation of Performance Profiles", - RunE: func(cmd *cobra.Command, args []string) error { - if cmd.Flag("info").Changed { - infoMode := cmd.Flag("info").Value.String() - if err := validateFlag("info", infoMode, validInfoModes); err != nil { - return err - } - - missingRequiredFlags := checkRequiredFlags(cmd, "must-gather-dir-path") - if len(missingRequiredFlags) > 0 { - return fmt.Errorf("missing required flags: %s", strings.Join(argNameToFlag(missingRequiredFlags), ", ")) - } - - mustGatherDirPath := cmd.Flag("must-gather-dir-path").Value.String() - cluster, err := getClusterData(mustGatherDirPath) - if err != nil { - return fmt.Errorf("failed to parse 
the cluster data: %v", err) - } - - clusterInfo := makeClusterInfoFromClusterData(cluster) - if infoMode == infoModeJSON { - showClusterInfoJSON(clusterInfo) - } else { - showClusterInfoLog(clusterInfo) - } - return nil - } - - missingRequiredFlags := checkRequiredFlags(cmd, requiredFlags...) - if len(missingRequiredFlags) > 0 { - return fmt.Errorf("missing required flags: %s", strings.Join(argNameToFlag(missingRequiredFlags), ", ")) - } - - mustGatherDirPath := cmd.Flag("must-gather-dir-path").Value.String() - cluster, err := getClusterData(mustGatherDirPath) - if err != nil { - return fmt.Errorf("failed to parse the cluster data: %v", err) - } - - profileCreatorArgsFromFlags, err := getDataFromFlags(cmd) - if err != nil { - return fmt.Errorf("failed to obtain data from flags %v", err) - } - profileData, err := getProfileData(profileCreatorArgsFromFlags, cluster) - if err != nil { - return err - } - createProfile(*profileData) - return nil - }, - } - - root.PersistentFlags().IntVar(&pcArgs.ReservedCPUCount, "reserved-cpu-count", 0, "Number of reserved CPUs (required)") - root.PersistentFlags().BoolVar(&pcArgs.SplitReservedCPUsAcrossNUMA, "split-reserved-cpus-across-numa", false, "Split the Reserved CPUs across NUMA nodes") - root.PersistentFlags().StringVar(&pcArgs.MCPName, "mcp-name", "", "MCP name corresponding to the target machines (required)") - root.PersistentFlags().BoolVar(&pcArgs.DisableHT, "disable-ht", false, "Disable Hyperthreading") - root.PersistentFlags().BoolVar(&pcArgs.RTKernel, "rt-kernel", false, "Enable Real Time Kernel (required)") - root.PersistentFlags().BoolVar(pcArgs.UserLevelNetworking, "user-level-networking", false, "Run with User level Networking(DPDK) enabled") - root.PersistentFlags().StringVar(&pcArgs.PowerConsumptionMode, "power-consumption-mode", profilecreator.ValidPowerConsumptionModes[0], fmt.Sprintf("The power consumption mode. [Valid values: %s]", strings.Join(profilecreator.ValidPowerConsumptionModes, ", "))) - root.PersistentFlags().StringVar(&pcArgs.MustGatherDirPath, "must-gather-dir-path", "must-gather", "Must gather directory path") - root.PersistentFlags().StringVar(&pcArgs.ProfileName, "profile-name", "performance", "Name of the performance profile to be created") - root.PersistentFlags().StringVar(&pcArgs.TMPolicy, "topology-manager-policy", kubeletconfig.RestrictedTopologyManagerPolicy, fmt.Sprintf("Kubelet Topology Manager Policy of the performance profile to be created. [Valid values: %s, %s, %s]", kubeletconfig.SingleNumaNodeTopologyManagerPolicy, kubeletconfig.BestEffortTopologyManagerPolicy, kubeletconfig.RestrictedTopologyManagerPolicy)) - root.PersistentFlags().StringVar(&pcArgs.Info, "info", infoModeLog, fmt.Sprintf("Show cluster information; requires --must-gather-dir-path, ignore the other arguments. 
[Valid values: %s]", strings.Join(validInfoModes, ", "))) - - return root -} - -func checkRequiredFlags(cmd *cobra.Command, argNames ...string) []string { - missing := []string{} - for _, argName := range argNames { - if !cmd.Flag(argName).Changed { - missing = append(missing, argName) - } - } - return missing -} - -func argNameToFlag(argNames []string) []string { - var flagNames []string - for _, argName := range argNames { - flagNames = append(flagNames, fmt.Sprintf("--%s", argName)) - } - return flagNames -} - -func getClusterData(mustGatherDirPath string) (ClusterData, error) { - cluster := make(ClusterData) - info, err := os.Stat(mustGatherDirPath) - if os.IsNotExist(err) { - return nil, fmt.Errorf("the must-gather path '%s' is not valid", mustGatherDirPath) - } - if err != nil { - return nil, fmt.Errorf("can't access the must-gather path: %v", err) - } - if !info.IsDir() { - return nil, fmt.Errorf("the must-gather path '%s' is not a directory", mustGatherDirPath) - } - - mcps, err := profilecreator.GetMCPList(mustGatherDirPath) - if err != nil { - return nil, fmt.Errorf("failed to get the MCP list under %s: %v", mustGatherDirPath, err) - } - - nodes, err := profilecreator.GetNodeList(mustGatherDirPath) - if err != nil { - return nil, fmt.Errorf("failed to load the cluster nodes: %v", err) - } - - for _, mcp := range mcps { - matchedNodes, err := profilecreator.GetNodesForPool(mcp, mcps, nodes) - if err != nil { - return nil, fmt.Errorf("failed to find MCP %s's nodes: %v", mcp.Name, err) - } - handlers := make([]*profilecreator.GHWHandler, len(matchedNodes)) - for i, node := range matchedNodes { - handle, err := profilecreator.NewGHWHandler(mustGatherDirPath, node) - if err != nil { - return nil, fmt.Errorf("failed to load node's %s's GHW snapshot : %v", mcp.Name, err) - } - handlers[i] = handle - } - cluster[mcp] = handlers - } - - return cluster, nil -} - -// NUMACellInfo describe a NUMA cell on a node -type NUMACellInfo struct { - ID int `json:"id"` - CoreList []int `json:"cores"` -} - -// NodeInfo describe a Node in a MCP -type NodeInfo struct { - Name string `json:"name"` - HTEnabled bool `json:"smt_enabled"` - CPUsCount int `json:"cpus_count"` - NUMACells []NUMACellInfo `json:"numa_cells"` -} - -// MCPInfo describe a MCP in a cluster -type MCPInfo struct { - Name string `json:"name"` - Nodes []NodeInfo `json:"nodes"` -} - -// ClusterInfo describe a cluster -type ClusterInfo []MCPInfo - -// Sort ensures all sequences in the ClusterInfo are sorted, to make comparisons easier. 
-func (cInfo ClusterInfo) Sort() ClusterInfo { - for _, mcpInfo := range cInfo { - for _, nodeInfo := range mcpInfo.Nodes { - for _, numaCell := range nodeInfo.NUMACells { - sort.Ints(numaCell.CoreList) - } - sort.Slice(nodeInfo.NUMACells, func(i, j int) bool { return nodeInfo.NUMACells[i].ID < nodeInfo.NUMACells[j].ID }) - } - } - sort.Slice(cInfo, func(i, j int) bool { return cInfo[i].Name < cInfo[j].Name }) - return cInfo -} - -func makeClusterInfoFromClusterData(cluster ClusterData) ClusterInfo { - var cInfo ClusterInfo - for mcp, nodeHandlers := range cluster { - mInfo := MCPInfo{ - Name: mcp.Name, - } - for _, handle := range nodeHandlers { - topology, err := handle.SortedTopology() - if err != nil { - log.Infof("%s(Topology discovery error: %v)", handle.Node.GetName(), err) - continue - } - - htEnabled, err := handle.IsHyperthreadingEnabled() - if err != nil { - log.Infof("%s(HT discovery error: %v)", handle.Node.GetName(), err) - } - - nInfo := NodeInfo{ - Name: handle.Node.GetName(), - HTEnabled: htEnabled, - } - - for id, node := range topology.Nodes { - var coreList []int - for _, core := range node.Cores { - coreList = append(coreList, core.LogicalProcessors...) - } - nInfo.CPUsCount += len(coreList) - nInfo.NUMACells = append(nInfo.NUMACells, NUMACellInfo{ - ID: id, - CoreList: coreList, - }) - } - mInfo.Nodes = append(mInfo.Nodes, nInfo) - } - cInfo = append(cInfo, mInfo) - } - return cInfo.Sort() -} - -func showClusterInfoJSON(cInfo ClusterInfo) { - json.NewEncoder(os.Stdout).Encode(cInfo) -} - -func showClusterInfoLog(cInfo ClusterInfo) { - log.Infof("Cluster info:") - for _, mcpInfo := range cInfo { - log.Infof("MCP '%s' nodes:", mcpInfo.Name) - for _, nInfo := range mcpInfo.Nodes { - log.Infof("Node: %s (NUMA cells: %d, HT: %v)", nInfo.Name, len(nInfo.NUMACells), nInfo.HTEnabled) - for _, cInfo := range nInfo.NUMACells { - log.Infof("NUMA cell %d : %v", cInfo.ID, cInfo.CoreList) - } - log.Infof("CPU(s): %d", nInfo.CPUsCount) - } - log.Infof("---") - } -} -func getDataFromFlags(cmd *cobra.Command) (ProfileCreatorArgs, error) { - creatorArgs := ProfileCreatorArgs{} - mustGatherDirPath := cmd.Flag("must-gather-dir-path").Value.String() - mcpName := cmd.Flag("mcp-name").Value.String() - reservedCPUCount, err := strconv.Atoi(cmd.Flag("reserved-cpu-count").Value.String()) - if err != nil { - return creatorArgs, fmt.Errorf("failed to parse reserved-cpu-count flag: %v", err) - } - splitReservedCPUsAcrossNUMA, err := strconv.ParseBool(cmd.Flag("split-reserved-cpus-across-numa").Value.String()) - if err != nil { - return creatorArgs, fmt.Errorf("failed to parse split-reserved-cpus-across-numa flag: %v", err) - } - profileName := cmd.Flag("profile-name").Value.String() - tmPolicy := cmd.Flag("topology-manager-policy").Value.String() - if err != nil { - return creatorArgs, fmt.Errorf("failed to parse topology-manager-policy flag: %v", err) - } - err = validateFlag("topology-manager-policy", tmPolicy, validTMPolicyValues) - if err != nil { - return creatorArgs, fmt.Errorf("invalid value for topology-manager-policy flag specified: %v", err) - } - if tmPolicy == kubeletconfig.SingleNumaNodeTopologyManagerPolicy && splitReservedCPUsAcrossNUMA { - return creatorArgs, fmt.Errorf("not appropriate to split reserved CPUs in case of topology-manager-policy: %v", tmPolicy) - } - powerConsumptionMode := cmd.Flag("power-consumption-mode").Value.String() - if err != nil { - return creatorArgs, fmt.Errorf("failed to parse power-consumption-mode flag: %v", err) - } - err = 
validateFlag("power-consumption-mode", powerConsumptionMode, profilecreator.ValidPowerConsumptionModes) - if err != nil { - return creatorArgs, fmt.Errorf("invalid value for power-consumption-mode flag specified: %v", err) - } - - rtKernelEnabled, err := strconv.ParseBool(cmd.Flag("rt-kernel").Value.String()) - if err != nil { - return creatorArgs, fmt.Errorf("failed to parse rt-kernel flag: %v", err) - } - - htDisabled, err := strconv.ParseBool(cmd.Flag("disable-ht").Value.String()) - if err != nil { - return creatorArgs, fmt.Errorf("failed to parse disable-ht flag: %v", err) - } - creatorArgs = ProfileCreatorArgs{ - MustGatherDirPath: mustGatherDirPath, - ProfileName: profileName, - ReservedCPUCount: reservedCPUCount, - SplitReservedCPUsAcrossNUMA: splitReservedCPUsAcrossNUMA, - MCPName: mcpName, - TMPolicy: tmPolicy, - RTKernel: rtKernelEnabled, - PowerConsumptionMode: powerConsumptionMode, - DisableHT: htDisabled, - } - - if cmd.Flag("user-level-networking").Changed { - userLevelNetworkingEnabled, err := strconv.ParseBool(cmd.Flag("user-level-networking").Value.String()) - if err != nil { - return creatorArgs, fmt.Errorf("failed to parse user-level-networking flag: %v", err) - } - creatorArgs.UserLevelNetworking = &userLevelNetworkingEnabled - } - - return creatorArgs, nil -} - -func getProfileData(args ProfileCreatorArgs, cluster ClusterData) (*ProfileData, error) { - mcps := make([]*machineconfigv1.MachineConfigPool, len(cluster)) - mcpNames := make([]string, len(cluster)) - var mcp *machineconfigv1.MachineConfigPool - - i := 0 - for m := range cluster { - mcps[i] = m - mcpNames[i] = m.Name - if m.Name == args.MCPName { - mcp = m - } - i++ - } - - if mcp == nil { - return nil, fmt.Errorf("'%s' MCP does not exist, valid values are %v", args.MCPName, mcpNames) - } - - mcpSelector, err := profilecreator.GetMCPSelector(mcp, mcps) - if err != nil { - return nil, fmt.Errorf("failed to compute the MCP selector: %v", err) - } - - if len(cluster[mcp]) == 0 { - return nil, fmt.Errorf("no schedulable nodes are associated with '%s' MCP", args.MCPName) - } - - var matchedNodeNames []string - for _, nodeHandler := range cluster[mcp] { - matchedNodeNames = append(matchedNodeNames, nodeHandler.Node.GetName()) - } - log.Infof("Nodes targetted by %s MCP are: %v", args.MCPName, matchedNodeNames) - err = profilecreator.EnsureNodesHaveTheSameHardware(cluster[mcp]) - if err != nil { - return nil, fmt.Errorf("targeted nodes differ: %v", err) - } - - // We make sure that the matched Nodes are the same - // Assumption here is moving forward matchedNodes[0] is representative of how all the nodes are - // same from hardware topology point of view - - nodeHandle := cluster[mcp][0] - reservedCPUs, isolatedCPUs, err := nodeHandle.GetReservedAndIsolatedCPUs(args.ReservedCPUCount, args.SplitReservedCPUsAcrossNUMA, args.DisableHT) - if err != nil { - return nil, fmt.Errorf("failed to compute the reserved and isolated CPUs: %v", err) - } - log.Infof("%d reserved CPUs allocated: %v ", reservedCPUs.Size(), reservedCPUs.String()) - log.Infof("%d isolated CPUs allocated: %v", isolatedCPUs.Size(), isolatedCPUs.String()) - kernelArgs := profilecreator.GetAdditionalKernelArgs(args.PowerConsumptionMode, args.DisableHT) - profileData := &ProfileData{ - reservedCPUs: reservedCPUs.String(), - isolatedCPUs: isolatedCPUs.String(), - nodeSelector: mcp.Spec.NodeSelector, - mcpSelector: mcpSelector, - performanceProfileName: args.ProfileName, - topologyPoilcy: args.TMPolicy, - rtKernel: args.RTKernel, - additionalKernelArgs: 
kernelArgs, - userLevelNetworking: args.UserLevelNetworking, - } - return profileData, nil -} - -func validateFlag(name, value string, validValues []string) error { - if isStringInSlice(value, validValues) { - return nil - } - return fmt.Errorf("flag %q: Value '%s' is invalid. Valid values "+ - "come from the set %v", name, value, validValues) -} - -func isStringInSlice(value string, candidates []string) bool { - for _, candidate := range candidates { - if strings.EqualFold(candidate, value) { - return true - } - } - return false -} - -// ProfileCreatorArgs represents the arguments passed to the ProfileCreator -type ProfileCreatorArgs struct { - PowerConsumptionMode string `json:"power-consumption-mode"` - MustGatherDirPath string `json:"must-gather-dir-path"` - ProfileName string `json:"profile-name"` - ReservedCPUCount int `json:"reserved-cpu-count"` - SplitReservedCPUsAcrossNUMA bool `json:"split-reserved-cpus-across-numa"` - DisableHT bool `json:"disable-ht"` - RTKernel bool `json:"rt-kernel"` - UserLevelNetworking *bool `json:"user-level-networking,omitempty"` - MCPName string `json:"mcp-name"` - TMPolicy string `json:"topology-manager-policy"` - Info string `json:"info"` -} - -func createProfile(profileData ProfileData) { - reserved := performancev2.CPUSet(profileData.reservedCPUs) - isolated := performancev2.CPUSet(profileData.isolatedCPUs) - // TODO: Get the name from MCP if not specified in the command line arguments - profile := &performancev2.PerformanceProfile{ - TypeMeta: metav1.TypeMeta{ - Kind: "PerformanceProfile", - APIVersion: performancev2.GroupVersion.String(), - }, - ObjectMeta: metav1.ObjectMeta{ - Name: profileData.performanceProfileName, - }, - Spec: performancev2.PerformanceProfileSpec{ - CPU: &performancev2.CPU{ - Isolated: &isolated, - Reserved: &reserved, - }, - MachineConfigPoolSelector: profileData.mcpSelector, - NodeSelector: profileData.nodeSelector.MatchLabels, - RealTimeKernel: &performancev2.RealTimeKernel{ - Enabled: &profileData.rtKernel, - }, - AdditionalKernelArgs: profileData.additionalKernelArgs, - NUMA: &performancev2.NUMA{ - TopologyPolicy: &profileData.topologyPoilcy, - }, - }, - } - - if profileData.userLevelNetworking != nil { - profile.Spec.Net = &performancev2.Net{ - UserLevelNetworking: profileData.userLevelNetworking, - } - } - - // write CSV to out dir - writer := strings.Builder{} - MarshallObject(&profile, &writer) - - fmt.Printf("%s", writer.String()) -} - -// MarshallObject mashals an object, usually a CSV into YAML -func MarshallObject(obj interface{}, writer io.Writer) error { - jsonBytes, err := json.Marshal(obj) - if err != nil { - return err - } - - var r unstructured.Unstructured - if err := json.Unmarshal(jsonBytes, &r.Object); err != nil { - return err - } - - // remove status and metadata.creationTimestamp - unstructured.RemoveNestedField(r.Object, "metadata", "creationTimestamp") - unstructured.RemoveNestedField(r.Object, "template", "metadata", "creationTimestamp") - unstructured.RemoveNestedField(r.Object, "spec", "template", "metadata", "creationTimestamp") - unstructured.RemoveNestedField(r.Object, "status") - - deployments, exists, err := unstructured.NestedSlice(r.Object, "spec", "install", "spec", "deployments") - if exists { - for _, obj := range deployments { - deployment := obj.(map[string]interface{}) - unstructured.RemoveNestedField(deployment, "metadata", "creationTimestamp") - unstructured.RemoveNestedField(deployment, "spec", "template", "metadata", "creationTimestamp") - 
unstructured.RemoveNestedField(deployment, "status") - } - unstructured.SetNestedSlice(r.Object, deployments, "spec", "install", "spec", "deployments") - } - - jsonBytes, err = json.Marshal(r.Object) - if err != nil { - return err - } - - yamlBytes, err := yaml.JSONToYAML(jsonBytes) - if err != nil { - return err - } - - // fix double quoted strings by removing unneeded single quotes... - s := string(yamlBytes) - s = strings.Replace(s, " '\"", " \"", -1) - s = strings.Replace(s, "\"'\n", "\"\n", -1) - - yamlBytes = []byte(s) - - _, err = writer.Write([]byte("---\n")) - if err != nil { - return err - } - - _, err = writer.Write(yamlBytes) - if err != nil { - return err - } - - return nil -} diff --git a/cmd/performance-profile-creator/main.go b/cmd/performance-profile-creator/main.go deleted file mode 100644 index 3ba7cdc4d..000000000 --- a/cmd/performance-profile-creator/main.go +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright 2021 Red Hat, Inc. - */ - -package main - -import ( - "os" - - "github.com/openshift/cluster-node-tuning-operator/cmd/performance-profile-creator/cmd" -) - -func main() { - root := cmd.NewRootCommand() - if err := root.Execute(); err != nil { - os.Exit(1) - } -} diff --git a/examples/pao/crd/bases/performance.openshift.io_performanceprofiles.yaml b/examples/pao/crd/bases/performance.openshift.io_performanceprofiles.yaml deleted file mode 100644 index 1028c6daf..000000000 --- a/examples/pao/crd/bases/performance.openshift.io_performanceprofiles.yaml +++ /dev/null @@ -1,679 +0,0 @@ - ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - controller-gen.kubebuilder.io/version: (devel) - creationTimestamp: null - name: performanceprofiles.performance.openshift.io -spec: - group: performance.openshift.io - names: - kind: PerformanceProfile - listKind: PerformanceProfileList - plural: performanceprofiles - singular: performanceprofile - scope: Cluster - versions: - - deprecated: true - deprecationWarning: v1 is deprecated and should be removed in next three releases, - use v2 instead - name: v1 - schema: - openAPIV3Schema: - description: PerformanceProfile is the Schema for the performanceprofiles - API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: PerformanceProfileSpec defines the desired state of PerformanceProfile. - properties: - additionalKernelArgs: - description: Addional kernel arguments. - items: - type: string - type: array - cpu: - description: CPU defines a set of CPU related parameters. - properties: - balanceIsolated: - description: BalanceIsolated toggles whether or not the Isolated - CPU set is eligible for load balancing work loads. When this - option is set to "false", the Isolated CPU set will be static, - meaning workloads have to explicitly assign each thread to a - specific cpu in order to work across multiple CPUs. Setting - this to "true" allows workloads to be balanced across CPUs. - Setting this to "false" offers the most predictable performance - for guaranteed workloads, but it offloads the complexity of - cpu load balancing to the application. Defaults to "true" - type: boolean - isolated: - description: 'Isolated defines a set of CPUs that will be used - to give to application threads the most execution time possible, - which means removing as many extraneous tasks off a CPU as possible. - It is important to notice the CPU manager can choose any CPU - to run the workload except the reserved CPUs. In order to guarantee - that your workload will run on the isolated CPU: 1. The union - of reserved CPUs and isolated CPUs should include all online - CPUs 2. The isolated CPUs field should be the complementary - to reserved CPUs field' - type: string - reserved: - description: Reserved defines a set of CPUs that will not be used - for any container workloads initiated by kubelet. - type: string - required: - - isolated - type: object - globallyDisableIrqLoadBalancing: - description: GloballyDisableIrqLoadBalancing toggles whether IRQ load - balancing will be disabled for the Isolated CPU set. When the option - is set to "true" it disables IRQs load balancing for the Isolated - CPU set. Setting the option to "false" allows the IRQs to be balanced - across all CPUs, however the IRQs load balancing can be disabled - per pod CPUs when using irq-load-balancing.crio.io/cpu-quota.crio.io - annotations. Defaults to "false" - type: boolean - hugepages: - description: HugePages defines a set of huge pages related parameters. - It is possible to set huge pages with multiple size values at the - same time. For example, hugepages can be set with 1G and 2M, both - values will be set on the node by the performance-addon-operator. - It is important to notice that setting hugepages default size to - 1G will remove all 2M related folders from the node and it will - be impossible to configure 2M hugepages under the node. - properties: - defaultHugepagesSize: - description: DefaultHugePagesSize defines huge pages default size - under kernel boot parameters. - type: string - pages: - description: Pages defines huge pages that we want to allocate - at boot time. - items: - description: HugePage defines the number of allocated huge pages - of the specific size. - properties: - count: - description: Count defines amount of huge pages, maps to - the 'hugepages' kernel boot parameter. 
- format: int32 - type: integer - node: - description: Node defines the NUMA node where hugepages - will be allocated, if not specified, pages will be allocated - equally between NUMA nodes - format: int32 - type: integer - size: - description: Size defines huge page size, maps to the 'hugepagesz' - kernel boot parameter. - type: string - type: object - type: array - type: object - machineConfigLabel: - additionalProperties: - type: string - description: MachineConfigLabel defines the label to add to the MachineConfigs - the operator creates. It has to be used in the MachineConfigSelector - of the MachineConfigPool which targets this performance profile. - Defaults to "machineconfiguration.openshift.io/role=" - type: object - machineConfigPoolSelector: - additionalProperties: - type: string - description: MachineConfigPoolSelector defines the MachineConfigPool - label to use in the MachineConfigPoolSelector of resources like - KubeletConfigs created by the operator. Defaults to "machineconfiguration.openshift.io/role=" - type: object - net: - description: Net defines a set of network related features - properties: - devices: - description: Devices contains a list of network device representations - that will be set with a netqueue count equal to CPU.Reserved - . If no devices are specified then the default is all devices. - items: - description: 'Device defines a way to represent a network device - in several options: device name, vendor ID, model ID, PCI - path and MAC address' - properties: - deviceID: - description: Network device ID (model) represnted as a 16 - bit hexmadecimal number. - type: string - interfaceName: - description: Network device name to be matched. It uses - a syntax of shell-style wildcards which are either positive - or negative. - type: string - vendorID: - description: Network device vendor ID represnted as a 16 - bit Hexmadecimal number. - type: string - type: object - type: array - userLevelNetworking: - description: UserLevelNetworking when enabled - sets either all - or specified network devices queue size to the amount of reserved - CPUs. Defaults to "false". - type: boolean - type: object - nodeSelector: - additionalProperties: - type: string - description: 'NodeSelector defines the Node label to use in the NodeSelectors - of resources like Tuned created by the operator. It most likely - should, but does not have to match the node label in the NodeSelector - of the MachineConfigPool which targets this performance profile. - In the case when machineConfigLabels or machineConfigPoolSelector - are not set, we are expecting a certain NodeSelector format /: - "" in order to be able to calculate the default values for the former - mentioned fields.' - type: object - numa: - description: NUMA defines options related to topology aware affinities - properties: - topologyPolicy: - description: Name of the policy applied when TopologyManager is - enabled Operator defaults to "best-effort" - type: string - type: object - realTimeKernel: - description: RealTimeKernel defines a set of real time kernel related - parameters. RT kernel won't be installed when not set. - properties: - enabled: - description: Enabled defines if the real time kernel packages - should be installed. Defaults to "false" - type: boolean - type: object - required: - - cpu - - nodeSelector - type: object - status: - description: PerformanceProfileStatus defines the observed state of PerformanceProfile. 
- properties: - conditions: - description: Conditions represents the latest available observations - of current state. - items: - description: Condition represents the state of the operator's reconciliation - functionality. - properties: - lastHeartbeatTime: - format: date-time - type: string - lastTransitionTime: - format: date-time - type: string - message: - type: string - reason: - type: string - status: - type: string - type: - description: ConditionType is the state of the operator's reconciliation - functionality. - type: string - required: - - status - - type - type: object - type: array - runtimeClass: - description: RuntimeClass contains the name of the RuntimeClass resource - created by the operator. - type: string - tuned: - description: Tuned points to the Tuned custom resource object that - contains the tuning values generated by this operator. - type: string - type: object - type: object - served: true - storage: false - subresources: - status: {} - - deprecated: true - deprecationWarning: v1alpha1 is deprecated and should be removed in the next release, - use v2 instead - name: v1alpha1 - schema: - openAPIV3Schema: - description: PerformanceProfile is the Schema for the performanceprofiles - API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: PerformanceProfileSpec defines the desired state of PerformanceProfile. - properties: - additionalKernelArgs: - description: Addional kernel arguments. - items: - type: string - type: array - cpu: - description: CPU defines a set of CPU related parameters. - properties: - balanceIsolated: - description: BalanceIsolated toggles whether or not the Isolated - CPU set is eligible for load balancing work loads. When this - option is set to "false", the Isolated CPU set will be static, - meaning workloads have to explicitly assign each thread to a - specific cpu in order to work across multiple CPUs. Setting - this to "true" allows workloads to be balanced across CPUs. - Setting this to "false" offers the most predictable performance - for guaranteed workloads, but it offloads the complexity of - cpu load balancing to the application. Defaults to "true" - type: boolean - isolated: - description: 'Isolated defines a set of CPUs that will be used - to give to application threads the most execution time possible, - which means removing as many extraneous tasks off a CPU as possible. - It is important to notice the CPU manager can choose any CPU - to run the workload except the reserved CPUs. In order to guarantee - that your workload will run on the isolated CPU: 1. The union - of reserved CPUs and isolated CPUs should include all online - CPUs 2. 
The isolated CPUs field should be the complementary - to reserved CPUs field' - type: string - reserved: - description: Reserved defines a set of CPUs that will not be used - for any container workloads initiated by kubelet. - type: string - type: object - hugepages: - description: HugePages defines a set of huge pages related parameters. - It is possible to set huge pages with multiple size values at the - same time. For example, hugepages can be set with 1G and 2M, both - values will be set on the node by the performance-addon-operator. - It is important to notice that setting hugepages default size to - 1G will remove all 2M related folders from the node and it will - be impossible to configure 2M hugepages under the node. - properties: - defaultHugepagesSize: - description: DefaultHugePagesSize defines huge pages default size - under kernel boot parameters. - type: string - pages: - description: Pages defines huge pages that we want to allocate - at boot time. - items: - description: HugePage defines the number of allocated huge pages - of the specific size. - properties: - count: - description: Count defines amount of huge pages, maps to - the 'hugepages' kernel boot parameter. - format: int32 - type: integer - node: - description: Node defines the NUMA node where hugepages - will be allocated, if not specified, pages will be allocated - equally between NUMA nodes - format: int32 - type: integer - size: - description: Size defines huge page size, maps to the 'hugepagesz' - kernel boot parameter. - type: string - type: object - type: array - type: object - machineConfigLabel: - additionalProperties: - type: string - description: MachineConfigLabel defines the label to add to the MachineConfigs - the operator creates. It has to be used in the MachineConfigSelector - of the MachineConfigPool which targets this performance profile. - Defaults to "machineconfiguration.openshift.io/role=" - type: object - machineConfigPoolSelector: - additionalProperties: - type: string - description: MachineConfigPoolSelector defines the MachineConfigPool - label to use in the MachineConfigPoolSelector of resources like - KubeletConfigs created by the operator. Defaults to "machineconfiguration.openshift.io/role=" - type: object - nodeSelector: - additionalProperties: - type: string - description: NodeSelector defines the Node label to use in the NodeSelectors - of resources like Tuned created by the operator. It most likely - should, but does not have to match the node label in the NodeSelector - of the MachineConfigPool which targets this performance profile. - type: object - numa: - description: NUMA defines options related to topology aware affinities - properties: - topologyPolicy: - description: Name of the policy applied when TopologyManager is - enabled Operator defaults to "best-effort" - type: string - type: object - realTimeKernel: - description: RealTimeKernel defines a set of real time kernel related - parameters. RT kernel won't be installed when not set. - properties: - enabled: - description: Enabled defines if the real time kernel packages - should be installed. Defaults to "false" - type: boolean - type: object - type: object - status: - description: PerformanceProfileStatus defines the observed state of PerformanceProfile. - properties: - conditions: - description: Conditions represents the latest available observations - of current state. - items: - description: Condition represents the state of the operator's reconciliation - functionality. 
- properties: - lastHeartbeatTime: - format: date-time - type: string - lastTransitionTime: - format: date-time - type: string - message: - type: string - reason: - type: string - status: - type: string - type: - description: ConditionType is the state of the operator's reconciliation - functionality. - type: string - required: - - status - - type - type: object - type: array - runtimeClass: - description: RuntimeClass contains the name of the RuntimeClass resource - created by the operator. - type: string - tuned: - description: Tuned points to the Tuned custom resource object that - contains the tuning values generated by this operator. - type: string - type: object - type: object - served: true - storage: false - subresources: - status: {} - - name: v2 - schema: - openAPIV3Schema: - description: PerformanceProfile is the Schema for the performanceprofiles - API - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: PerformanceProfileSpec defines the desired state of PerformanceProfile. - properties: - additionalKernelArgs: - description: Additional kernel arguments. - items: - type: string - type: array - cpu: - description: CPU defines a set of CPU related parameters. - properties: - balanceIsolated: - description: BalanceIsolated toggles whether or not the Isolated - CPU set is eligible for load balancing work loads. When this - option is set to "false", the Isolated CPU set will be static, - meaning workloads have to explicitly assign each thread to a - specific cpu in order to work across multiple CPUs. Setting - this to "true" allows workloads to be balanced across CPUs. - Setting this to "false" offers the most predictable performance - for guaranteed workloads, but it offloads the complexity of - cpu load balancing to the application. Defaults to "true" - type: boolean - isolated: - description: 'Isolated defines a set of CPUs that will be used - to give to application threads the most execution time possible, - which means removing as many extraneous tasks off a CPU as possible. - It is important to notice the CPU manager can choose any CPU - to run the workload except the reserved CPUs. In order to guarantee - that your workload will run on the isolated CPU: 1. The union - of reserved CPUs and isolated CPUs should include all online - CPUs 2. The isolated CPUs field should be the complementary - to reserved CPUs field' - type: string - reserved: - description: Reserved defines a set of CPUs that will not be used - for any container workloads initiated by kubelet. - type: string - required: - - isolated - - reserved - type: object - globallyDisableIrqLoadBalancing: - description: GloballyDisableIrqLoadBalancing toggles whether IRQ load - balancing will be disabled for the Isolated CPU set. When the option - is set to "true" it disables IRQs load balancing for the Isolated - CPU set.
Setting the option to "false" allows the IRQs to be balanced - across all CPUs, however the IRQs load balancing can be disabled - per pod CPUs when using irq-load-balancing.crio.io/cpu-quota.crio.io - annotations. Defaults to "false" - type: boolean - hugepages: - description: HugePages defines a set of huge pages related parameters. - It is possible to set huge pages with multiple size values at the - same time. For example, hugepages can be set with 1G and 2M, both - values will be set on the node by the performance-addon-operator. - It is important to notice that setting hugepages default size to - 1G will remove all 2M related folders from the node and it will - be impossible to configure 2M hugepages under the node. - properties: - defaultHugepagesSize: - description: DefaultHugePagesSize defines huge pages default size - under kernel boot parameters. - type: string - pages: - description: Pages defines huge pages that we want to allocate - at boot time. - items: - description: HugePage defines the number of allocated huge pages - of the specific size. - properties: - count: - description: Count defines amount of huge pages, maps to - the 'hugepages' kernel boot parameter. - format: int32 - type: integer - node: - description: Node defines the NUMA node where hugepages - will be allocated, if not specified, pages will be allocated - equally between NUMA nodes - format: int32 - type: integer - size: - description: Size defines huge page size, maps to the 'hugepagesz' - kernel boot parameter. - type: string - type: object - type: array - type: object - machineConfigLabel: - additionalProperties: - type: string - description: MachineConfigLabel defines the label to add to the MachineConfigs - the operator creates. It has to be used in the MachineConfigSelector - of the MachineConfigPool which targets this performance profile. - Defaults to "machineconfiguration.openshift.io/role=" - type: object - machineConfigPoolSelector: - additionalProperties: - type: string - description: MachineConfigPoolSelector defines the MachineConfigPool - label to use in the MachineConfigPoolSelector of resources like - KubeletConfigs created by the operator. Defaults to "machineconfiguration.openshift.io/role=" - type: object - net: - description: Net defines a set of network related features - properties: - devices: - description: Devices contains a list of network device representations - that will be set with a netqueue count equal to CPU.Reserved - . If no devices are specified then the default is all devices. - items: - description: 'Device defines a way to represent a network device - in several options: device name, vendor ID, model ID, PCI - path and MAC address' - properties: - deviceID: - description: Network device ID (model) represented as a 16 - bit hexadecimal number. - type: string - interfaceName: - description: Network device name to be matched. It uses - a syntax of shell-style wildcards which are either positive - or negative. - type: string - vendorID: - description: Network device vendor ID represented as a 16 - bit hexadecimal number. - type: string - type: object - type: array - userLevelNetworking: - description: UserLevelNetworking when enabled - sets either all - or specified network devices queue size to the amount of reserved - CPUs. Defaults to "false". - type: boolean - type: object - nodeSelector: - additionalProperties: - type: string - description: 'NodeSelector defines the Node label to use in the NodeSelectors - of resources like Tuned created by the operator.
It most likely - should, but does not have to match the node label in the NodeSelector - of the MachineConfigPool which targets this performance profile. - In the case when machineConfigLabels or machineConfigPoolSelector - are not set, we are expecting a certain NodeSelector format /: - "" in order to be able to calculate the default values for the former - mentioned fields.' - type: object - numa: - description: NUMA defines options related to topology aware affinities - properties: - topologyPolicy: - description: Name of the policy applied when TopologyManager is - enabled Operator defaults to "best-effort" - type: string - type: object - realTimeKernel: - description: RealTimeKernel defines a set of real time kernel related - parameters. RT kernel won't be installed when not set. - properties: - enabled: - description: Enabled defines if the real time kernel packages - should be installed. Defaults to "false" - type: boolean - type: object - required: - - cpu - - nodeSelector - type: object - status: - description: PerformanceProfileStatus defines the observed state of PerformanceProfile. - properties: - conditions: - description: Conditions represents the latest available observations - of current state. - items: - description: Condition represents the state of the operator's reconciliation - functionality. - properties: - lastHeartbeatTime: - format: date-time - type: string - lastTransitionTime: - format: date-time - type: string - message: - type: string - reason: - type: string - status: - type: string - type: - description: ConditionType is the state of the operator's reconciliation - functionality. - type: string - required: - - status - - type - type: object - type: array - runtimeClass: - description: RuntimeClass contains the name of the RuntimeClass resource - created by the operator. - type: string - tuned: - description: Tuned points to the Tuned custom resource object that - contains the tuning values generated by this operator. - type: string - type: object - type: object - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/examples/pao/crd/kustomization.yaml b/examples/pao/crd/kustomization.yaml deleted file mode 100644 index b651e6b26..000000000 --- a/examples/pao/crd/kustomization.yaml +++ /dev/null @@ -1,6 +0,0 @@ -# This kustomization.yaml is not intended to be run by itself, -# since it depends on service name and namespace that are out of this kustomize package. 
-# It should be run by config/default -resources: -- bases/performance.openshift.io_performanceprofiles.yaml -# +kubebuilder:scaffold:crdkustomizeresource diff --git a/examples/pao/default/kustomization.yaml b/examples/pao/default/kustomization.yaml deleted file mode 100644 index 918debf45..000000000 --- a/examples/pao/default/kustomization.yaml +++ /dev/null @@ -1,5 +0,0 @@ -bases: -- ../crd -- ../rbac -- ../samples -- ../manager diff --git a/examples/pao/rbac/kustomization.yaml b/examples/pao/rbac/kustomization.yaml deleted file mode 100644 index b228b96da..000000000 --- a/examples/pao/rbac/kustomization.yaml +++ /dev/null @@ -1,3 +0,0 @@ -resources: -- role.yaml -- role_binding.yaml diff --git a/examples/pao/rbac/role.yaml b/examples/pao/rbac/role.yaml deleted file mode 100644 index 285346831..000000000 --- a/examples/pao/rbac/role.yaml +++ /dev/null @@ -1,100 +0,0 @@ - ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - creationTimestamp: null - name: performance-operator -rules: -- apiGroups: - - "" - resources: - - events - verbs: - - '*' -- apiGroups: - - "" - resources: - - nodes - verbs: - - get - - list - - watch -- apiGroups: - - config.openshift.io - resources: - - infrastructures - verbs: - - get - - list - - watch -- apiGroups: - - machineconfiguration.openshift.io - resources: - - kubeletconfigs - - machineconfigpools - - machineconfigs - verbs: - - '*' -- apiGroups: - - node.k8s.io - resources: - - runtimeclasses - verbs: - - '*' -- apiGroups: - - performance.openshift.io - resources: - - performanceprofiles - - performanceprofiles/finalizers - - performanceprofiles/status - verbs: - - '*' -- apiGroups: - - tuned.openshift.io - resources: - - profiles - - tuneds - verbs: - - '*' - ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - creationTimestamp: null - name: performance-operator - namespace: openshift-cluster-node-tuning-operator -rules: -- apiGroups: - - apps - resourceNames: - - performance-operator - resources: - - deployments/finalizers - verbs: - - update -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - create - - get - - list - - update -- apiGroups: - - "" - resources: - - configmaps - - pods - - services - - services/finalizers - verbs: - - '*' -- apiGroups: - - monitoring.coreos.com - resources: - - servicemonitors - verbs: - - '*' diff --git a/examples/pao/rbac/role_binding.yaml b/examples/pao/rbac/role_binding.yaml deleted file mode 100644 index dc745682e..000000000 --- a/examples/pao/rbac/role_binding.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: performance-operator -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: performance-operator -subjects: -- kind: ServiceAccount - name: performance-operator ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: performance-operator -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: performance-operator -subjects: - - kind: ServiceAccount - name: performance-operator - diff --git a/examples/pao/samples/kustomization.yaml b/examples/pao/samples/kustomization.yaml deleted file mode 100644 index 39a50a614..000000000 --- a/examples/pao/samples/kustomization.yaml +++ /dev/null @@ -1,6 +0,0 @@ -## Append samples you want in your CSV to this file as resources ## -resources: -- performance_v2_performanceprofile.yaml -- performance_v1_performanceprofile.yaml -- performance_v1alpha1_performanceprofile.yaml 
-# +kubebuilder:scaffold:manifestskustomizesamples diff --git a/examples/pao/samples/performance_v1_performanceprofile.yaml b/examples/pao/samples/performance_v1_performanceprofile.yaml deleted file mode 100644 index 4db8eb883..000000000 --- a/examples/pao/samples/performance_v1_performanceprofile.yaml +++ /dev/null @@ -1,25 +0,0 @@ -apiVersion: performance.openshift.io/v1 -kind: PerformanceProfile -metadata: - name: example-performanceprofile -spec: - additionalKernelArgs: - - "nmi_watchdog=0" - - "audit=0" - - "mce=off" - - "processor.max_cstate=1" - - "idle=poll" - - "intel_idle.max_cstate=0" - cpu: - isolated: "2-3" - reserved: "0-1" - hugepages: - defaultHugepagesSize: "1G" - pages: - - size: "1G" - count: 2 - node: 0 - realTimeKernel: - enabled: true - nodeSelector: - node-role.kubernetes.io/performance: "" diff --git a/examples/pao/samples/performance_v1alpha1_performanceprofile.yaml b/examples/pao/samples/performance_v1alpha1_performanceprofile.yaml deleted file mode 100644 index 9aa978876..000000000 --- a/examples/pao/samples/performance_v1alpha1_performanceprofile.yaml +++ /dev/null @@ -1,25 +0,0 @@ -apiVersion: performance.openshift.io/v1alpha1 -kind: PerformanceProfile -metadata: - name: example-performanceprofile -spec: - additionalKernelArgs: - - "nmi_watchdog=0" - - "audit=0" - - "mce=off" - - "processor.max_cstate=1" - - "idle=poll" - - "intel_idle.max_cstate=0" - cpu: - isolated: "2-3" - reserved: "0-1" - hugepages: - defaultHugepagesSize: "1G" - pages: - - size: "1G" - count: 2 - node: 0 - realTimeKernel: - enabled: true - nodeSelector: - node-role.kubernetes.io/performance: "" diff --git a/examples/pao/samples/performance_v2_performanceprofile.yaml b/examples/pao/samples/performance_v2_performanceprofile.yaml deleted file mode 100644 index 7a95b6b2a..000000000 --- a/examples/pao/samples/performance_v2_performanceprofile.yaml +++ /dev/null @@ -1,26 +0,0 @@ -apiVersion: performance.openshift.io/v2 -kind: PerformanceProfile -metadata: - name: example-performanceprofile -spec: - additionalKernelArgs: - - "nmi_watchdog=0" - - "audit=0" - - "mce=off" - - "processor.max_cstate=1" - - "idle=poll" - - "intel_idle.max_cstate=0" - cpu: - isolated: "2-3" - reserved: "0-1" - hugepages: - defaultHugepagesSize: "1G" - pages: - - size: "1G" - count: 2 - node: 0 - realTimeKernel: - enabled: true - nodeSelector: - # we can not leave an empty string "" because it interpreted as null value - node-role.kubernetes.io/performance: "test" diff --git a/go.mod b/go.mod index fe005528f..bb6955cdb 100644 --- a/go.mod +++ b/go.mod @@ -3,106 +3,76 @@ module github.com/openshift/cluster-node-tuning-operator go 1.17 require ( - github.com/RHsyseng/operator-utils v0.0.0-20200213165520-1a022eb07a43 - github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f - github.com/coreos/ignition v0.35.0 - github.com/coreos/ignition/v2 v2.9.0 - github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 - github.com/google/go-cmp v0.5.6 - github.com/jaypipes/ghw v0.8.1-0.20210605191321-eb162add542b + github.com/coreos/ignition/v2 v2.7.0 github.com/kevinburke/go-bindata v3.16.0+incompatible github.com/onsi/ginkgo v1.16.5 github.com/onsi/gomega v1.17.0 - github.com/openshift/api v0.0.0-20220110171111-997c316db5e1 + github.com/openshift/api v0.0.0-20211209135129-c58d9f695577 github.com/openshift/build-machinery-go v0.0.0-20211213093930-7e33a7eb4ce3 github.com/openshift/client-go v0.0.0-20211209144617-7385dd6338e3 - github.com/openshift/custom-resource-status v0.0.0-20200602122900-c002fd1547ca - 
github.com/openshift/library-go v0.0.0-20211220195323-eca2c467c492 + github.com/openshift/library-go v0.0.0-20211209153216-ed9bc958bd8a github.com/openshift/machine-config-operator v0.0.1-0.20210514234214-c415ce6aed25 - github.com/operator-framework/api v0.10.7 - github.com/operator-framework/operator-lifecycle-manager v3.11.0+incompatible github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.11.0 - github.com/sirupsen/logrus v1.8.1 - github.com/spf13/cobra v1.2.1 - github.com/spf13/pflag v1.0.5 gopkg.in/fsnotify.v1 v1.4.7 gopkg.in/ini.v1 v1.62.0 - k8s.io/api v0.23.3 - k8s.io/apiextensions-apiserver v0.23.3 - k8s.io/apimachinery v0.23.3 - k8s.io/client-go v0.23.3 - k8s.io/code-generator v0.23.3 + k8s.io/api v0.23.0 + k8s.io/apimachinery v0.23.0 + k8s.io/client-go v0.23.0 + k8s.io/code-generator v0.23.0 k8s.io/klog v1.0.0 k8s.io/klog/v2 v2.30.0 - k8s.io/kubelet v0.23.3 - k8s.io/kubernetes v0.23.3 - k8s.io/utils v0.0.0-20211116205334-6203023598ed - kubevirt.io/qe-tools v0.1.6 + k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b sigs.k8s.io/controller-runtime v0.11.0 - sigs.k8s.io/controller-tools v0.6.2 - sigs.k8s.io/yaml v1.3.0 + sigs.k8s.io/controller-tools v0.4.0 ) require ( github.com/PuerkitoBio/purell v1.1.1 // indirect github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect - github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect - github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef // indirect + github.com/ajeddeloh/go-json v0.0.0-20170920214419-6a2fe990e083 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/blang/semver/v4 v4.0.0 // indirect github.com/cespare/xxhash/v2 v2.1.1 // indirect - github.com/coreos/go-json v0.0.0-20170920214419-6a2fe990e083 // indirect github.com/coreos/go-semver v0.3.0 // indirect github.com/coreos/go-systemd/v22 v22.3.2 // indirect - github.com/coreos/vcontext v0.0.0-20201120045928-b0e13dab675c // indirect + github.com/coreos/vcontext v0.0.0-20191017033345-260217907eb5 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/emicklei/go-restful v2.10.0+incompatible // indirect github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/fatih/color v1.12.0 // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect + github.com/ghodss/yaml v1.0.0 // indirect github.com/go-logr/logr v1.2.0 // indirect - github.com/go-ole/go-ole v1.2.4 // indirect - github.com/go-openapi/analysis v0.21.2 // indirect - github.com/go-openapi/errors v0.19.9 // indirect github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.19.6 // indirect - github.com/go-openapi/loads v0.21.1 // indirect - github.com/go-openapi/spec v0.20.4 // indirect - github.com/go-openapi/strfmt v0.21.2 // indirect - github.com/go-openapi/swag v0.21.1 // indirect - github.com/go-openapi/validate v0.21.0 // indirect - github.com/go-stack/stack v1.8.0 // indirect + github.com/go-openapi/jsonreference v0.19.5 // indirect + github.com/go-openapi/swag v0.19.14 // indirect github.com/gobuffalo/flect v0.2.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.2 // indirect - github.com/google/gofuzz v1.2.0 // indirect - github.com/google/uuid v1.2.0 // indirect + github.com/google/go-cmp v0.5.6 // indirect + github.com/google/gofuzz v1.1.0 // indirect + github.com/google/uuid v1.1.2 // indirect github.com/googleapis/gnostic v0.5.5 // indirect 
github.com/imdario/mergo v0.3.12 // indirect github.com/inconshreveable/mousetrap v1.0.0 // indirect - github.com/jaypipes/pcidb v0.6.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/mailru/easyjson v0.7.6 // indirect github.com/mattn/go-colorable v0.1.8 // indirect github.com/mattn/go-isatty v0.0.12 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect - github.com/mitchellh/go-homedir v1.1.0 // indirect - github.com/mitchellh/mapstructure v1.4.1 // indirect - github.com/moby/spdystream v0.2.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/nxadm/tail v1.4.8 // indirect - github.com/oklog/ulid v1.3.1 // indirect github.com/prometheus/client_model v0.2.0 // indirect github.com/prometheus/common v0.28.0 // indirect github.com/prometheus/procfs v0.6.0 // indirect + github.com/spf13/cobra v1.2.1 // indirect + github.com/spf13/pflag v1.0.5 // indirect github.com/vincent-petithory/dataurl v0.0.0-20191104211930-d1553a71de50 // indirect - go.mongodb.org/mongo-driver v1.7.5 // indirect golang.org/x/mod v0.4.2 // indirect - golang.org/x/net v0.0.0-20211209124913-491a49abca63 // indirect + golang.org/x/net v0.0.0-20210825183410-e898025ed96a // indirect golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8 // indirect golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect @@ -117,52 +87,42 @@ require ( gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect - howett.net/plist v0.0.0-20181124034731-591f970eefbb // indirect - k8s.io/component-base v0.23.3 // indirect + k8s.io/apiextensions-apiserver v0.23.0 // indirect + k8s.io/component-base v0.23.0 // indirect k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c // indirect k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.2.0 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect ) -// Pinned to kubernetes-1.23.3 +// Pinned to kubernetes-1.23.0 replace ( - k8s.io/api => k8s.io/api v0.23.3 - k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.23.3 - k8s.io/apimachinery => k8s.io/apimachinery v0.23.3 - k8s.io/apiserver => k8s.io/apiserver v0.23.3 - k8s.io/cli-runtime => k8s.io/cli-runtime v0.23.3 - k8s.io/client-go => k8s.io/client-go v0.23.3 - k8s.io/cloud-provider => k8s.io/cloud-provider v0.23.3 - k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.23.3 - k8s.io/code-generator => k8s.io/code-generator v0.23.3 - k8s.io/component-base => k8s.io/component-base v0.23.3 - k8s.io/component-helpers => k8s.io/component-helpers v0.23.3 - k8s.io/controller-manager => k8s.io/controller-manager v0.23.3 - k8s.io/cri-api => k8s.io/cri-api v0.23.3 - k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.23.3 - k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.23.3 - k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.23.3 - k8s.io/kube-proxy => k8s.io/kube-proxy v0.23.3 - k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.23.3 - k8s.io/kubectl => k8s.io/kubectl v0.23.3 - k8s.io/kubelet => k8s.io/kubelet v0.23.3 - k8s.io/kubernetes => 
k8s.io/kubernetes v1.23.3 - k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.23.3 - k8s.io/metrics => k8s.io/metrics v0.23.3 - k8s.io/mount-utils => k8s.io/mount-utils v0.23.3 - k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.23.3 - k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.23.3 - sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.11.1 + k8s.io/api => k8s.io/api v0.23.0 + k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.23.0 + k8s.io/apimachinery => k8s.io/apimachinery v0.23.0 + k8s.io/apiserver => k8s.io/apiserver v0.23.0 + k8s.io/cli-runtime => k8s.io/cli-runtime v0.23.0 + k8s.io/client-go => k8s.io/client-go v0.23.0 + k8s.io/cloud-provider => k8s.io/cloud-provider v0.23.0 + k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.23.0 + k8s.io/code-generator => k8s.io/code-generator v0.23.0 + k8s.io/component-base => k8s.io/component-base v0.23.0 + k8s.io/component-helpers => k8s.io/component-helpers v0.23.0 + k8s.io/controller-manager => k8s.io/controller-manager v0.23.0 + k8s.io/cri-api => k8s.io/cri-api v0.23.0 + k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.23.0 + k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.23.0 + k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.23.0 + k8s.io/kube-proxy => k8s.io/kube-proxy v0.23.0 + k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.23.0 + k8s.io/kubectl => k8s.io/kubectl v0.23.0 + k8s.io/kubelet => k8s.io/kubelet v0.23.0 + k8s.io/kubernetes => k8s.io/kubernetes v1.23.0 + k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.23.0 + k8s.io/metrics => k8s.io/metrics v0.23.0 + k8s.io/mount-utils => k8s.io/mount-utils v0.23.0 + k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.23.0 + sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.11.0 sigs.k8s.io/controller-tools => sigs.k8s.io/controller-tools v0.7.0 ) - -// Other PAO pinned deps -replace ( - github.com/Azure/go-autorest => github.com/Azure/go-autorest v14.2.0+incompatible - github.com/coreos/prometheus-operator => github.com/coreos/prometheus-operator v0.40.0 - github.com/mtrmac/gpgme => github.com/mtrmac/gpgme v0.1.1 - github.com/openshift/machine-config-operator => github.com/openshift/machine-config-operator v0.0.1-0.20220203091316-d3010b34d344 // release-4.11 -) - -replace vbom.ml/util => github.com/fvbommel/util v0.0.0-20180919145318-efcd4e0f9787 diff --git a/go.sum b/go.sum index b0d1917d0..29e49be22 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,3 @@ -bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= -bitbucket.org/bertimus9/systemstat v0.0.0-20180207000608-0eeff89b0690/go.mod h1:Ulb78X89vxKYgdL24HMTiXYHlyHEvruOj1ZPlqeNEZM= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= @@ -41,8 +39,7 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX cloud.google.com/go/storage v1.9.0/go.mod h1:m+/etGaqZbylxaNT876QGXqEHp4PR2Rq5GMqICWb9bU= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= 
-github.com/Azure/azure-sdk-for-go v55.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774/go.mod h1:6/0dYRLLXyJjbkIPeeGyoJ/eKOSI0eU6eTlCBYibgd0= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= @@ -51,31 +48,32 @@ github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgq github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= -github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8= github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/GoogleCloudPlatform/k8s-cloud-provider v1.16.1-0.20210702024009-ea6160c1d0e3/go.mod h1:8XasY4ymP2V/tn2OOV9ZadmiTE1FIB/h3W+yNlPttKw= -github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab/go.mod h1:3VYc5hodBMJ5+l/7J4xAyMeuM2PNuepvHlGs8yilUCA= +github.com/InVisionApp/go-health v2.1.0+incompatible/go.mod h1:/+Gv1o8JUsrjC6pi6MN6/CgKJo4OqZ6x77XAnImrzhg= github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= -github.com/Microsoft/go-winio v0.4.15/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= -github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= -github.com/Microsoft/hcsshim v0.8.22/go.mod h1:91uVCVzvX2QD16sMCenoxxXo6L1wJnLMX2PSufFMtF0= +github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Masterminds/sprig v2.20.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= +github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= +github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/OpenPeeDeeP/depguard v1.0.0/go.mod h1:7/4sitnI9YlQgTLLk734QlzXT8DuHVnAyztLplQjk+o= +github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= github.com/PuerkitoBio/purell v1.1.1 
h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/RHsyseng/operator-utils v0.0.0-20200213165520-1a022eb07a43 h1:Sb81vKYD+uXItFlAbCtOd2I13V4B1xaURhCEQb4Lzqs= -github.com/RHsyseng/operator-utils v0.0.0-20200213165520-1a022eb07a43/go.mod h1:E+hCtYz+9UsXfAGnRjX2LGuaa5gSGNKHCVTmGZR79vY= -github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk= -github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA= +github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= github.com/ajeddeloh/go-json v0.0.0-20170920214419-6a2fe990e083 h1:uwcvnXW76Y0rHM+qs7y8iHknWUWXYFNlD6FEVhc47TU= -github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/ajeddeloh/go-json v0.0.0-20170920214419-6a2fe990e083/go.mod h1:otnto4/Icqn88WCcM4bhIJNSgsh9VLBuspyyCfvof9c= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -83,17 +81,15 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= +github.com/apparentlymart/go-cidr v1.0.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef h1:46PFijGLmAjMPwCCCo7Jf0W6f9slllCkkv7vyc1yOSg= -github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= -github.com/auth0/go-jwt-middleware v1.0.1/go.mod h1:YSeUX3z6+TF2H+7padiEqNJ73Zy9vXW72U//IgN0BIM= +github.com/ashcrow/osrelease v0.0.0-20180626175927-9b292693c55c/go.mod h1:BRljTyotlu+6N+Qlu5MhjxpdmccCnp9lDvZjNNV8qr4= +github.com/aws/aws-sdk-go v1.19.11/go.mod 
h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.30.28/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= -github.com/aws/aws-sdk-go v1.35.24/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k= -github.com/aws/aws-sdk-go v1.38.49/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -101,78 +97,79 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= -github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= -github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= -github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/certifi/gocertifi v0.0.0-20180905225744-ee1a9a0726d2/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4= github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= -github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= -github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/clarketm/json v1.14.1/go.mod h1:ynr2LRfb0fQU34l07csRNBTcivjySLLiY1YzQqKVfdo= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/clusterhq/flocker-go v0.0.0-20160920122132-2b8b7259d313/go.mod 
h1:P1wt9Z3DP8O6W3rvwCt0REIlshg1InHImaLW0t3ObY0= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= -github.com/container-storage-interface/spec v1.5.0/go.mod h1:8K96oQNkJ7pFcC2R9Z1ynGGBB1I93kcS6PGg3SsOk8s= -github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= -github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= -github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= -github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.11/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= -github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= -github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= -github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= -github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= -github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= -github.com/coredns/caddy v1.1.0/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4= -github.com/coredns/corefile-migration v1.0.14/go.mod h1:XnhgULOEouimnzgn0t4WPuFDN2/PJQcTxdWKC5eXNGE= +github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= +github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= +github.com/containers/image v3.0.2+incompatible/go.mod 
h1:8Vtij257IWSanUQKe1tAeNOm2sRVkSqQTVQ1IlwI3+M= +github.com/containers/image/v5 v5.5.1/go.mod h1:4PyNYR0nwlGq/ybVJD9hWlhmIsNra4Q8uOQX2s6E2uM= +github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= +github.com/containers/ocicrypt v1.0.2/go.mod h1:nsOhbP19flrX6rE7ieGFvBlr7modwmNjsqWarIUce4M= +github.com/containers/storage v1.20.2/go.mod h1:oOB9Ie8OVPojvoaKWEGSEtHbXUAs+tSyr7RO7ZGteMc= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-json v0.0.0-20170920214419-6a2fe990e083 h1:iLYct0QOZLUuTbFBf+PDiKvpG1xPicwkcgnKaGCeTgc= -github.com/coreos/go-json v0.0.0-20170920214419-6a2fe990e083/go.mod h1:FmxyHfvrCFfCsXRylD4QQRlQmvzl+DG6iTHyEEykPfU= +github.com/coreos/fcct v0.5.0/go.mod h1:cbE+j77YSQwFB2fozWVB3qsI2Pi3YiVEbDz/b6Yywdo= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20181031085051-9002847aa142/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= -github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/ign-converter v0.0.0-20201123214124-8dac862888aa/go.mod h1:pqAsDWa5YDi10Va/aqQI0bwOs9hXqoE2xwb5vnFys5s= github.com/coreos/ignition v0.35.0 h1:UFodoYq1mOPrbEjtxIsZbThcDyQwAI1owczRDqWmKkQ= github.com/coreos/ignition v0.35.0/go.mod h1:WJQapxzEn9DE0ryxsGvm8QnBajm/XsS/PkrDqSpz+bA= -github.com/coreos/ignition/v2 v2.9.0 h1:Zl5N08OyqlECB8BrBlMDp3Jf1ShwVTtREPcUq/YO034= -github.com/coreos/ignition/v2 v2.9.0/go.mod h1:A5lFFzA2/zvZQPVEvI1lR5WPLWRb7KZ7Q1QOeUMtcAc= +github.com/coreos/ignition/v2 v2.1.1/go.mod h1:RqmqU64zxarUJa3l4cHtbhcSwfQLpUhv0WVziZwoXvE= +github.com/coreos/ignition/v2 v2.7.0 h1:JCKxJllVtnk1lQY1uisxrtFSHG5L2NI1LRzc8wBEk84= +github.com/coreos/ignition/v2 v2.7.0/go.mod h1:3CjaRpg51hmJzPjarbzB0RvSZbLkNOczxKJobTl6nOY= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/vcontext v0.0.0-20201120045928-b0e13dab675c 
h1:jA28WeORitsxGFVWhyWB06sAG2HbLHPQuHwDydhU2CQ= -github.com/coreos/vcontext v0.0.0-20201120045928-b0e13dab675c/go.mod h1:z4pMVvaUrxs98RROlIYdAQCKhEicjnTirOaVyDRH5h8= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/coreos/vcontext v0.0.0-20190529201340-22b159166068/go.mod h1:E+6hug9bFSe0KZ2ZAzr8M9F5JlArJjv5D1JS7KSkPKE= +github.com/coreos/vcontext v0.0.0-20191017033345-260217907eb5 h1:DjoHHi6+9J7DGYPvBdmszKZLY+ucx2bnA77jf8KIk9M= +github.com/coreos/vcontext v0.0.0-20191017033345-260217907eb5/go.mod h1:E+6hug9bFSe0KZ2ZAzr8M9F5JlArJjv5D1JS7KSkPKE= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/dave/dst v0.26.2/go.mod h1:UMDJuIRPfyUCC78eFuB+SV/WI8oDeyFDvM/JR6NI3IU= github.com/dave/gopackages v0.0.0-20170318123100-46e7023ec56e/go.mod h1:i00+b/gKdIDIxuLDFob7ustLAVqhsZRk2qVZrArELGQ= github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= @@ -184,15 +181,23 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/docker/distribution v0.0.0-20180920194744-16128bbac47f/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v1.4.2-0.20191219165747-a9416c67da9f/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= +github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/libnetwork v0.0.0-20190731215715-7f13a5c99f4b/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8= +github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/elazarl/goproxy 
v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/elazarl/goproxy v0.0.0-20190911111923-ecfe977594f1 h1:yY9rWGoXv1U5pl4gxqlULARMQD7x0QG85lqEXTWysik= +github.com/elazarl/goproxy v0.0.0-20190911111923-ecfe977594f1/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= +github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2/go.mod h1:gNh8nYJoAm43RfaxurUnxr+N1PwuFV3ZMl/efxlIlY8= +github.com/elazarl/goproxy/ext v0.0.0-20190911111923-ecfe977594f1/go.mod h1:gNh8nYJoAm43RfaxurUnxr+N1PwuFV3ZMl/efxlIlY8= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.10.0+incompatible h1:l6Soi8WCOOVAeCo4W98iBFC6Og7/X8bpRt51oNLZ2C8= @@ -205,34 +210,34 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/euank/go-kmsg-parser v2.0.0+incompatible/go.mod h1:MhmAMZ8V4CYH4ybgdRwPr2TU5ThnS43puaKEMpja1uw= github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= +github.com/fatih/color v1.6.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.12.0 h1:mRhaKNwANqRgUBGKmnI5ZxEk7QXmjQeCcuYFMX2bfcc= github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= -github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= github.com/fsnotify/fsnotify v1.5.1/go.mod 
h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= +github.com/fsouza/go-dockerclient v0.0.0-20171004212419-da3951ba2e9e/go.mod h1:KpcjM623fQYE9MZiTGzKhjfxXAV9wbyX2C1cyRHfhl0= +github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= +github.com/getsentry/raven-go v0.0.0-20190513200303-c977f96e1095/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew= -github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= -github.com/go-bindata/go-bindata/v3 v3.1.3/go.mod h1:1/zrpXsLD8YDIbhZRqXzm1Ghc7NhEvIN9+Z6R5/xH4I= +github.com/go-bindata/go-bindata v3.1.2+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo= +github.com/go-critic/go-critic v0.3.5-0.20190526074819-1df300866540/go.mod h1:+sE8vrLDS2M0pZkBk0wy6+nLdKexVDrl/jBqQOTDThA= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -240,95 +245,65 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-lintpack/lintpack v0.5.2/go.mod h1:NwZuYi2nUHho8XEIZ6SIxihrnPoqBTDqfpXvXAN0sXM= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.0 h1:QK40JKJyMdUDz+h+xvCsru/bJhvG0UxvePV0ufL/AcE= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk= github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro= -github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI= -github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM= -github.com/go-openapi/analysis v0.21.2 h1:hXFrOYFHUAMQdu6zwAiKKJHJQ8kqZs1ux/ru1P1wLJU= -github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= -github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.19.9 
h1:9SnKdGhiPZHF3ttwFMiCBEb8jQ4IDdrK+5+a0oTygA4= -github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= -github.com/go-openapi/jsonreference v0.19.6 h1:UBIxjkht+AWIgYzCDSv2GN+E/togfwXUJFRTWhl2Jjs= -github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= -github.com/go-openapi/loads v0.21.1 h1:Wb3nVZpdEzDTcly8S4HMkey6fjARRzb7iEaySimlDW0= -github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g= -github.com/go-openapi/spec v0.20.4 h1:O8hJrt0UMnhHcluhIdUgCLRWyM2x7QkBXRvOs7m+O1M= -github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= -github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= -github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= -github.com/go-openapi/strfmt v0.21.2 h1:5NDNgadiX1Vhemth/TH4gCGopWSTdDjxl60H3B7f+os= -github.com/go-openapi/strfmt v0.21.2/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.21.1 h1:wm0rhTb5z7qpJRHBdPOMuY4QjVUMbF6/kwoYeRAOrKU= -github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/validate v0.21.0 h1:+Wqk39yKOhfpLqNLEC0/eViCkzM5FVXVqrvt526+wcI= -github.com/go-openapi/validate v0.21.0/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= -github.com/go-ozzo/ozzo-validation v3.5.0+incompatible/go.mod h1:gsEKFIVnabGBt6mXmxK0MoFy+cZoTJY6mu5Ll3LVLBU= -github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= -github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= -github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= -github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= -github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= 
-github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= -github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= -github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= +github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= +github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ= +github.com/go-toolsmith/astequal v0.0.0-20180903214952-dcb477bfacd6/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= +github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= +github.com/go-toolsmith/astfmt v0.0.0-20180903215011-8f8ee99c3086/go.mod h1:mP93XdblcopXwlyN4X4uodxXQhldPGZbcEJIimQHrkg= +github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw= +github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU= +github.com/go-toolsmith/astp v0.0.0-20180903215135-0af7e3c24f30/go.mod h1:SV2ur98SGypH1UjcPpCatrV5hPazG6+IfNHbkDXBRrk= +github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI= +github.com/go-toolsmith/pkgload v0.0.0-20181119091011-e9e65178eee8/go.mod h1:WoMrjiy4zvdS+Bg6z9jZH82QXwkcgCBX6nOfnmdaHks= +github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc= +github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= +github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= github.com/gobuffalo/flect v0.2.3 h1:f/ZukRnSNA/DUpSNDadko7Qc0PhGvsew35p/2tu+CRY= github.com/gobuffalo/flect v0.2.3/go.mod h1:vmkQwuZYhN5Pc4ljYQZzP+1sq+NEkK+lh20jmEmX3jc= -github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= -github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= -github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= -github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= -github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= -github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= -github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= -github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= -github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= -github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= -github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= -github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= -github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= -github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= 
-github.com/goccy/go-yaml v1.8.1/go.mod h1:wS4gNoLalDSJxo/SpngzPQ2BN4uuZVLCmbM4S3vd4+Y= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/godbus/dbus v0.0.0-20181025153459-66d97aec3384/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.0.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -355,12 +330,33 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= +github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= +github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6/go.mod h1:DbHgvLiFKX1Sh2T1w8Q/h4NAI8MHIpzCdnBUDTXU3I0= +github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8= +github.com/golangci/go-tools 
v0.0.0-20190318055746-e32c54105b7c/go.mod h1:unzUULGw35sjyOYjUt0jMTXqHlZPpPc6e+xfO4cd6mM= +github.com/golangci/goconst v0.0.0-20180610141641-041c5f2b40f3/go.mod h1:JXrF4TWy4tXYn62/9x8Wm/K/dm06p8tCKwFRDPZG/1o= +github.com/golangci/gocyclo v0.0.0-20180528134321-2becd97e67ee/go.mod h1:ozx7R9SIwqmqf5pRP90DhR2Oay2UIjGuKheCBCNwAYU= +github.com/golangci/gofmt v0.0.0-20181222123516-0b8337e80d98/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= +github.com/golangci/golangci-lint v1.18.0/go.mod h1:kaqo8l0OZKYPtjNmG4z4HrWLgcYNIJ9B9q3LWri9uLg= +github.com/golangci/gosec v0.0.0-20190211064107-66fb7fc33547/go.mod h1:0qUabqiIQgfmlAmulqxyiGkkyF6/tOGSnY2cnPVwrzU= +github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc/go.mod h1:e5tpTHCfVze+7EpLEozzMB3eafxo2KT5veNg1k6byQU= +github.com/golangci/lint-1 v0.0.0-20190420132249-ee948d087217/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= +github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= +github.com/golangci/misspell v0.0.0-20180809174111-950f5d19e770/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= +github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21/go.mod h1:tf5+bzsHdTM0bsB7+8mt0GUMvjCgwLpTapNZHU8AajI= +github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0/go.mod h1:qOQCunEYvmd/TLamH+7LlVccLvUH5kZNhbCgTHoBbp4= +github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk= +github.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac/go.mod h1:P32wAyui1PQ58Oce/KYkOqQv8cVw1zAapXOl+dRFGbc= +github.com/gonum/floats v0.0.0-20181209220543-c233463c7e82/go.mod h1:PxC8OnwL11+aosOB5+iEPoV3picfs8tUpkVd0pDo+Kg= +github.com/gonum/graph v0.0.0-20170401004347-50b27dea7ebb/go.mod h1:ye018NnX1zrbOLqwBvs2HqyyTouQgnL8C+qzYk1snPY= +github.com/gonum/internal v0.0.0-20181124074243-f884aa714029/go.mod h1:Pu4dmpkhSyOzRwuXkOgAvijx4o+4YMUJJo9OvPYMkks= +github.com/gonum/lapack v0.0.0-20181123203213-e4cdc5a0bff9/go.mod h1:XA3DeT6rxh2EAE789SSiSJNqxPaC0aE9J8NTOI0Jo/A= +github.com/gonum/matrix v0.0.0-20181209220409-c518dec07be9/go.mod h1:0EXg4mc1CNP0HCqCz+K4ts155PXIlUywf0wqN+GfPZw= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/cadvisor v0.43.0/go.mod h1:+RdMSbc3FVr5NYCD2dOEJy/LI0jYJ/0xJXkzWXEyiFQ= github.com/google/cel-go v0.9.0/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w= github.com/google/cel-spec v0.6.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -377,9 +373,8 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= 
-github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= @@ -398,28 +393,34 @@ github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= -github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= -github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00 h1:l5lAOZEym3oK3SQ2HBHWsJUfbNBiTXJDeW2QDxw9AQ0= -github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/mux v0.0.0-20191024121256-f395758b854c/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= +github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= 
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= @@ -436,37 +437,34 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v0.0.0-20180404174102-ef8a98b0bbce/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/heketi/heketi v10.3.0+incompatible/go.mod h1:bB9ly3RchcQqsQ9CpyaQwvva7RS5ytVoSoholZQON6o= -github.com/heketi/tests v0.0.0-20151005000721-f3775cbcefd6/go.mod h1:xGMAM8JLi7UkZt1i4FQeQy0R2T8GLUwQhOP5M1gBhy4= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/ishidawataru/sctp v0.0.0-20190723014705-7c296d48a2b5/go.mod h1:DM4VvS+hD/kDi1U1QsX2fnZowwBhqD0Dk3bRPKF/Oc8= -github.com/jaypipes/ghw v0.8.1-0.20210605191321-eb162add542b h1:gqEethdcv2egL3XtvkHh47m4nj09q4XO/VTioGKDLDI= -github.com/jaypipes/ghw v0.8.1-0.20210605191321-eb162add542b/go.mod h1:+gR9bjm3W/HnFi90liF+Fj9GpCe/Dsibl9Im8KmC7c4= -github.com/jaypipes/pcidb v0.6.0 
h1:VIM7GKVaW4qba30cvB67xSCgJPTzkG8Kzw/cbs5PHWU= -github.com/jaypipes/pcidb v0.6.0/go.mod h1:L2RGk04sfRhp5wvHO0gfRAMoLY/F3PKv/nwJeVoho0o= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= -github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= @@ -478,35 +476,36 @@ github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= -github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= -github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= -github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= github.com/kevinburke/go-bindata v3.16.0+incompatible h1:TFzFZop2KxGhqNwsyjgmIh5JOrpG940MZlm5gNbxr8g= github.com/kevinburke/go-bindata v3.16.0+incompatible/go.mod h1:/pEEZ72flUW2p0yi30bslSp9YqD9pysLxunQDdb2CPM= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v0.0.0-20161130080628-0de1eaf82fa3/go.mod h1:jxZFDH7ILpTPQTk+E2s+z4CUas9lVNjIuKR4c5/zKgM= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.10.8/go.mod 
h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= -github.com/libopenstorage/openstorage v1.0.0/go.mod h1:Sp1sIObHjat1BeXhfMqLZ14wnOzEhNx2YQedreMcUyc= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= -github.com/lpabon/godbc v0.1.1/go.mod h1:Jo9QV0cf3U6jZABgiJ2skINAXb9j8m51r07g4KI92ZA= +github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= +github.com/magiconair/properties v1.7.6/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= @@ -515,45 +514,36 @@ github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= -github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= 
github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= -github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= +github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/mikefarah/yq/v3 v3.0.0-20201202084205-8846255d1c37/go.mod h1:dYWq+UWoFCDY1TndvFUQuhBbIYmZpjreC8adEAx93zE= -github.com/mindprince/gonvml v0.0.0-20190828220739-9ebdce4bb989/go.mod h1:2eu9pRWp8mo84xCg6KswZ+USQHjwgRhNp06sozOdsTY= -github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= +github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-ps v0.0.0-20170309133038-4fdf99ab2936/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.4.1 
h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/moby/ipvs v1.0.1/go.mod h1:2pngiyseZbIKXNv7hsKj3O9UEz30c53MT9005gt2hxQ= -github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= -github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= -github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= @@ -562,68 +552,83 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= -github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= +github.com/mozilla/tls-observatory v0.0.0-20180409132520-8791a200eb40/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= +github.com/mozilla/tls-observatory v0.0.0-20190404164649-a3c1b6cfecfd/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= +github.com/mtrmac/gpgme v0.1.2/go.mod h1:GYYHnGSuS7HK3zVS2n3y73y0okK/BeKzwnn5jgiVFNI= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/mvdan/xurls v1.1.0/go.mod h1:tQlNn3BED8bE/15hnSL2HLkDeLWpNPAwtw7wkEq44oU= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/nbutton23/zxcvbn-go v0.0.0-20160627004424-a22cb81b2ecd/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= +github.com/nbutton23/zxcvbn-go v0.0.0-20171102151520-eafdab6b0663/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= +github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod 
h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.14.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= github.com/onsi/gomega v1.17.0 h1:9Luw4uT5HTjHTN8+aNcSThgH1vdXnmdJ8xIfZ4wyTRE= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= -github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= +github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v0.0.0-20191031171055-b133feaeeb2e/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= 
+github.com/opencontainers/runc v1.0.0-rc90/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/selinux v1.5.2/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g= +github.com/openshift/api v0.0.0-20200326160804-ecb9283fe820/go.mod h1:RKMJ5CBnljLfnej+BJ/xnOWc3kZDvJUaIAEq2oKSPtE= +github.com/openshift/api v0.0.0-20201214114959-164a2fb63b5f/go.mod h1:aqU5Cq+kqKKPbDMqxo9FojgDeSpNJI7iuskjXjtojDg= +github.com/openshift/api v0.0.0-20210105115604-44119421ec6b/go.mod h1:aqU5Cq+kqKKPbDMqxo9FojgDeSpNJI7iuskjXjtojDg= +github.com/openshift/api v0.0.0-20210409143810-a99ffa1cac67/go.mod h1:dZ4kytOo3svxJHNYd0J55hwe/6IQG5gAUHUE0F3Jkio= +github.com/openshift/api v0.0.0-20211209135129-c58d9f695577 h1:NUe82M8wMYXbd5s+WBAJ2QAZZivs+nhZ3zYgZFwKfqw= github.com/openshift/api v0.0.0-20211209135129-c58d9f695577/go.mod h1:DoslCwtqUpr3d/gsbq4ZlkaMEdYqKxuypsDjorcHhME= -github.com/openshift/api v0.0.0-20220110171111-997c316db5e1 h1:gvAPP+X17EZwlyim5d/KCmNng6zp+4fRxul0X2Z068A= -github.com/openshift/api v0.0.0-20220110171111-997c316db5e1/go.mod h1:F/eU6jgr6Q2VhMu1mSpMmygxAELd7+BUxs3NHZ25jV4= +github.com/openshift/build-machinery-go v0.0.0-20200211121458-5e3d6e570160/go.mod h1:1CkcsT3aVebzRBzVTSbiKSkJMsC/CASqxesfqEMfJEc= +github.com/openshift/build-machinery-go v0.0.0-20200917070002-f171684f77ab/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= +github.com/openshift/build-machinery-go v0.0.0-20210209125900-0da259a2c359/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= github.com/openshift/build-machinery-go v0.0.0-20210712174854-1bb7fd1518d3/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= +github.com/openshift/build-machinery-go v0.0.0-20210806203541-4ea9b6da3a37/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= github.com/openshift/build-machinery-go v0.0.0-20211213093930-7e33a7eb4ce3 h1:65oBhJYHzYK5VL0gF1eiYY37lLzyLZ47b9y5Kib1nf8= github.com/openshift/build-machinery-go v0.0.0-20211213093930-7e33a7eb4ce3/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= +github.com/openshift/client-go v0.0.0-20201214125552-e615e336eb49/go.mod h1:9/jG4I6sh+5QublJpZZ4Zs/P4/QCXMsQQ/K/058bSB8= +github.com/openshift/client-go v0.0.0-20210112165513-ebc401615f47/go.mod h1:u7NRAjtYVAKokiI9LouzTv4mhds8P4S1TwdVAfbjKSk= github.com/openshift/client-go v0.0.0-20211209144617-7385dd6338e3 h1:SG1aqwleU6bGD0X4mhkTNupjVnByMYYuW4XbnCPavQU= github.com/openshift/client-go v0.0.0-20211209144617-7385dd6338e3/go.mod h1:cwhyki5lqBmrT0m8Im+9I7PGFaraOzcYPtEz93RcsGY= -github.com/openshift/custom-resource-status v0.0.0-20200602122900-c002fd1547ca h1:F1MEnOMwSrTA0YAkO0he9ip9w0JhYzI/iCB2mXmaSPg= -github.com/openshift/custom-resource-status v0.0.0-20200602122900-c002fd1547ca/go.mod h1:GDjWl0tX6FNIj82vIxeudWeSx2Ff6nDZ8uJn0ohUFvo= -github.com/openshift/library-go v0.0.0-20211220195323-eca2c467c492 h1:oj/rSQqVWVj6YJUydZwLz2frrJreiyI4oa9g/YPgMsM= -github.com/openshift/library-go v0.0.0-20211220195323-eca2c467c492/go.mod h1:4UQ9snU1vg53fyTpHQw3vLPiAxI8ub5xrc+y8KPQQFs= -github.com/openshift/machine-config-operator v0.0.1-0.20220203091316-d3010b34d344 h1:HfLsauJWwM1LZtPPmG9LExqphdddycUvaYtsfyYFn3o= -github.com/openshift/machine-config-operator v0.0.1-0.20220203091316-d3010b34d344/go.mod h1:FZ6GifJP0KAKiPE3kvsxdJgkoAMXbSnVbS4to1d+4QA= +github.com/openshift/library-go v0.0.0-20191003152030-97c62d8a2901/go.mod h1:NBttNjZpWwup/nthuLbPAPSYC8Qyo+BBK5bCtFoyYjo= +github.com/openshift/library-go 
v0.0.0-20210205203934-9eb0d970f2f4/go.mod h1:udseDnqxn5ON8i+NBjDp00fBTK0JRu1/6Y6tf6EivDE= +github.com/openshift/library-go v0.0.0-20211209153216-ed9bc958bd8a h1:MoAaYFrzB5QlYzO7phyjx/JBxghUrLitwb69RaulRAs= +github.com/openshift/library-go v0.0.0-20211209153216-ed9bc958bd8a/go.mod h1:M/Gi/GUUrMdSS07nrYtTiK43J6/VUAyk/+IfN4ZqUY4= +github.com/openshift/machine-config-operator v0.0.1-0.20210514234214-c415ce6aed25 h1:3EekrCI6YH2UOCPBB9pOxqv7FW50oaIaRxKLpH83YKI= +github.com/openshift/machine-config-operator v0.0.1-0.20210514234214-c415ce6aed25/go.mod h1:LC0tawtxYlQ94QiIMOZ68Q+B3xEO8Vq3FIn+srfm4mE= +github.com/openshift/runtime-utils v0.0.0-20200415173359-c45d4ff3f912/go.mod h1:0OXNy7VoqFexkxKqyQbHJLPwn1MFp1/CxRJAgKHM+/o= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/operator-framework/api v0.10.7 h1:GlZJ6m+0WSVdSsSjTbhKKAvHXamWJXhwXHUhVwL8LBE= -github.com/operator-framework/api v0.10.7/go.mod h1:PtQSNSuVrhSC6YE6JJJZv3nnZJc32osKX8FmFUZK05U= -github.com/operator-framework/operator-lifecycle-manager v3.11.0+incompatible h1:Po8C8RVLRWq7pNQ5pKonM9CXpC/osoBWbmsuf+HJnSI= -github.com/operator-framework/operator-lifecycle-manager v3.11.0+incompatible/go.mod h1:Ma5ZXd4S1vmMyewWlF7aO8CZiokR7Sd8dhSfkGkNU4U= +github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pelletier/go-toml v1.1.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pin/tftp v2.1.0+incompatible/go.mod h1:xVpZOMCXTy+A5QMjEVN0Glwa1sUvaJhFXbr/aAxuxGY= @@ -631,14 +636,18 @@ github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.3.0/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= +github.com/pquerna/ffjson v0.0.0-20190813045741-dac163c6c0a9/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang 
v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= @@ -650,74 +659,85 @@ github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6T github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.28.0 h1:vGVfV9KrDTvWt5boZO0I19g2E3CsWfpPPKZM9dt3mEw= github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/quobyte/api v0.1.8/go.mod h1:jL7lIHrmqQ7yh05OJ+eEEdHr0u/kmT1Ff9iHd+4H6VI= -github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= -github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= +github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= +github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc= github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto= github.com/russross/blackfriday v1.5.2/go.mod 
h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= +github.com/securego/gosec v0.0.0-20191002120514-e680875ea14d/go.mod h1:w5+eXa0mYznDkHaMCXA4XYffjlH+cy1oyKbfzJXa2Do= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/shirou/gopsutil v0.0.0-20180427012116-c95755e4bcd7/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/assertions v1.1.0 h1:MkTeG1DMwsrdH7QtLXy5W+fUxWq+vmb6cLmyJ7aRtF0= -github.com/smartystreets/assertions v1.1.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +github.com/sourcegraph/go-diff v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34cd2MNlA9u1mE= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.0/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.1.2/go.mod 
h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/cast v1.2.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.2/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw= github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= +github.com/spf13/jwalterweatherman v0.0.0-20180109140146-7c0cea34c8ec/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.0.2/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= -github.com/storageos/go-api v2.2.0+incompatible/go.mod h1:ZrLn+e0ZuF3Y65PNF6dIwbJPZqfmtCXxFm9ckv0agOY= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= @@ -725,35 +745,46 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify 
v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= -github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= +github.com/timakin/bodyclose v0.0.0-20190721030226-87058b9bfcec/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ultraware/funlen v0.0.1/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= +github.com/ultraware/funlen v0.0.2/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= +github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s= +github.com/valyala/quicktemplate v1.1.1/go.mod h1:EH+4AkTd43SvgIbQHYu59/cJyxDoOVRUAfrukLPuGJ4= +github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= +github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g= +github.com/vbauerster/mpb/v5 v5.2.2/go.mod h1:W5Fvgw4dm3/0NhqzV8j6EacfuTe5SvnzBRwiXxDR9ww= github.com/vincent-petithory/dataurl v0.0.0-20160330182126-9a301d65acbb/go.mod h1:FHafX5vmDzyP+1CQATJn7WFKc9CvnvxyvZy6I1MrG/U= github.com/vincent-petithory/dataurl v0.0.0-20191104211930-d1553a71de50 h1:uxE3GYdXIOfhMv3unJKETJEhw78gvzuQqRX/rVirc2A= github.com/vincent-petithory/dataurl v0.0.0-20191104211930-d1553a71de50/go.mod h1:FHafX5vmDzyP+1CQATJn7WFKc9CvnvxyvZy6I1MrG/U= -github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= +github.com/vishvananda/netlink v1.0.0/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= -github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= -github.com/vmware/govmomi v0.20.3/go.mod 
h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= github.com/vmware/vmw-guestinfo v0.0.0-20170707015358-25eff159a728/go.mod h1:x9oS4Wk2s2u4tS29nEaDLdzvuHdB19CvSGJjPgkZJNk= github.com/vmware/vmw-ovflib v0.0.0-20170608004843-1f217b9dc714/go.mod h1:jiPk45kn7klhByRvUq5i2vo1RtHKBHj+iWGFpxbXuuI= -github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= -github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= -github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1/go.mod h1:QcJo0QPSfTONNIgpN5RA8prR7fF8nkF6cTWTcNerRO8= github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -761,7 +792,10 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= @@ -769,9 +803,6 @@ go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lL go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= -go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= -go.mongodb.org/mongo-driver v1.7.5 h1:ny3p0reEpgsR2cfA5cjgwFZg3Cv/ofFh/8jbhGtz9VI= -go.mongodb.org/mongo-driver 
v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -792,6 +823,7 @@ go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4 go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -807,29 +839,26 @@ go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= -go4.org v0.0.0-20200104003542-c7e774b10ea0 h1:M6XsnQeLwG+rHQ+/rrGh3puBI3WZEy9TBWmf2H+enQA= +go4.org v0.0.0-20200104003542-c7e774b10ea0/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= golang.org/x/arch v0.0.0-20180920145803-b19384d3c130/go.mod h1:cYlCBUl1MsqxdiKgmc4uh7TxZfWSFLOGSRR090WDxt8= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto 
v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= @@ -837,8 +866,6 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20210220032938-85be41e4509f/go.mod h1:I6l2HNBLBZEcrOoCpyKLdY2lHoRZ8lI4x60KMCQDft4= -golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -855,28 +882,28 @@ golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPI golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mobile v0.0.0-20201217150744-e6ae53a27f4f/go.mod h1:skQtrUTUwhdJvXM/2KKJzY8pDgNr9I/FOMqDVRPBUS4= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191209134235-331c550502dd/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20170915142106-8351a756f30f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190228165749-92fc7df08ae7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -887,7 +914,10 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -909,19 +939,16 @@ golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod 
h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210825183410-e898025ed96a h1:bRuuGXV8wwSdGTB+CtJf+FjgO1APK1CoO39T4BN/XBw= golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211209124913-491a49abca63 h1:iocB37TsdFuN6IBRZ+ry36wrkoV51/tl5vOWqkcPGvY= -golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -934,7 +961,6 @@ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= @@ -942,7 +968,6 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -950,6 +975,7 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20171026204733-164713f0dfce/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180903190138-2b024373dcd9/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -958,30 +984,27 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191110163157-d32e6e3b99c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -989,11 +1012,10 @@ golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1009,10 +1031,9 @@ golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1023,13 +1044,9 @@ golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1043,6 +1060,7 @@ golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9sn golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.0.0-20170915090833-1cbadb444a80/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= @@ -1052,36 +1070,42 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20170915040203-e531a2a1c15f/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181117154741-2ddaf7f79a09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190121143147-24cd39ecf745/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190521203540-521d6ed310dd/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod 
h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190909030654-5b82db07426d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190930201159-7c411dea38b0/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1092,7 +1116,7 @@ golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117012304-6edc0a871e69/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200115044656-831fdb1e1868/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -1112,6 +1136,7 @@ golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200601175630-2caf76543d99/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200606014950-c42cb6316fb6/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200610160956-3e83d1e96d0e/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= @@ -1136,12 +1161,6 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1N golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= -gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= -gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= -gonum.org/v1/gonum v0.6.2/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU= -gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod 
h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= -gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= -gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -1165,7 +1184,6 @@ google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjR google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= -google.golang.org/api v0.46.0/go.mod h1:ceL4oozhkAiTID8XMmJBsIxID/9wMXJVVFXPg4ylg3I= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1187,7 +1205,6 @@ google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ -1221,7 +1238,6 @@ google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210429181445-86c259c2b4ab/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -1229,6 +1245,8 @@ google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiq google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod 
h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= @@ -1261,31 +1279,32 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.0/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= -gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= -gopkg.in/go-playground/validator.v9 v9.30.0/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU= gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ldap.v2 v2.5.1/go.mod h1:oI0cpe/D7HRtBQl8aTg+ZmzFUAvu4lsv3eLXMLGFxWk= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/op/go-logging.v1 v1.0.0-20160211212156-b2cb9fa56473/go.mod h1:N1eN2tsCx0Ydtgjl4cqmbRCsY4/+z4cYDeqwZTk6zog= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/src-d/go-billy.v4 v4.3.0/go.mod h1:tm33zBoOwxjYHZIE+OV8bxTWFMJLrconzFMd38aARFk= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/warnings.v0 v0.1.1/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 
v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1297,11 +1316,12 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20190502103701-55513cacd4ae/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20191010095647-fc94e3f71652/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1311,28 +1331,21 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M= -howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= -k8s.io/api v0.23.3 h1:KNrME8KHGr12Ozjf8ytOewKzZh6hl/hHUZeHddT3a38= -k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ= -k8s.io/apiextensions-apiserver v0.23.3 h1:JvPJA7hSEAqMRteveq4aj9semilAZYcJv+9HHFWfUdM= -k8s.io/apiextensions-apiserver v0.23.3/go.mod h1:/ZpRXdgKZA6DvIVPEmXDCZJN53YIQEUDF+hrpIQJL38= -k8s.io/apimachinery v0.23.3 h1:7IW6jxNzrXTsP0c8yXz2E5Yx/WTzVPTsHIx/2Vm0cIk= -k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= -k8s.io/apiserver v0.23.3/go.mod h1:3HhsTmC+Pn+Jctw+Ow0LHA4dQ4oXrQ4XJDzrVDG64T4= -k8s.io/cli-runtime v0.23.3/go.mod h1:yA00O5pDqnjkBh8fkuugBbfIfjB1nOpz+aYLotbnOfc= -k8s.io/client-go v0.23.3 h1:23QYUmCQ/W6hW78xIwm3XqZrrKZM+LWDqW2zfo+szJs= -k8s.io/client-go v0.23.3/go.mod h1:47oMd+YvAOqZM7pcQ6neJtBiFH7alOyfunYN48VsmwE= -k8s.io/cloud-provider v0.23.3/go.mod h1:Ik+pKlpPOp0Zs906xyOpT3g2xB9A8VGNdejMTZS6EeA= -k8s.io/cluster-bootstrap v0.23.3/go.mod h1:NwUIksUHKNOKIHg/AfLH4NxqylbfEVXUh9EX2NxHZII= -k8s.io/code-generator v0.23.3 h1:NSAKIkvkL8OaWr5DrF9CXGBJjhMp3itplT/6fwHQcAY= -k8s.io/code-generator v0.23.3/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk= -k8s.io/component-base v0.23.3 h1:q+epprVdylgecijVGVdf4MbizEL2feW4ssd7cdo6LVY= -k8s.io/component-base v0.23.3/go.mod h1:1Smc4C60rWG7d3HjSYpIwEbySQ3YWg0uzH5a2AtaTLg= -k8s.io/component-helpers v0.23.3/go.mod 
h1:SH+W/WPTaTenbWyDEeY7iytAQiMh45aqKxkvlqQ57cg= -k8s.io/controller-manager v0.23.3/go.mod h1:E0ss6ogA93sZ+AuibQSa7H4xWIiICTYFjowkjellVeU= -k8s.io/cri-api v0.23.3/go.mod h1:REJE3PSU0h/LOV1APBrupxrEJqnoxZC8KWzkBUHwrK4= -k8s.io/csi-translation-lib v0.23.3/go.mod h1:8J7hpeqMoCJWofd1lCs4vZrEshdbVYrqurFeB6GZ/+E= +k8s.io/api v0.23.0 h1:WrL1gb73VSC8obi8cuYETJGXEoFNEh3LU0Pt+Sokgro= +k8s.io/api v0.23.0/go.mod h1:8wmDdLBHBNxtOIytwLstXt5E9PddnZb0GaMcqsvDBpg= +k8s.io/apiextensions-apiserver v0.23.0 h1:uii8BYmHYiT2ZTAJxmvc3X8UhNYMxl2A0z0Xq3Pm+WY= +k8s.io/apiextensions-apiserver v0.23.0/go.mod h1:xIFAEEDlAZgpVBl/1VSjGDmLoXAWRG40+GsWhKhAxY4= +k8s.io/apimachinery v0.23.0 h1:mIfWRMjBuMdolAWJ3Fd+aPTMv3X9z+waiARMpvvb0HQ= +k8s.io/apimachinery v0.23.0/go.mod h1:fFCTTBKvKcwTPFzjlcxp91uPFZr+JA0FubU4fLzzFYc= +k8s.io/apiserver v0.23.0/go.mod h1:Cec35u/9zAepDPPFyT+UMrgqOCjgJ5qtfVJDxjZYmt4= +k8s.io/cli-runtime v0.23.0/go.mod h1:B5N3YH0KP1iKr6gEuJ/RRmGjO0mJQ/f/JrsmEiPQAlU= +k8s.io/client-go v0.23.0 h1:vcsOqyPq7XV3QmQRCBH/t9BICJM9Q1M18qahjv+rebY= +k8s.io/client-go v0.23.0/go.mod h1:hrDnpnK1mSr65lHHcUuIZIXDgEbzc7/683c6hyG4jTA= +k8s.io/code-generator v0.23.0 h1:lhyd2KJVCEmpjaCpuoooGs+e3xhPwpYvupnNRidO0Ds= +k8s.io/code-generator v0.23.0/go.mod h1:vQvOhDXhuzqiVfM/YHp+dmg10WDZCchJVObc9MvowsE= +k8s.io/component-base v0.23.0 h1:UAnyzjvVZ2ZR1lF35YwtNY6VMN94WtOnArcXBu34es8= +k8s.io/component-base v0.23.0/go.mod h1:DHH5uiFvLC1edCpvcTDV++NKULdYYU6pR9Tt3HIKMKI= +k8s.io/component-helpers v0.23.0/go.mod h1:liXMh6FZS4qamKtMJQ7uLHnFe3tlC86RX5mJEk/aerg= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c h1:GohjlNKauSai7gN4wsJkeZ3WAJx4Sh+oT/b5IYn5suA= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= @@ -1341,57 +1354,46 @@ k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/klog/v2 v2.30.0 h1:bUO6drIvCIsvZ/XFgfxoGFQU/a4Qkh0iAlvUR7vlHJw= k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-aggregator v0.23.3/go.mod h1:pt5QJ3QaIdhZzNlUvN5wndbM0LNT4BvhszGkzy2QdFo= -k8s.io/kube-controller-manager v0.23.3/go.mod h1:e8m5dhjei67DlLZA/QTvenxiGyonG9UhgHtU1LMslJE= +k8s.io/kube-aggregator v0.23.0/go.mod h1:b1vpoaTWKZjCzvbe1KXFw3vPbISrghJsg7/RI8oZUME= k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 h1:E3J9oCLlaobFUqsjG9DfKbP2BmgwBL2p7pn0A3dG9W4= k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= -k8s.io/kube-proxy v0.23.3/go.mod h1:XdvwqJkR9r0ddUAX4ruA4V22Kws3qzKvgL3rIq584Ko= -k8s.io/kube-scheduler v0.23.3/go.mod h1:/thFQoAMv9/olDOEYVSQbUohmkJJyIPUmpVu0UealSM= -k8s.io/kubectl v0.23.3/go.mod h1:VBeeXNgLhSabu4/k0O7Q0YujgnA3+CLTUE0RcmF73yY= -k8s.io/kubelet v0.23.3 h1:jYed8HoT0H2zXzf5Av+Ml8z5erN39uJfKh/yplYMgkg= -k8s.io/kubelet v0.23.3/go.mod h1:RZxGSCsiwoWJ9z6mVla+jhiLfCFIKC16yAS38D7GQSE= -k8s.io/kubernetes v1.23.3 h1:weuFJOkRP7+057uvhNUYbVTVCog/klquhbtKRD+UHUo= -k8s.io/kubernetes v1.23.3/go.mod h1:C0AB/I7M4Nu6d1ELyGdC8qrrHEc6J5l8CHUashza1Io= 
-k8s.io/legacy-cloud-providers v0.23.3/go.mod h1:s9vv59dUv4SU+HAm9C/YDdyw2OY9qmFYmcGEwr/ecDc= -k8s.io/metrics v0.23.3/go.mod h1:Ut8TvkbsO4oMVeUzaTArvPrcw9QRFLs2XNzUlORjdYE= -k8s.io/mount-utils v0.23.3/go.mod h1:OTN3LQPiOGMfx/SmVlsnySwsAmh4gYrDYLchlMHtf98= -k8s.io/pod-security-admission v0.23.3/go.mod h1:vULEGUgsujyrKBz3RRRZnvrJJt115gu0GICArDmgzqo= -k8s.io/sample-apiserver v0.23.3/go.mod h1:5yDZRMfFvp7/2BOXBwk0AFNsD00iyuXeEsWZSoLFeGw= -k8s.io/system-validators v1.6.0/go.mod h1:bPldcLgkIUK22ALflnsXk8pvkTEndYdNuaHH6gRrl0Q= +k8s.io/kubectl v0.23.0/go.mod h1:TfcGEs3u4dkmoC2eku1GYymdGaMtPMcaLLFrX/RB2kI= +k8s.io/kubelet v0.23.0/go.mod h1:A4DxfIt5Ka+rz54HAFhs1bgiFjJT6lcaAYUcACZl1/k= +k8s.io/metrics v0.23.0/go.mod h1:NDiZTwppEtAuKJ1Rxt3S4dhyRzdp6yUcJf0vo023dPo= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b h1:wxEMGetGMur3J1xuGLQY7GEQYg9bZxKn3tKo5k/eYcs= k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20211116205334-6203023598ed h1:ck1fRPWPJWsMd8ZRFsWc6mh/zHp5fZ/shhbrgPUxDAE= -k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -kubevirt.io/qe-tools v0.1.6 h1:S6z9CATmgV2/z9CWetij++Rhu7l/Z4ObZqerLdNMo0Y= -kubevirt.io/qe-tools v0.1.6/go.mod h1:PJyH/YXC4W0AmxfheDmXWMbLNsMSboVGXKpMAwfKzVE= -modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= -modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= -modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= -modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= -modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= +mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= +mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= +mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34/go.mod h1:H6SUd1XjIs+qQCyskXg5OFSrilMRUkD8ePJpHKDPaeY= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.27/go.mod h1:tq2nT0Kx7W+/f2JVE+zxYtUhdjuELJkVpNz+x/QN5R4= -sigs.k8s.io/controller-runtime v0.11.1 h1:7YIHT2QnHJArj/dk9aUkYhfqfK5cIxPOX5gPECfdZLU= -sigs.k8s.io/controller-runtime v0.11.1/go.mod h1:KKwLiTooNGu+JmLZGn9Sl3Gjmfj66eMbCQznLP5zcqA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.25/go.mod h1:Mlj9PNLmG9bZ6BHFwFKDo5afkpWyUISkb9Me0GnK66I= +sigs.k8s.io/controller-runtime v0.11.0 h1:DqO+c8mywcZLFJWILq4iktoECTyn30Bkj0CwgqMpZWQ= +sigs.k8s.io/controller-runtime v0.11.0/go.mod h1:KKwLiTooNGu+JmLZGn9Sl3Gjmfj66eMbCQznLP5zcqA= sigs.k8s.io/controller-tools v0.7.0 h1:iZIz1vEcavyEfxjcTLs1WH/MPf4vhPCtTKhoHqV8/G0= sigs.k8s.io/controller-tools v0.7.0/go.mod h1:bpBAo0VcSDDLuWt47evLhMLPxRPxMDInTEH/YbdeMK0= sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s= sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod 
h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= +sigs.k8s.io/kube-storage-version-migrator v0.0.3/go.mod h1:mXfSLkx9xbJHQsgNDDUZK/iQTs2tMbx/hsJlWe6Fthw= +sigs.k8s.io/kube-storage-version-migrator v0.0.4/go.mod h1:mXfSLkx9xbJHQsgNDDUZK/iQTs2tMbx/hsJlWe6Fthw= sigs.k8s.io/kustomize/api v0.10.1/go.mod h1:2FigT1QN6xKdcnGS2Ppp1uIWrtWN28Ms8A3OZUZhwr8= sigs.k8s.io/kustomize/cmd/config v0.10.2/go.mod h1:K2aW7nXJ0AaT+VA/eO0/dzFLxmpFcTzudmAgDwPY1HQ= sigs.k8s.io/kustomize/kustomize/v4 v4.4.1/go.mod h1:qOKJMMz2mBP+vcS7vK+mNz4HBLjaQSWRY22EF6Tb7Io= sigs.k8s.io/kustomize/kyaml v0.13.0/go.mod h1:FTJxEZ86ScK184NpGSAQcfEqee0nul8oLCK30D47m4E= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= +sigs.k8s.io/structured-merge-diff/v4 v4.2.0 h1:kDvPBbnPk+qYmkHmSo8vKGp438IASWofnbbUKDE/bv0= sigs.k8s.io/structured-merge-diff/v4 v4.2.0/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= -sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y= -sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= +vbom.ml/util v0.0.0-20180919145318-efcd4e0f9787/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI= diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt deleted file mode 100644 index 767efde98..000000000 --- a/hack/boilerplate.go.txt +++ /dev/null @@ -1,15 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ \ No newline at end of file diff --git a/hack/build-latency-test-bin.sh b/hack/build-latency-test-bin.sh deleted file mode 100755 index 25da6f2e8..000000000 --- a/hack/build-latency-test-bin.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -set -eu - -if ! which go &>/dev/null; then - echo "No go command available" - exit 1 -fi - -go test -v -c -o build/_output/bin/latency-e2e.test ./functests/4_latency diff --git a/hack/build-test-bin.sh b/hack/build-test-bin.sh deleted file mode 100755 index 4b9eb8023..000000000 --- a/hack/build-test-bin.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash - -set -e - -if ! which go; then - echo "No go command available" - exit 1 -fi - -GOPATH="${GOPATH:-~/go}" -export PATH=$PATH:$GOPATH/bin - -if ! 
which gingko; then - echo "Downloading ginkgo tool" - go install github.com/onsi/ginkgo/ginkgo -fi - -ginkgo build ./functests/* diff --git a/hack/clean-deploy.sh b/hack/clean-deploy.sh deleted file mode 100755 index d6b7da84a..000000000 --- a/hack/clean-deploy.sh +++ /dev/null @@ -1,61 +0,0 @@ -#!/bin/bash - -# expect oc to be in PATH by default -OC_TOOL="${OC_TOOL:-oc}" - -profiles=$(${OC_TOOL} get performanceprofile -o name) -for profileName in $profiles -do - nodeSelector="$(${OC_TOOL} get $profileName -o=jsonpath='{.spec.nodeSelector}' | awk -F'[/"]' '{print $3}')" - - if [[ $nodeSelector != "worker" ]]; then - mcps+=($(${OC_TOOL} get mcp -l machineconfiguration.openshift.io/role=$nodeSelector -o name | awk -F "/" '{print $2}')) - nodes=$(${OC_TOOL} get nodes --selector="node-role.kubernetes.io/${nodeSelector}" -o name) - for node in $nodes - do - echo "[INFO]: Unlabeling node $node" - ${OC_TOOL} label $node node-role.kubernetes.io/${nodeSelector}- - done - fi -done - -# Give MCO some time to notice change -sleep 10 - -# Wait for worker MCP being updated -success=0 -iterations=0 -sleep_time=10 -max_iterations=180 # results in 30 minute timeout -until [[ $success -eq 1 ]] || [[ $iterations -eq $max_iterations ]] -do - echo "[INFO] Checking if MCP is updated" - if ! ${OC_TOOL} wait mcp/worker --for condition=Updated --timeout 1s - then - iterations=$((iterations + 1)) - iterations_left=$((max_iterations - iterations)) - echo "[INFO] MCP not updated yet. $iterations_left retries left." - sleep $sleep_time - continue - fi - - success=1 - -done - -if [[ $success -eq 0 ]]; then - echo "[ERROR] MCP update failed, going on nonetheless." -fi - -# Delete CRs: this will undeploy all the MCs etc. (once it is implemented) -echo "[INFO] Deleting PerformanceProfile and giving the operator some time to undeploy everything" -$OC_TOOL delete performanceprofile --all -sleep 30 - -# Delete worker-cnf MCP -for mcp in "${mcps[@]}" -do - echo "[INFO] Deleting MCP $mcp" - $OC_TOOL delete mcp $mcp -done - diff --git a/hack/deploy.sh b/hack/deploy.sh deleted file mode 100755 index 052da0465..000000000 --- a/hack/deploy.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/bin/bash - -set -e - -# expect oc to be in PATH by default -OC_TOOL="${OC_TOOL:-oc}" - -# Deploy features -success=0 -iterations=0 -sleep_time=10 -max_iterations=30 # results in 5 minute timeout -feature_dir=test/e2e/pao/cluster-setup/${CLUSTER}-cluster/performance/ - -until [[ $success -eq 1 ]] || [[ $iterations -eq $max_iterations ]] -do - - echo "[INFO] Deploying performance profile." - set +e - - # be verbose on last iteration only - if [[ $iterations -eq $((max_iterations - 1)) ]] || [[ -n "${VERBOSE}" ]]; then - ${OC_TOOL} kustomize $feature_dir | envsubst | ${OC_TOOL} apply -f - - else - ${OC_TOOL} kustomize $feature_dir | envsubst | ${OC_TOOL} apply -f - &> /dev/null - fi - - # shellcheck disable=SC2181 - if [[ $? != 0 ]];then - - iterations=$((iterations + 1)) - iterations_left=$((max_iterations - iterations)) - if [[ $iterations_left != 0 ]]; then - echo "[WARN] Deployment did not fully succeed yet, retrying in $sleep_time sec, $iterations_left retries left" - sleep $sleep_time - else - echo "[WARN] At least one deployment failed, giving up" - fi - - else - # All features deployed successfully - success=1 - fi - set -e - -done - -if [[ $success -eq 0 ]]; then - echo "[ERROR] Deployment failed, giving up." - exit 1 -fi - -echo "[INFO] Deployment successful." 
diff --git a/hack/docs-generate.sh b/hack/docs-generate.sh deleted file mode 100755 index 2ee3143ce..000000000 --- a/hack/docs-generate.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -set -e - -export GOROOT=$(go env GOROOT) - -export PERF_PROFILE_TYPES=api/v2/performanceprofile_types.go -export PERF_PROFILE_DOC=docs/performance_profile.md - -# using the generated CSV, create the real CSV by injecting all the right data into it -build/_output/bin/docs-generator -- $PERF_PROFILE_TYPES > $PERF_PROFILE_DOC - -echo "API docs updated" diff --git a/hack/label-worker-cnf.sh b/hack/label-worker-cnf.sh deleted file mode 100755 index 4ea755c11..000000000 --- a/hack/label-worker-cnf.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -set -e - -# expect oc to be in PATH by default -OC_TOOL="${OC_TOOL:-oc}" - -# Label 1 worker node -echo "[INFO]: Labeling 1 worker node with worker-cnf" -node=$(${OC_TOOL} get nodes --selector='node-role.kubernetes.io/worker' \ - --selector='!node-role.kubernetes.io/master' -o name | head -1) - -${OC_TOOL} label --overwrite $node node-role.kubernetes.io/worker-cnf="" diff --git a/hack/lint.sh b/hack/lint.sh deleted file mode 100755 index d76dbe34a..000000000 --- a/hack/lint.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash - -which golint -if [ $? -ne 0 ]; then - echo "Downloading golint tool" - go get -u golang.org/x/lint/golint -fi - -RETVAL=0 -GENERATED_FILES="zz_generated.*.go" -for file in $(find . -path ./vendor -prune -o -type f -name '*.go' -print | grep -E -v "$GENERATED_FILES" | grep -E -v "functests"); do - golint -set_exit_status "$file" - if [[ $? -ne 0 ]]; then - RETVAL=1 - fi -done -exit $RETVAL diff --git a/hack/release-note.sh b/hack/release-note.sh deleted file mode 100755 index c59c69a17..000000000 --- a/hack/release-note.sh +++ /dev/null @@ -1,40 +0,0 @@ -#!/bin/bash - -# This is generating a release note, which will be used for github releases - -RELEASE_NOTE_FILE="build/_output/release-note.md" - -# Current tag -RELREF=${RELREF:-$(git describe --abbrev=0 --tags)} - -# Previous tag -PREREF=${PREREF:-$(git describe --abbrev=0 --tags $RELREF^)} - -RELSPANREF=$PREREF..$RELREF - -GHRELURL="https://github.com/openshift/cluster-node-tuning-operator/releases/tag/" -RELURL="$GHRELURL$RELREF" - -CHANGES_COUNT=$(git log --oneline $RELSPANREF | wc -l) -CHANGES_BY_COUNT=$(git shortlog -sne $RELSPANREF | wc -l) -STATS=$(git diff --shortstat $RELSPANREF) - -cat < "${RELEASE_NOTE_FILE}" -## Performance Addon Operator - -This is release "${RELREF}" of the performance addon operator, which follows "${PREREF}". -This release consists of ${CHANGES_COUNT} changes by ${CHANGES_BY_COUNT} contributers: -${STATS} - -The primary release artifact of the performance addon operator is the git tree. -The source code and selected build artifacts are available for download at: -${RELURL} - -Pre-built containers are published on quay.io and can be viewed at: -https://quay.io/organization/openshift-kni - -### Notable changes - -*TODO* - -EOF \ No newline at end of file diff --git a/hack/run-functests.sh b/hack/run-functests.sh deleted file mode 100755 index d9b6c165c..000000000 --- a/hack/run-functests.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash - -GINKGO_SUITS=${GINKGO_SUITS:-"test/e2e/pao/functests"} -LATENCY_TEST_RUN=${LATENCY_TEST_RUN:-"false"} - -which ginkgo -if [ $? -ne 0 ]; then - echo "Downloading ginkgo tool" - #go install github.com/onsi/ginkgo/ginkgo - go install github.com/onsi/ginkgo/ginkgo@v1.16.5 -fi - -NO_COLOR="" -if ! 
which tput &> /dev/null 2>&1 || [[ $(tput -T$TERM colors) -lt 8 ]]; then - echo "Terminal does not seem to support colored output, disabling it" - NO_COLOR="-noColor" -fi - -# run the latency tests under the OpenShift CI, just to verify that the image works -if [ -n "${IMAGE_FORMAT}" ]; then - LATENCY_TEST_RUN="true" -fi - - -echo "Running Functional Tests: ${GINKGO_SUITS}" -# -v: print out the text and location for each spec before running it and flush output to stdout in realtime -# -r: run suites recursively -# --failFast: ginkgo will stop the suite right after the first spec failure -# --flakeAttempts: rerun the test if it fails -# -requireSuite: fail if tests are not executed because of missing suite -GOFLAGS=-mod=vendor ginkgo $NO_COLOR --v -r --failFast --flakeAttempts=2 -requireSuite ${GINKGO_SUITS} -- -junitDir /tmp/artifacts diff --git a/hack/run-latency-testing.sh b/hack/run-latency-testing.sh deleted file mode 100755 index 4cc95403f..000000000 --- a/hack/run-latency-testing.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash - -GINKGO_SUITS=${GINKGO_SUITS:-functests/5_latency_testing} - -which ginkgo -if [ $? -ne 0 ]; then - echo "Downloading ginkgo tool" - go install github.com/onsi/ginkgo/ginkgo -fi - -NO_COLOR="" -if ! which tput &> /dev/null 2>&1 || [[ $(tput -T$TERM colors) -lt 8 ]]; then - echo "Terminal does not seem to support colored output, disabling it" - NO_COLOR="-noColor" -fi - - -echo "Running Functional Tests: ${GINKGO_SUITS}" -# -v: print out the text and location for each spec before running it and flush output to stdout in realtime -# -r: run suites recursively -# --failFast: ginkgo will stop the suite right after the first spec failure -# --flakeAttempts: rerun the test if it fails -# -requireSuite: fail if tests are not executed because of missing suite -GOFLAGS=-mod=vendor ginkgo $NO_COLOR --v -r -requireSuite ${GINKGO_SUITS} -- -junitDir /tmp/artifacts - diff --git a/hack/run-perf-profile-creator-functests.sh b/hack/run-perf-profile-creator-functests.sh deleted file mode 100755 index 6636b525e..000000000 --- a/hack/run-perf-profile-creator-functests.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash - -GINKGO_SUITS=${GINKGO_SUITS:-"test/e2e/pao/functests-performance-profile-creator"} - -which ginkgo -if [ $? -ne 0 ]; then - echo "Downloading ginkgo tool" - #go install github.com/onsi/ginkgo/ginkgo - go install github.com/onsi/ginkgo/ginkgo@v1.16.5 -fi - -NO_COLOR="" -if ! 
which tput &> /dev/null 2>&1 || [[ $(tput -T$TERM colors) -lt 8 ]]; then - echo "Terminal does not seem to support colored output, disabling it" - NO_COLOR="-noColor" -fi - - -echo "Running Functional Tests: ${GINKGO_SUITS}" -# -v: print out the text and location for each spec before running it and flush output to stdout in realtime -# -r: run suites recursively -# --failFast: ginkgo will stop the suite right after the first spec failure -# --flakeAttempts: rerun the test if it fails -# -requireSuite: fail if tests are not executed because of missing suite -GOFLAGS=-mod=vendor ginkgo $NO_COLOR --v -r --failFast --flakeAttempts=2 -requireSuite ${GINKGO_SUITS} -- -junitDir /tmp/artifacts diff --git a/hack/run-perf-profile-creator.sh b/hack/run-perf-profile-creator.sh deleted file mode 100755 index b50e13f37..000000000 --- a/hack/run-perf-profile-creator.sh +++ /dev/null @@ -1,81 +0,0 @@ -#!/bin/bash - -readonly CONTAINER_RUNTIME=${CONTAINER_RUNTIME:-podman} -readonly CURRENT_SCRIPT=$(basename "$0") -readonly CMD="${CONTAINER_RUNTIME} run --entrypoint performance-profile-creator" -readonly IMG_EXISTS_CMD="${CONTAINER_RUNTIME} image exists" -readonly IMG_PULL_CMD="${CONTAINER_RUNTIME} image pull" -readonly MUST_GATHER_VOL="/must-gather" - -PAO_IMG="quay.io/openshift-kni/performance-addon-operator:4.11-snapshot" -MG_TARBALL="" -DATA_DIR="" - -usage() { - print "Wrapper usage:" - print " ${CURRENT_SCRIPT} [-h] [-p image][-t path] -- [performance-profile-creator flags]" - print "" - print "Options:" - print " -h help for ${CURRENT_SCRIPT}" - print " -p Performance Addon Operator image" - print " -t path to a must-gather tarball" - - ${IMG_EXISTS_CMD} "${PAO_IMG}" && ${CMD} "${PAO_IMG}" -h -} - -function cleanup { - [ -d "${DATA_DIR}" ] && rm -rf "${DATA_DIR}" -} -trap cleanup EXIT - -exit_error() { - print "error: $*" - usage - exit 1 -} - -print() { - echo "$*" >&2 -} - -check_requirements() { - ${IMG_EXISTS_CMD} "${PAO_IMG}" || ${IMG_PULL_CMD} "${PAO_IMG}" || \ - exit_error "Performance Addon Operator image not found" - - [ -n "${MG_TARBALL}" ] || exit_error "Must-gather tarball file path is mandatory" - [ -f "${MG_TARBALL}" ] || exit_error "Must-gather tarball file not found" - - DATA_DIR=$(mktemp -d -t "${CURRENT_SCRIPT}XXXX") || exit_error "Cannot create the data directory" - tar -zxf "${MG_TARBALL}" --directory "${DATA_DIR}" || exit_error "Cannot decompress the must-gather tarball" - chmod a+rx "${DATA_DIR}" - - return 0 -} - -main() { - while getopts ':hp:t:' OPT; do - case "${OPT}" in - h) - usage - exit 0 - ;; - p) - PAO_IMG="${OPTARG}" - ;; - t) - MG_TARBALL="${OPTARG}" - ;; - ?) - exit_error "invalid argument: ${OPTARG}" - ;; - esac - done - shift $((OPTIND - 1)) - - check_requirements || exit 1 - - ${CMD} -v "${DATA_DIR}:${MUST_GATHER_VOL}:z" "${PAO_IMG}" "$@" --must-gather-dir-path "${MUST_GATHER_VOL}" - echo "" 1>&2 -} - -main "$@" diff --git a/hack/run-render-command-functests.sh b/hack/run-render-command-functests.sh deleted file mode 100755 index 7b5fd7c53..000000000 --- a/hack/run-render-command-functests.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash - -GINKGO_SUITS=${GINKGO_SUITS:-"test/e2e/pao/functests-render-command"} - -which ginkgo -if [ $? -ne 0 ]; then - echo "Downloading ginkgo tool" - #go install github.com/onsi/ginkgo/ginkgo - go install github.com/onsi/ginkgo/ginkgo@v1.16.5 -fi - -NO_COLOR="" -if ! 
which tput &> /dev/null 2>&1 || [[ $(tput -T$TERM colors) -lt 8 ]]; then - echo "Terminal does not seem to support colored output, disabling it" - NO_COLOR="-noColor" -fi - - -echo "Running Functional Tests: ${GINKGO_SUITS}" -# -v: print out the text and location for each spec before running it and flush output to stdout in realtime -# -r: run suites recursively -# --failFast: ginkgo will stop the suite right after the first spec failure -# --flakeAttempts: rerun the test if it fails -# -requireSuite: fail if tests are not executed because of missing suite -GOFLAGS=-mod=vendor ginkgo $NO_COLOR --v -r --failFast --flakeAttempts=2 -requireSuite ${GINKGO_SUITS} -- -junitDir /tmp/artifacts diff --git a/hack/show-cluster-version.sh b/hack/show-cluster-version.sh deleted file mode 100755 index c5718b7e3..000000000 --- a/hack/show-cluster-version.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash - -# expect oc to be in PATH by default -OC_TOOL="${OC_TOOL:-oc}" - -echo "Cluster version" -${OC_TOOL} version || : -${OC_TOOL} get nodes -o custom-columns=VERSION:.status.nodeInfo.kubeletVersion || : -${OC_TOOL} get clusterversion || : diff --git a/hack/unittests.sh b/hack/unittests.sh deleted file mode 100755 index 84442c296..000000000 --- a/hack/unittests.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/usr/bin/env bash - -set -e - -OUTDIR="build/_output/coverage" -mkdir -p "$OUTDIR" - -COVER_FILE="${OUTDIR}/cover.out" -FUNC_FILE="${OUTDIR}/coverage.txt" -HTML_FILE="${OUTDIR}/coverage.html" - -echo "running unittests with coverage" -GOFLAGS=-mod=vendor go test -race -covermode=atomic -coverprofile="${COVER_FILE}" -v ./pkg/... ./controllers/... ./api/... - -if [[ -n "${DRONE}" ]]; then - - # Uploading coverage report to coveralls.io - go get github.com/mattn/goveralls - - # we should update the vendor/modules.txt once we got a new package - go mod vendor - $(go env GOPATH)/bin/goveralls -coverprofile="$COVER_FILE" -service=drone.io - -else - - echo "creating coverage reports" - go tool cover -func="${COVER_FILE}" > "${FUNC_FILE}" - go tool cover -html="${COVER_FILE}" -o "${HTML_FILE}" - echo "find coverage reports at ${OUTDIR}" - -fi diff --git a/hack/verify-generated.sh b/hack/verify-generated.sh deleted file mode 100755 index 434e4e192..000000000 --- a/hack/verify-generated.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash - -if [[ -n "$(git status --porcelain .)" ]]; then - echo "uncommitted generated files. run 'make generate' and commit results." 
- echo "$(git status --porcelain .)" - exit 1 -fi diff --git a/hack/wait-for-mcp.sh b/hack/wait-for-mcp.sh deleted file mode 100755 index 2013ef274..000000000 --- a/hack/wait-for-mcp.sh +++ /dev/null @@ -1,65 +0,0 @@ -#!/bin/bash - -set -e - -# expect oc to be in PATH by default -OC_TOOL="${OC_TOOL:-oc}" - -success=0 -iterations=0 -sleep_time=10 -max_iterations=180 # results in 30 minute timeout - -# Let's gibe the operator some time to do its work before we unpause the MCP (see below) -echo "[INFO] Waiting a bit for letting the operator do its work" -sleep 30 - -until [[ $success -eq 1 ]] || [[ $iterations -eq $max_iterations ]] -do - - echo "[INFO] Unpausing MCPs" - set +e - mcps=$(${OC_TOOL} get mcp --no-headers -o custom-columns=":metadata.name") - for mcp in $mcps - do - ${OC_TOOL} patch mcp "${mcp}" -p '{"spec":{"paused":false}}' --type=merge &> /dev/null - done - set -e - - echo "[INFO] Checking if MCP picked up the performance MC" - # MC with new generated name - mc_new="$(${OC_TOOL} get mcp worker-cnf -o jsonpath='{.spec.configuration.source[?(@.name=="50-performance-'$CLUSTER'")].name}')" - # MC with old generated name - mc_old="$(${OC_TOOL} get mcp worker-cnf -o jsonpath='{.spec.configuration.source[?(@.name=="performance-'$CLUSTER'")].name}')" - # No output means that the new machine config wasn't picked by MCO yet - if [ -z "${mc_new}" ] && [ -z "${mc_old}" ] - then - iterations=$((iterations + 1)) - iterations_left=$((max_iterations - iterations)) - echo "[INFO] Performance MC not picked up yet. $iterations_left retries left." - sleep $sleep_time - continue - fi - - echo "[INFO] Checking if MCP is updated" - if ! ${OC_TOOL} wait mcp/worker-cnf --for condition=Updated --timeout 1s &> /dev/null - then - iterations=$((iterations + 1)) - iterations_left=$((max_iterations - iterations)) - if [[ $iterations_left != 0 ]]; then - echo "[WARN] MCP not updated yet, retrying in $sleep_time sec, $iterations_left retries left" - sleep $sleep_time - fi - else - success=1 - fi - - -done - -if [[ $success -eq 0 ]]; then - echo "[ERROR] MCP update failed, giving up." - exit 1 -fi - -echo "[INFO] MCP update successful." 
diff --git a/manifests/20-performance-profile.crd.yaml b/manifests/20-performance-profile.crd.yaml deleted file mode 100644 index 70afc2677..000000000 --- a/manifests/20-performance-profile.crd.yaml +++ /dev/null @@ -1,490 +0,0 @@ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - include.release.openshift.io/ibm-cloud-managed: "true" - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - service.beta.openshift.io/inject-cabundle: "true" - name: performanceprofiles.performance.openshift.io -spec: - conversion: - strategy: Webhook - webhook: - clientConfig: - service: - name: performance-addon-operator-service - namespace: openshift-cluster-node-tuning-operator - path: /convert - port: 443 - conversionReviewVersions: - - v1 - - v1alpha1 - group: performance.openshift.io - names: - kind: PerformanceProfile - listKind: PerformanceProfileList - plural: performanceprofiles - singular: performanceprofile - scope: Cluster - versions: - - deprecated: true - deprecationWarning: v1 is deprecated and should be removed in next three releases, use v2 instead - name: v1 - schema: - openAPIV3Schema: - description: PerformanceProfile is the Schema for the performanceprofiles API - type: object - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: PerformanceProfileSpec defines the desired state of PerformanceProfile. - type: object - required: - - cpu - - nodeSelector - properties: - additionalKernelArgs: - description: Addional kernel arguments. - type: array - items: - type: string - cpu: - description: CPU defines a set of CPU related parameters. - type: object - required: - - isolated - properties: - balanceIsolated: - description: BalanceIsolated toggles whether or not the Isolated CPU set is eligible for load balancing work loads. When this option is set to "false", the Isolated CPU set will be static, meaning workloads have to explicitly assign each thread to a specific cpu in order to work across multiple CPUs. Setting this to "true" allows workloads to be balanced across CPUs. Setting this to "false" offers the most predictable performance for guaranteed workloads, but it offloads the complexity of cpu load balancing to the application. Defaults to "true" - type: boolean - isolated: - description: 'Isolated defines a set of CPUs that will be used to give to application threads the most execution time possible, which means removing as many extraneous tasks off a CPU as possible. It is important to notice the CPU manager can choose any CPU to run the workload except the reserved CPUs. In order to guarantee that your workload will run on the isolated CPU: 1. The union of reserved CPUs and isolated CPUs should include all online CPUs 2. 
The isolated CPUs field should be the complementary to reserved CPUs field' - type: string - reserved: - description: Reserved defines a set of CPUs that will not be used for any container workloads initiated by kubelet. - type: string - globallyDisableIrqLoadBalancing: - description: GloballyDisableIrqLoadBalancing toggles whether IRQ load balancing will be disabled for the Isolated CPU set. When the option is set to "true" it disables IRQs load balancing for the Isolated CPU set. Setting the option to "false" allows the IRQs to be balanced across all CPUs, however the IRQs load balancing can be disabled per pod CPUs when using irq-load-balancing.crio.io/cpu-quota.crio.io annotations. Defaults to "false" - type: boolean - hugepages: - description: HugePages defines a set of huge pages related parameters. It is possible to set huge pages with multiple size values at the same time. For example, hugepages can be set with 1G and 2M, both values will be set on the node by the performance-addon-operator. It is important to notice that setting hugepages default size to 1G will remove all 2M related folders from the node and it will be impossible to configure 2M hugepages under the node. - type: object - properties: - defaultHugepagesSize: - description: DefaultHugePagesSize defines huge pages default size under kernel boot parameters. - type: string - pages: - description: Pages defines huge pages that we want to allocate at boot time. - type: array - items: - description: HugePage defines the number of allocated huge pages of the specific size. - type: object - properties: - count: - description: Count defines amount of huge pages, maps to the 'hugepages' kernel boot parameter. - type: integer - format: int32 - node: - description: Node defines the NUMA node where hugepages will be allocated, if not specified, pages will be allocated equally between NUMA nodes - type: integer - format: int32 - size: - description: Size defines huge page size, maps to the 'hugepagesz' kernel boot parameter. - type: string - machineConfigLabel: - description: MachineConfigLabel defines the label to add to the MachineConfigs the operator creates. It has to be used in the MachineConfigSelector of the MachineConfigPool which targets this performance profile. Defaults to "machineconfiguration.openshift.io/role=" - type: object - additionalProperties: - type: string - machineConfigPoolSelector: - description: MachineConfigPoolSelector defines the MachineConfigPool label to use in the MachineConfigPoolSelector of resources like KubeletConfigs created by the operator. Defaults to "machineconfiguration.openshift.io/role=" - type: object - additionalProperties: - type: string - net: - description: Net defines a set of network related features - type: object - properties: - devices: - description: Devices contains a list of network device representations that will be set with a netqueue count equal to CPU.Reserved . If no devices are specified then the default is all devices. - type: array - items: - description: 'Device defines a way to represent a network device in several options: device name, vendor ID, model ID, PCI path and MAC address' - type: object - properties: - deviceID: - description: Network device ID (model) represnted as a 16 bit hexmadecimal number. - type: string - interfaceName: - description: Network device name to be matched. It uses a syntax of shell-style wildcards which are either positive or negative. 
- type: string - vendorID: - description: Network device vendor ID represnted as a 16 bit Hexmadecimal number. - type: string - userLevelNetworking: - description: UserLevelNetworking when enabled - sets either all or specified network devices queue size to the amount of reserved CPUs. Defaults to "false". - type: boolean - nodeSelector: - description: 'NodeSelector defines the Node label to use in the NodeSelectors of resources like Tuned created by the operator. It most likely should, but does not have to match the node label in the NodeSelector of the MachineConfigPool which targets this performance profile. In the case when machineConfigLabels or machineConfigPoolSelector are not set, we are expecting a certain NodeSelector format /: "" in order to be able to calculate the default values for the former mentioned fields.' - type: object - additionalProperties: - type: string - numa: - description: NUMA defines options related to topology aware affinities - type: object - properties: - topologyPolicy: - description: Name of the policy applied when TopologyManager is enabled Operator defaults to "best-effort" - type: string - realTimeKernel: - description: RealTimeKernel defines a set of real time kernel related parameters. RT kernel won't be installed when not set. - type: object - properties: - enabled: - description: Enabled defines if the real time kernel packages should be installed. Defaults to "false" - type: boolean - status: - description: PerformanceProfileStatus defines the observed state of PerformanceProfile. - type: object - properties: - conditions: - description: Conditions represents the latest available observations of current state. - type: array - items: - description: Condition represents the state of the operator's reconciliation functionality. - type: object - required: - - status - - type - properties: - lastHeartbeatTime: - type: string - format: date-time - lastTransitionTime: - type: string - format: date-time - message: - type: string - reason: - type: string - status: - type: string - type: - description: ConditionType is the state of the operator's reconciliation functionality. - type: string - runtimeClass: - description: RuntimeClass contains the name of the RuntimeClass resource created by the operator. - type: string - tuned: - description: Tuned points to the Tuned custom resource object that contains the tuning values generated by this operator. - type: string - served: true - storage: false - subresources: - status: {} - - deprecated: true - deprecationWarning: v1alpha1 is deprecated and should be removed in the next release, use v2 instead - name: v1alpha1 - schema: - openAPIV3Schema: - description: PerformanceProfile is the Schema for the performanceprofiles API - type: object - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: PerformanceProfileSpec defines the desired state of PerformanceProfile. - type: object - properties: - additionalKernelArgs: - description: Addional kernel arguments. - type: array - items: - type: string - cpu: - description: CPU defines a set of CPU related parameters. - type: object - properties: - balanceIsolated: - description: BalanceIsolated toggles whether or not the Isolated CPU set is eligible for load balancing work loads. When this option is set to "false", the Isolated CPU set will be static, meaning workloads have to explicitly assign each thread to a specific cpu in order to work across multiple CPUs. Setting this to "true" allows workloads to be balanced across CPUs. Setting this to "false" offers the most predictable performance for guaranteed workloads, but it offloads the complexity of cpu load balancing to the application. Defaults to "true" - type: boolean - isolated: - description: 'Isolated defines a set of CPUs that will be used to give to application threads the most execution time possible, which means removing as many extraneous tasks off a CPU as possible. It is important to notice the CPU manager can choose any CPU to run the workload except the reserved CPUs. In order to guarantee that your workload will run on the isolated CPU: 1. The union of reserved CPUs and isolated CPUs should include all online CPUs 2. The isolated CPUs field should be the complementary to reserved CPUs field' - type: string - reserved: - description: Reserved defines a set of CPUs that will not be used for any container workloads initiated by kubelet. - type: string - hugepages: - description: HugePages defines a set of huge pages related parameters. It is possible to set huge pages with multiple size values at the same time. For example, hugepages can be set with 1G and 2M, both values will be set on the node by the performance-addon-operator. It is important to notice that setting hugepages default size to 1G will remove all 2M related folders from the node and it will be impossible to configure 2M hugepages under the node. - type: object - properties: - defaultHugepagesSize: - description: DefaultHugePagesSize defines huge pages default size under kernel boot parameters. - type: string - pages: - description: Pages defines huge pages that we want to allocate at boot time. - type: array - items: - description: HugePage defines the number of allocated huge pages of the specific size. - type: object - properties: - count: - description: Count defines amount of huge pages, maps to the 'hugepages' kernel boot parameter. - type: integer - format: int32 - node: - description: Node defines the NUMA node where hugepages will be allocated, if not specified, pages will be allocated equally between NUMA nodes - type: integer - format: int32 - size: - description: Size defines huge page size, maps to the 'hugepagesz' kernel boot parameter. - type: string - machineConfigLabel: - description: MachineConfigLabel defines the label to add to the MachineConfigs the operator creates. It has to be used in the MachineConfigSelector of the MachineConfigPool which targets this performance profile. 
Defaults to "machineconfiguration.openshift.io/role=" - type: object - additionalProperties: - type: string - machineConfigPoolSelector: - description: MachineConfigPoolSelector defines the MachineConfigPool label to use in the MachineConfigPoolSelector of resources like KubeletConfigs created by the operator. Defaults to "machineconfiguration.openshift.io/role=" - type: object - additionalProperties: - type: string - nodeSelector: - description: NodeSelector defines the Node label to use in the NodeSelectors of resources like Tuned created by the operator. It most likely should, but does not have to match the node label in the NodeSelector of the MachineConfigPool which targets this performance profile. - type: object - additionalProperties: - type: string - numa: - description: NUMA defines options related to topology aware affinities - type: object - properties: - topologyPolicy: - description: Name of the policy applied when TopologyManager is enabled Operator defaults to "best-effort" - type: string - realTimeKernel: - description: RealTimeKernel defines a set of real time kernel related parameters. RT kernel won't be installed when not set. - type: object - properties: - enabled: - description: Enabled defines if the real time kernel packages should be installed. Defaults to "false" - type: boolean - status: - description: PerformanceProfileStatus defines the observed state of PerformanceProfile. - type: object - properties: - conditions: - description: Conditions represents the latest available observations of current state. - type: array - items: - description: Condition represents the state of the operator's reconciliation functionality. - type: object - required: - - status - - type - properties: - lastHeartbeatTime: - type: string - format: date-time - lastTransitionTime: - type: string - format: date-time - message: - type: string - reason: - type: string - status: - type: string - type: - description: ConditionType is the state of the operator's reconciliation functionality. - type: string - runtimeClass: - description: RuntimeClass contains the name of the RuntimeClass resource created by the operator. - type: string - tuned: - description: Tuned points to the Tuned custom resource object that contains the tuning values generated by this operator. - type: string - served: true - storage: false - subresources: - status: {} - - name: v2 - schema: - openAPIV3Schema: - description: PerformanceProfile is the Schema for the performanceprofiles API - type: object - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: PerformanceProfileSpec defines the desired state of PerformanceProfile. - type: object - required: - - cpu - - nodeSelector - properties: - additionalKernelArgs: - description: Addional kernel arguments. - type: array - items: - type: string - cpu: - description: CPU defines a set of CPU related parameters. 
- type: object - required: - - isolated - - reserved - properties: - balanceIsolated: - description: BalanceIsolated toggles whether or not the Isolated CPU set is eligible for load balancing work loads. When this option is set to "false", the Isolated CPU set will be static, meaning workloads have to explicitly assign each thread to a specific cpu in order to work across multiple CPUs. Setting this to "true" allows workloads to be balanced across CPUs. Setting this to "false" offers the most predictable performance for guaranteed workloads, but it offloads the complexity of cpu load balancing to the application. Defaults to "true" - type: boolean - isolated: - description: 'Isolated defines a set of CPUs that will be used to give to application threads the most execution time possible, which means removing as many extraneous tasks off a CPU as possible. It is important to notice the CPU manager can choose any CPU to run the workload except the reserved CPUs. In order to guarantee that your workload will run on the isolated CPU: 1. The union of reserved CPUs and isolated CPUs should include all online CPUs 2. The isolated CPUs field should be the complementary to reserved CPUs field' - type: string - reserved: - description: Reserved defines a set of CPUs that will not be used for any container workloads initiated by kubelet. - type: string - globallyDisableIrqLoadBalancing: - description: GloballyDisableIrqLoadBalancing toggles whether IRQ load balancing will be disabled for the Isolated CPU set. When the option is set to "true" it disables IRQs load balancing for the Isolated CPU set. Setting the option to "false" allows the IRQs to be balanced across all CPUs, however the IRQs load balancing can be disabled per pod CPUs when using irq-load-balancing.crio.io/cpu-quota.crio.io annotations. Defaults to "false" - type: boolean - hugepages: - description: HugePages defines a set of huge pages related parameters. It is possible to set huge pages with multiple size values at the same time. For example, hugepages can be set with 1G and 2M, both values will be set on the node by the performance-addon-operator. It is important to notice that setting hugepages default size to 1G will remove all 2M related folders from the node and it will be impossible to configure 2M hugepages under the node. - type: object - properties: - defaultHugepagesSize: - description: DefaultHugePagesSize defines huge pages default size under kernel boot parameters. - type: string - pages: - description: Pages defines huge pages that we want to allocate at boot time. - type: array - items: - description: HugePage defines the number of allocated huge pages of the specific size. - type: object - properties: - count: - description: Count defines amount of huge pages, maps to the 'hugepages' kernel boot parameter. - type: integer - format: int32 - node: - description: Node defines the NUMA node where hugepages will be allocated, if not specified, pages will be allocated equally between NUMA nodes - type: integer - format: int32 - size: - description: Size defines huge page size, maps to the 'hugepagesz' kernel boot parameter. - type: string - machineConfigLabel: - description: MachineConfigLabel defines the label to add to the MachineConfigs the operator creates. It has to be used in the MachineConfigSelector of the MachineConfigPool which targets this performance profile. 
Defaults to "machineconfiguration.openshift.io/role=" - type: object - additionalProperties: - type: string - machineConfigPoolSelector: - description: MachineConfigPoolSelector defines the MachineConfigPool label to use in the MachineConfigPoolSelector of resources like KubeletConfigs created by the operator. Defaults to "machineconfiguration.openshift.io/role=" - type: object - additionalProperties: - type: string - net: - description: Net defines a set of network related features - type: object - properties: - devices: - description: Devices contains a list of network device representations that will be set with a netqueue count equal to CPU.Reserved . If no devices are specified then the default is all devices. - type: array - items: - description: 'Device defines a way to represent a network device in several options: device name, vendor ID, model ID, PCI path and MAC address' - type: object - properties: - deviceID: - description: Network device ID (model) represnted as a 16 bit hexmadecimal number. - type: string - interfaceName: - description: Network device name to be matched. It uses a syntax of shell-style wildcards which are either positive or negative. - type: string - vendorID: - description: Network device vendor ID represnted as a 16 bit Hexmadecimal number. - type: string - userLevelNetworking: - description: UserLevelNetworking when enabled - sets either all or specified network devices queue size to the amount of reserved CPUs. Defaults to "false". - type: boolean - nodeSelector: - description: 'NodeSelector defines the Node label to use in the NodeSelectors of resources like Tuned created by the operator. It most likely should, but does not have to match the node label in the NodeSelector of the MachineConfigPool which targets this performance profile. In the case when machineConfigLabels or machineConfigPoolSelector are not set, we are expecting a certain NodeSelector format /: "" in order to be able to calculate the default values for the former mentioned fields.' - type: object - additionalProperties: - type: string - numa: - description: NUMA defines options related to topology aware affinities - type: object - properties: - topologyPolicy: - description: Name of the policy applied when TopologyManager is enabled Operator defaults to "best-effort" - type: string - realTimeKernel: - description: RealTimeKernel defines a set of real time kernel related parameters. RT kernel won't be installed when not set. - type: object - properties: - enabled: - description: Enabled defines if the real time kernel packages should be installed. Defaults to "false" - type: boolean - status: - description: PerformanceProfileStatus defines the observed state of PerformanceProfile. - type: object - properties: - conditions: - description: Conditions represents the latest available observations of current state. - type: array - items: - description: Condition represents the state of the operator's reconciliation functionality. - type: object - required: - - status - - type - properties: - lastHeartbeatTime: - type: string - format: date-time - lastTransitionTime: - type: string - format: date-time - message: - type: string - reason: - type: string - status: - type: string - type: - description: ConditionType is the state of the operator's reconciliation functionality. - type: string - runtimeClass: - description: RuntimeClass contains the name of the RuntimeClass resource created by the operator. 
- type: string - tuned: - description: Tuned points to the Tuned custom resource object that contains the tuning values generated by this operator. - type: string - served: true - storage: true - subresources: - status: {} -status: - acceptedNames: - kind: "" - plural: "" - conditions: [] - storedVersions: [] diff --git a/manifests/40-rbac.yaml b/manifests/40-rbac.yaml index a31bee605..419a06a49 100644 --- a/manifests/40-rbac.yaml +++ b/manifests/40-rbac.yaml @@ -38,7 +38,7 @@ rules: # "" indicates the core API group - apiGroups: [""] resources: ["configmaps","events"] - verbs: ["create","get","delete","list","update","watch","patch"] + verbs: ["create","get","delete","list","update","watch"] # The pod-matching functionality will likely be deprecated in the # future. When it is, remove "pods" below. - apiGroups: [""] @@ -59,7 +59,7 @@ rules: verbs: ["update"] # Needed by the core operator functionality. - apiGroups: ["machineconfiguration.openshift.io"] - resources: ["kubeletconfigs", "machineconfigs"] + resources: ["machineconfigs"] verbs: ["create","get","delete","list","update","watch"] # Needed by the core operator functionality. - apiGroups: ["machineconfiguration.openshift.io"] @@ -69,18 +69,6 @@ rules: - apiGroups: ["coordination.k8s.io"] resources: ["leases"] verbs: ["create","get","update","patch"] -# Needed by the performance-addon-controller. -# The PAO creates runtime class for each profile that can be used under pods to -# extend CRI-O functionality. -- apiGroups: ["node.k8s.io"] - resources: ["runtimeclasses"] - verbs: ["create","get","delete","list","update","watch"] -- apiGroups: ["performance.openshift.io"] - resources: ["*"] - verbs: ["*"] -- apiGroups: ["operators.coreos.com"] - resources: ["clusterserviceversions","operatorgroups","subscriptions"] - verbs: ["get","delete","list","update","watch"] --- # Bind the operator cluster role to its Service Account. 
diff --git a/manifests/45-webhook-configuration.yaml b/manifests/45-webhook-configuration.yaml deleted file mode 100644 index 5ee281e95..000000000 --- a/manifests/45-webhook-configuration.yaml +++ /dev/null @@ -1,55 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - annotations: - include.release.openshift.io/self-managed-high-availability: "true" - include.release.openshift.io/single-node-developer: "true" - service.beta.openshift.io/serving-cert-secret-name: performance-addon-operator-webhook-cert - labels: - name: performance-addon-operator-service - name: performance-addon-operator-service - namespace: openshift-cluster-node-tuning-operator -spec: - ports: - - name: "443" - port: 443 - protocol: TCP - targetPort: 4343 - selector: - name: cluster-node-tuning-operator - type: ClusterIP - ---- - -apiVersion: admissionregistration.k8s.io/v1 -kind: ValidatingWebhookConfiguration -metadata: - annotations: - service.beta.openshift.io/inject-cabundle: "true" - name: performance-addon-operator -webhooks: - - admissionReviewVersions: - - v1 - clientConfig: - service: - name: performance-addon-operator-service - namespace: openshift-cluster-node-tuning-operator - path: /validate-performance-openshift-io-v2-performanceprofile - port: 443 - failurePolicy: Fail - matchPolicy: Equivalent - name: vwb.performance.openshift.io - rules: - - apiGroups: - - performance.openshift.io - apiVersions: - - v2 - operations: - - CREATE - - UPDATE - resources: - - performanceprofiles - scope: '*' - sideEffects: None - timeoutSeconds: 10 diff --git a/manifests/50-operator-ibm-cloud-managed.yaml b/manifests/50-operator-ibm-cloud-managed.yaml index 35a2e940c..7de8b1d40 100644 --- a/manifests/50-operator-ibm-cloud-managed.yaml +++ b/manifests/50-operator-ibm-cloud-managed.yaml @@ -54,8 +54,6 @@ spec: name: node-tuning-operator-tls - mountPath: /var/run/configmaps/trusted-ca/ name: trusted-ca - - mountPath: /apiserver.local.config/certificates - name: apiservice-cert priorityClassName: system-cluster-critical securityContext: runAsNonRoot: true @@ -77,15 +75,6 @@ spec: - name: node-tuning-operator-tls secret: secretName: node-tuning-operator-tls - - name: apiservice-cert - secret: - defaultMode: 420 - items: - - key: tls.crt - path: apiserver.crt - - key: tls.key - path: apiserver.key - secretName: performance-addon-operator-webhook-cert - configMap: items: - key: ca-bundle.crt diff --git a/manifests/50-operator.yaml b/manifests/50-operator.yaml index 8348673d0..45b572a5c 100644 --- a/manifests/50-operator.yaml +++ b/manifests/50-operator.yaml @@ -1,4 +1,3 @@ ---- apiVersion: apps/v1 kind: Deployment metadata: @@ -75,21 +74,10 @@ spec: mountPath: /etc/secrets - name: trusted-ca mountPath: /var/run/configmaps/trusted-ca/ - - name: apiservice-cert - mountPath: /apiserver.local.config/certificates volumes: - name: node-tuning-operator-tls secret: secretName: node-tuning-operator-tls - - name: apiservice-cert - secret: - defaultMode: 420 - items: - - key: tls.crt - path: apiserver.crt - - key: tls.key - path: apiserver.key - secretName: performance-addon-operator-webhook-cert - name: trusted-ca configMap: name: trusted-ca diff --git a/pkg/apis/pao/performance_suite_test.go b/pkg/apis/pao/performance_suite_test.go deleted file mode 100644 index 90276c5f1..000000000 --- a/pkg/apis/pao/performance_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package performance - -import ( - "testing" - - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" -) - -func TestPerformance(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Performance Suite") -} diff --git a/pkg/apis/pao/performance_test.go b/pkg/apis/pao/performance_test.go deleted file mode 100644 index 495209e55..000000000 --- a/pkg/apis/pao/performance_test.go +++ /dev/null @@ -1,99 +0,0 @@ -package performance - -import ( - "io/ioutil" - "strings" - - "github.com/RHsyseng/operator-utils/pkg/validation" - "github.com/ghodss/yaml" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2" -) - -const ( - crFilename = "../../../examples/pao/samples/performance_v2_performanceprofile.yaml" - crdFilename = "../../../examples/pao/crd/bases/performance.openshift.io_performanceprofiles.yaml" - lastHeartbeatPath = "/status/conditions/lastHeartbeatTime" - lastTransitionPath = "/status/conditions/lastTransitionTime" -) - -var _ = Describe("PerformanceProfile CR(D) Schema", func() { - var schema validation.Schema - - BeforeEach(func() { - var err error - schema, err = getSchema(crdFilename) - Expect(err).ToNot(HaveOccurred()) - Expect(schema).ToNot(BeNil()) - }) - - It("should validate PerformanceProfile struct fields are represented recursively in the CRD", func() { - // add any CRD paths to omit from validation check [deeply nested properties, generated timestamps, etc.] - pathOmissions := []string{ - lastHeartbeatPath, - lastTransitionPath, - } - missingEntries := getMissingEntries(schema, &performancev2.PerformanceProfile{}, pathOmissions...) - Expect(missingEntries).To(BeEmpty()) - }) - - It("should validate CR contents & formatting against provided CRD schema", func() { - cr, err := getCR(crFilename) - Expect(err).ToNot(HaveOccurred()) - Expect(cr).ToNot(BeNil()) - - // schema.Validate wraps a number of custom validator triggers for slice/string formatting, schema layout, etc. - // reference operator-utils/validate/schema:NewSchemaValidator for inclusive list - err = schema.Validate(cr) - Expect(err).ToNot(HaveOccurred()) - }) -}) - -// getSchema reads in & returns CRD schema file as openAPIV3Schema{} for validation usage. 
-// See references operator-utils/validation/schema & go-openapi/spec/schema -func getSchema(crdPath string) (validation.Schema, error) { - bytes, err := ioutil.ReadFile(crdPath) - if err != nil { - return nil, err - } - schema, err := validation.NewVersioned(bytes, "v2") - if err != nil { - return nil, err - } - return schema, nil -} - -// getCR unmarshals a *_cr.yaml file and returns the representing struct -func getCR(crPath string) (map[string]interface{}, error) { - bytes, err := ioutil.ReadFile(crPath) - if err != nil { - return nil, err - } - var input map[string]interface{} - if err = yaml.Unmarshal(bytes, &input); err != nil { - return nil, err - } - return input, nil -} - -// getMissingEntries recursively walks schemaInstance fields (PerformanceProfile), checking that each (and its fields -// recursively) are represented in CRD (schema); returns list of missing fields with specified omissions filtered out -func getMissingEntries(schema validation.Schema, schemaInstance interface{}, omissions ...string) []validation.SchemaEntry { - missingEntries := schema.GetMissingEntries(schemaInstance) - var filtered bool - var filteredMissing []validation.SchemaEntry - for _, missing := range missingEntries { - filtered = false - for _, omit := range omissions { - if strings.HasPrefix(missing.Path, omit) { - filtered = true - break - } - } - if !filtered { - filteredMissing = append(filteredMissing, missing) - } - } - return filteredMissing -} diff --git a/pkg/apis/pao/v1/groupversion_info.go b/pkg/apis/pao/v1/groupversion_info.go deleted file mode 100644 index 0e54f45db..000000000 --- a/pkg/apis/pao/v1/groupversion_info.go +++ /dev/null @@ -1,36 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package v1 contains API Schema definitions for the performance v1 API group -// +kubebuilder:object:generate=true -// +groupName=performance.openshift.io -package v1 - -import ( - "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/scheme" -) - -var ( - // GroupVersion is group version used to register these objects - GroupVersion = schema.GroupVersion{Group: "performance.openshift.io", Version: "v1"} - - // SchemeBuilder is used to add go types to the GroupVersionKind scheme - SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} - - // AddToScheme adds the types in this group-version to the given scheme. - AddToScheme = SchemeBuilder.AddToScheme -) diff --git a/pkg/apis/pao/v1/performanceprofile_conversion.go b/pkg/apis/pao/v1/performanceprofile_conversion.go deleted file mode 100644 index 776708923..000000000 --- a/pkg/apis/pao/v1/performanceprofile_conversion.go +++ /dev/null @@ -1,4 +0,0 @@ -package v1 - -// Hub marks this type as a conversion hub. 
-func (*PerformanceProfile) Hub() {} diff --git a/pkg/apis/pao/v1/performanceprofile_types.go b/pkg/apis/pao/v1/performanceprofile_types.go deleted file mode 100644 index d12a92174..000000000 --- a/pkg/apis/pao/v1/performanceprofile_types.go +++ /dev/null @@ -1,198 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// PerformanceProfilePauseAnnotation allows an admin to suspend the operator's -// reconcile loop in order to perform manual changes to performance profile owned -// objects. -const PerformanceProfilePauseAnnotation = "performance.openshift.io/pause-reconcile" - -// PerformanceProfileSpec defines the desired state of PerformanceProfile. -type PerformanceProfileSpec struct { - // CPU defines a set of CPU related parameters. - CPU *CPU `json:"cpu"` - // HugePages defines a set of huge pages related parameters. - // It is possible to set huge pages with multiple size values at the same time. - // For example, hugepages can be set with 1G and 2M, both values will be set on the node by the performance-addon-operator. - // It is important to notice that setting hugepages default size to 1G will remove all 2M related - // folders from the node and it will be impossible to configure 2M hugepages under the node. - HugePages *HugePages `json:"hugepages,omitempty"` - // MachineConfigLabel defines the label to add to the MachineConfigs the operator creates. It has to be - // used in the MachineConfigSelector of the MachineConfigPool which targets this performance profile. - // Defaults to "machineconfiguration.openshift.io/role=" - // +optional - MachineConfigLabel map[string]string `json:"machineConfigLabel,omitempty"` - // MachineConfigPoolSelector defines the MachineConfigPool label to use in the MachineConfigPoolSelector - // of resources like KubeletConfigs created by the operator. - // Defaults to "machineconfiguration.openshift.io/role=" - // +optional - MachineConfigPoolSelector map[string]string `json:"machineConfigPoolSelector,omitempty"` - // NodeSelector defines the Node label to use in the NodeSelectors of resources like Tuned created by the operator. - // It most likely should, but does not have to match the node label in the NodeSelector of the MachineConfigPool - // which targets this performance profile. - // In the case when machineConfigLabels or machineConfigPoolSelector are not set, we are expecting a certain NodeSelector format - // /: "" in order to be able to calculate the default values for the former mentioned fields. - NodeSelector map[string]string `json:"nodeSelector"` - // RealTimeKernel defines a set of real time kernel related parameters. RT kernel won't be installed when not set. - RealTimeKernel *RealTimeKernel `json:"realTimeKernel,omitempty"` - // Addional kernel arguments. 
- // +optional - AdditionalKernelArgs []string `json:"additionalKernelArgs,omitempty"` - // NUMA defines options related to topology aware affinities - // +optional - NUMA *NUMA `json:"numa,omitempty"` - // Net defines a set of network related features - // +optional - Net *Net `json:"net,omitempty"` - // GloballyDisableIrqLoadBalancing toggles whether IRQ load balancing will be disabled for the Isolated CPU set. - // When the option is set to "true" it disables IRQs load balancing for the Isolated CPU set. - // Setting the option to "false" allows the IRQs to be balanced across all CPUs, however the IRQs load balancing - // can be disabled per pod CPUs when using irq-load-balancing.crio.io/cpu-quota.crio.io annotations. - // Defaults to "false" - // +optional - GloballyDisableIrqLoadBalancing *bool `json:"globallyDisableIrqLoadBalancing,omitempty"` -} - -// CPUSet defines the set of CPUs(0-3,8-11). -type CPUSet string - -// CPU defines a set of CPU related features. -type CPU struct { - // Reserved defines a set of CPUs that will not be used for any container workloads initiated by kubelet. - Reserved *CPUSet `json:"reserved,omitempty"` - // Isolated defines a set of CPUs that will be used to give to application threads the most execution time possible, - // which means removing as many extraneous tasks off a CPU as possible. - // It is important to notice the CPU manager can choose any CPU to run the workload - // except the reserved CPUs. In order to guarantee that your workload will run on the isolated CPU: - // 1. The union of reserved CPUs and isolated CPUs should include all online CPUs - // 2. The isolated CPUs field should be the complementary to reserved CPUs field - Isolated *CPUSet `json:"isolated"` - // BalanceIsolated toggles whether or not the Isolated CPU set is eligible for load balancing work loads. - // When this option is set to "false", the Isolated CPU set will be static, meaning workloads have to - // explicitly assign each thread to a specific cpu in order to work across multiple CPUs. - // Setting this to "true" allows workloads to be balanced across CPUs. - // Setting this to "false" offers the most predictable performance for guaranteed workloads, but it - // offloads the complexity of cpu load balancing to the application. - // Defaults to "true" - // +optional - BalanceIsolated *bool `json:"balanceIsolated,omitempty"` -} - -// HugePageSize defines size of huge pages, can be 2M or 1G. -type HugePageSize string - -// HugePages defines a set of huge pages that we want to allocate at boot. -type HugePages struct { - // DefaultHugePagesSize defines huge pages default size under kernel boot parameters. - DefaultHugePagesSize *HugePageSize `json:"defaultHugepagesSize,omitempty"` - // Pages defines huge pages that we want to allocate at boot time. - Pages []HugePage `json:"pages,omitempty"` -} - -// HugePage defines the number of allocated huge pages of the specific size. -type HugePage struct { - // Size defines huge page size, maps to the 'hugepagesz' kernel boot parameter. - Size HugePageSize `json:"size,omitempty"` - // Count defines amount of huge pages, maps to the 'hugepages' kernel boot parameter. - Count int32 `json:"count,omitempty"` - // Node defines the NUMA node where hugepages will be allocated, - // if not specified, pages will be allocated equally between NUMA nodes - // +optional - Node *int32 `json:"node,omitempty"` -} - -// NUMA defines parameters related to topology awareness and affinity. 
-type NUMA struct { - // Name of the policy applied when TopologyManager is enabled - // Operator defaults to "best-effort" - // +optional - TopologyPolicy *string `json:"topologyPolicy,omitempty"` -} - -// Net defines a set of network related features -type Net struct { - // UserLevelNetworking when enabled - sets either all or specified network devices queue size to the amount of reserved CPUs. Defaults to "false". - UserLevelNetworking *bool `json:"userLevelNetworking,omitempty"` - // Devices contains a list of network device representations that will be - // set with a netqueue count equal to CPU.Reserved . - // If no devices are specified then the default is all devices. - Devices []Device `json:"devices,omitempty"` -} - -// Device defines a way to represent a network device in several options: -// device name, vendor ID, model ID, PCI path and MAC address -type Device struct { - // Network device name to be matched. It uses a syntax of shell-style wildcards which are either positive or negative. - // +optional - InterfaceName *string `json:"interfaceName,omitempty"` - // Network device vendor ID represnted as a 16 bit Hexmadecimal number. - // +optional - VendorID *string `json:"vendorID,omitempty"` - // Network device ID (model) represnted as a 16 bit hexmadecimal number. - // +optional - DeviceID *string `json:"deviceID,omitempty"` -} - -// RealTimeKernel defines the set of parameters relevant for the real time kernel. -type RealTimeKernel struct { - // Enabled defines if the real time kernel packages should be installed. Defaults to "false" - Enabled *bool `json:"enabled,omitempty"` -} - -// PerformanceProfileStatus defines the observed state of PerformanceProfile. -type PerformanceProfileStatus struct { - // Conditions represents the latest available observations of current state. - // +optional - Conditions []conditionsv1.Condition `json:"conditions,omitempty"` - // Tuned points to the Tuned custom resource object that contains the tuning values generated by this operator. - // +optional - Tuned *string `json:"tuned,omitempty"` - // RuntimeClass contains the name of the RuntimeClass resource created by the operator. 
- RuntimeClass *string `json:"runtimeClass,omitempty"` -} - -// +kubebuilder:object:root=true -// +kubebuilder:subresource:status -// +kubebuilder:resource:path=performanceprofiles,scope=Cluster -// +kubebuilder:deprecatedversion:warning="v1 is deprecated and should be removed in next three releases, use v2 instead" - -// PerformanceProfile is the Schema for the performanceprofiles API -type PerformanceProfile struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec PerformanceProfileSpec `json:"spec,omitempty"` - Status PerformanceProfileStatus `json:"status,omitempty"` -} - -// +kubebuilder:object:root=true - -// PerformanceProfileList contains a list of PerformanceProfile -type PerformanceProfileList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []PerformanceProfile `json:"items"` -} - -func init() { - SchemeBuilder.Register(&PerformanceProfile{}, &PerformanceProfileList{}) -} diff --git a/pkg/apis/pao/v1/performanceprofile_webhook.go b/pkg/apis/pao/v1/performanceprofile_webhook.go deleted file mode 100644 index c3af72fef..000000000 --- a/pkg/apis/pao/v1/performanceprofile_webhook.go +++ /dev/null @@ -1,12 +0,0 @@ -package v1 - -import ( - ctrl "sigs.k8s.io/controller-runtime" -) - -// SetupWebhookWithManager enables Webhooks - needed for version conversion -func (r *PerformanceProfile) SetupWebhookWithManager(mgr ctrl.Manager) error { - return ctrl.NewWebhookManagedBy(mgr). - For(r). - Complete() -} diff --git a/pkg/apis/pao/v1/zz_generated.deepcopy.go b/pkg/apis/pao/v1/zz_generated.deepcopy.go deleted file mode 100644 index f5eeee6da..000000000 --- a/pkg/apis/pao/v1/zz_generated.deepcopy.go +++ /dev/null @@ -1,363 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by controller-gen. DO NOT EDIT. - -package v1 - -import ( - conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CPU) DeepCopyInto(out *CPU) { - *out = *in - if in.Reserved != nil { - in, out := &in.Reserved, &out.Reserved - *out = new(CPUSet) - **out = **in - } - if in.Isolated != nil { - in, out := &in.Isolated, &out.Isolated - *out = new(CPUSet) - **out = **in - } - if in.BalanceIsolated != nil { - in, out := &in.BalanceIsolated, &out.BalanceIsolated - *out = new(bool) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CPU. -func (in *CPU) DeepCopy() *CPU { - if in == nil { - return nil - } - out := new(CPU) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
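
SetupWebhookWithManager above only registers the PerformanceProfile webhook; the manager it needs is built elsewhere. A hedged sketch of the conventional controller-runtime wiring from an operator's main(), assuming the v1 package removed here exposes AddToScheme the same way its sibling versions in this patch do; error handling is reduced to panics for brevity.

package main

import (
	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	ctrl "sigs.k8s.io/controller-runtime"

	paov1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v1"
)

func main() {
	scheme := runtime.NewScheme()
	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
	utilruntime.Must(paov1.AddToScheme(scheme))

	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{Scheme: scheme})
	if err != nil {
		panic(err)
	}

	// Hooks the PerformanceProfile conversion webhook into the manager's
	// webhook server, which is exactly what SetupWebhookWithManager above is for.
	if err := (&paov1.PerformanceProfile{}).SetupWebhookWithManager(mgr); err != nil {
		panic(err)
	}

	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		panic(err)
	}
}
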
-func (in *Device) DeepCopyInto(out *Device) { - *out = *in - if in.InterfaceName != nil { - in, out := &in.InterfaceName, &out.InterfaceName - *out = new(string) - **out = **in - } - if in.VendorID != nil { - in, out := &in.VendorID, &out.VendorID - *out = new(string) - **out = **in - } - if in.DeviceID != nil { - in, out := &in.DeviceID, &out.DeviceID - *out = new(string) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Device. -func (in *Device) DeepCopy() *Device { - if in == nil { - return nil - } - out := new(Device) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HugePage) DeepCopyInto(out *HugePage) { - *out = *in - if in.Node != nil { - in, out := &in.Node, &out.Node - *out = new(int32) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HugePage. -func (in *HugePage) DeepCopy() *HugePage { - if in == nil { - return nil - } - out := new(HugePage) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HugePages) DeepCopyInto(out *HugePages) { - *out = *in - if in.DefaultHugePagesSize != nil { - in, out := &in.DefaultHugePagesSize, &out.DefaultHugePagesSize - *out = new(HugePageSize) - **out = **in - } - if in.Pages != nil { - in, out := &in.Pages, &out.Pages - *out = make([]HugePage, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HugePages. -func (in *HugePages) DeepCopy() *HugePages { - if in == nil { - return nil - } - out := new(HugePages) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NUMA) DeepCopyInto(out *NUMA) { - *out = *in - if in.TopologyPolicy != nil { - in, out := &in.TopologyPolicy, &out.TopologyPolicy - *out = new(string) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NUMA. -func (in *NUMA) DeepCopy() *NUMA { - if in == nil { - return nil - } - out := new(NUMA) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Net) DeepCopyInto(out *Net) { - *out = *in - if in.UserLevelNetworking != nil { - in, out := &in.UserLevelNetworking, &out.UserLevelNetworking - *out = new(bool) - **out = **in - } - if in.Devices != nil { - in, out := &in.Devices, &out.Devices - *out = make([]Device, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Net. -func (in *Net) DeepCopy() *Net { - if in == nil { - return nil - } - out := new(Net) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
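
The generated DeepCopyInto functions above allocate fresh memory for every pointer and slice instead of copying structs wholesale. A small stand-alone illustration of why: a plain struct assignment aliases those fields, so mutating the "copy" silently mutates the original object, which cached API objects must never allow. The Net-like type below is a simplified stand-in, not the real one.

package main

import "fmt"

// Simplified stand-in with the same pointer-and-slice shape as the types above.
type Net struct {
	UserLevelNetworking *bool
	Devices             []string
}

// deepCopy follows the generated pattern: new allocations for every pointer
// and slice, never sharing memory with the receiver.
func (in *Net) deepCopy() *Net {
	out := new(Net)
	if in.UserLevelNetworking != nil {
		b := *in.UserLevelNetworking
		out.UserLevelNetworking = &b
	}
	if in.Devices != nil {
		out.Devices = make([]string, len(in.Devices))
		copy(out.Devices, in.Devices)
	}
	return out
}

func main() {
	enabled := true
	orig := &Net{UserLevelNetworking: &enabled, Devices: []string{"eth0"}}

	shallow := *orig        // plain assignment: the *bool and slice backing array are shared
	deep := orig.deepCopy() // generated-style copy: nothing shared

	*shallow.UserLevelNetworking = false // flips the original's field through the shared pointer

	fmt.Println(*orig.UserLevelNetworking) // false: the shallow copy aliased it
	fmt.Println(*deep.UserLevelNetworking) // true: the deep copy stayed isolated
}
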
-func (in *PerformanceProfile) DeepCopyInto(out *PerformanceProfile) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerformanceProfile. -func (in *PerformanceProfile) DeepCopy() *PerformanceProfile { - if in == nil { - return nil - } - out := new(PerformanceProfile) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PerformanceProfile) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PerformanceProfileList) DeepCopyInto(out *PerformanceProfileList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PerformanceProfile, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerformanceProfileList. -func (in *PerformanceProfileList) DeepCopy() *PerformanceProfileList { - if in == nil { - return nil - } - out := new(PerformanceProfileList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PerformanceProfileList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PerformanceProfileSpec) DeepCopyInto(out *PerformanceProfileSpec) { - *out = *in - if in.CPU != nil { - in, out := &in.CPU, &out.CPU - *out = new(CPU) - (*in).DeepCopyInto(*out) - } - if in.HugePages != nil { - in, out := &in.HugePages, &out.HugePages - *out = new(HugePages) - (*in).DeepCopyInto(*out) - } - if in.MachineConfigLabel != nil { - in, out := &in.MachineConfigLabel, &out.MachineConfigLabel - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.MachineConfigPoolSelector != nil { - in, out := &in.MachineConfigPoolSelector, &out.MachineConfigPoolSelector - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.NodeSelector != nil { - in, out := &in.NodeSelector, &out.NodeSelector - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.RealTimeKernel != nil { - in, out := &in.RealTimeKernel, &out.RealTimeKernel - *out = new(RealTimeKernel) - (*in).DeepCopyInto(*out) - } - if in.AdditionalKernelArgs != nil { - in, out := &in.AdditionalKernelArgs, &out.AdditionalKernelArgs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.NUMA != nil { - in, out := &in.NUMA, &out.NUMA - *out = new(NUMA) - (*in).DeepCopyInto(*out) - } - if in.Net != nil { - in, out := &in.Net, &out.Net - *out = new(Net) - (*in).DeepCopyInto(*out) - } - if in.GloballyDisableIrqLoadBalancing != nil { - in, out := &in.GloballyDisableIrqLoadBalancing, &out.GloballyDisableIrqLoadBalancing - *out = new(bool) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerformanceProfileSpec. -func (in *PerformanceProfileSpec) DeepCopy() *PerformanceProfileSpec { - if in == nil { - return nil - } - out := new(PerformanceProfileSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PerformanceProfileStatus) DeepCopyInto(out *PerformanceProfileStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]conditionsv1.Condition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Tuned != nil { - in, out := &in.Tuned, &out.Tuned - *out = new(string) - **out = **in - } - if in.RuntimeClass != nil { - in, out := &in.RuntimeClass, &out.RuntimeClass - *out = new(string) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerformanceProfileStatus. -func (in *PerformanceProfileStatus) DeepCopy() *PerformanceProfileStatus { - if in == nil { - return nil - } - out := new(PerformanceProfileStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RealTimeKernel) DeepCopyInto(out *RealTimeKernel) { - *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = new(bool) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RealTimeKernel. 
-func (in *RealTimeKernel) DeepCopy() *RealTimeKernel { - if in == nil { - return nil - } - out := new(RealTimeKernel) - in.DeepCopyInto(out) - return out -} diff --git a/pkg/apis/pao/v1alpha1/groupversion_info.go b/pkg/apis/pao/v1alpha1/groupversion_info.go deleted file mode 100644 index 031ebdc99..000000000 --- a/pkg/apis/pao/v1alpha1/groupversion_info.go +++ /dev/null @@ -1,36 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package v1alpha1 contains API Schema definitions for the performance v1alpha1 API group -// +kubebuilder:object:generate=true -// +groupName=performance.openshift.io -package v1alpha1 - -import ( - "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/scheme" -) - -var ( - // GroupVersion is group version used to register these objects - GroupVersion = schema.GroupVersion{Group: "performance.openshift.io", Version: "v1alpha1"} - - // SchemeBuilder is used to add go types to the GroupVersionKind scheme - SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} - - // AddToScheme adds the types in this group-version to the given scheme. - AddToScheme = SchemeBuilder.AddToScheme -) diff --git a/pkg/apis/pao/v1alpha1/performanceprofile_conversion.go b/pkg/apis/pao/v1alpha1/performanceprofile_conversion.go deleted file mode 100644 index c27a8073f..000000000 --- a/pkg/apis/pao/v1alpha1/performanceprofile_conversion.go +++ /dev/null @@ -1,221 +0,0 @@ -package v1alpha1 - -import ( - "k8s.io/utils/pointer" - - v1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v1" - conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" - - "sigs.k8s.io/controller-runtime/pkg/conversion" -) - -// ConvertTo converts this PerformanceProfile to the Hub version (v1). 
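
GroupVersion, SchemeBuilder and AddToScheme above are what let generic clients encode and decode performance.openshift.io objects. A hedged sketch of a typed controller-runtime client built against that scheme, listing profiles; it assumes the v1alpha1 package removed in this hunk is still importable and that a kubeconfig is reachable.

package main

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	paov1alpha1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v1alpha1"
)

func main() {
	// AddToScheme (produced by the scheme.Builder above) registers the
	// v1alpha1 kinds so the client can serialize and deserialize them.
	scheme := runtime.NewScheme()
	if err := paov1alpha1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	c, err := client.New(ctrl.GetConfigOrDie(), client.Options{Scheme: scheme})
	if err != nil {
		panic(err)
	}

	// PerformanceProfile is cluster scoped, so no namespace option is needed.
	profiles := &paov1alpha1.PerformanceProfileList{}
	if err := c.List(context.TODO(), profiles); err != nil {
		panic(err)
	}
	fmt.Println("profiles found:", len(profiles.Items))
}
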
-func (curr *PerformanceProfile) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1.PerformanceProfile) - - // ObjectMeta - dst.ObjectMeta = curr.ObjectMeta - - // Spec - if curr.Spec.CPU != nil { - dst.Spec.CPU = new(v1.CPU) - - if curr.Spec.CPU.Reserved != nil { - reserved := v1.CPUSet(*curr.Spec.CPU.Reserved) - dst.Spec.CPU.Reserved = &reserved - } - if curr.Spec.CPU.Isolated != nil { - isolated := v1.CPUSet(*curr.Spec.CPU.Isolated) - dst.Spec.CPU.Isolated = &isolated - } - if curr.Spec.CPU.BalanceIsolated != nil { - dst.Spec.CPU.BalanceIsolated = pointer.BoolPtr(*curr.Spec.CPU.BalanceIsolated) - } - } - - if curr.Spec.HugePages != nil { - dst.Spec.HugePages = new(v1.HugePages) - - if curr.Spec.HugePages.DefaultHugePagesSize != nil { - defaultHugePagesSize := v1.HugePageSize(*curr.Spec.HugePages.DefaultHugePagesSize) - dst.Spec.HugePages.DefaultHugePagesSize = &defaultHugePagesSize - } - - if curr.Spec.HugePages.Pages != nil { - dst.Spec.HugePages.Pages = make([]v1.HugePage, len(curr.Spec.HugePages.Pages)) - - for i, p := range curr.Spec.HugePages.Pages { - dst.Spec.HugePages.Pages[i] = v1.HugePage{ - Size: v1.HugePageSize(p.Size), Count: p.Count, - } - if p.Node != nil { - dst.Spec.HugePages.Pages[i].Node = pointer.Int32Ptr(*p.Node) - } - } - } - } - - if curr.Spec.MachineConfigLabel != nil { - dst.Spec.MachineConfigLabel = make(map[string]string) - for k, v := range curr.Spec.MachineConfigLabel { - dst.Spec.MachineConfigLabel[k] = v - } - } - - if curr.Spec.MachineConfigPoolSelector != nil { - dst.Spec.MachineConfigPoolSelector = make(map[string]string) - for k, v := range curr.Spec.MachineConfigPoolSelector { - dst.Spec.MachineConfigPoolSelector[k] = v - } - } - - if curr.Spec.NodeSelector != nil { - dst.Spec.NodeSelector = make(map[string]string) - for k, v := range curr.Spec.NodeSelector { - dst.Spec.NodeSelector[k] = v - } - } - - if curr.Spec.RealTimeKernel != nil { - dst.Spec.RealTimeKernel = new(v1.RealTimeKernel) - - if curr.Spec.RealTimeKernel.Enabled != nil { - dst.Spec.RealTimeKernel.Enabled = pointer.BoolPtr(*curr.Spec.RealTimeKernel.Enabled) - } - } - - if curr.Spec.AdditionalKernelArgs != nil { - dst.Spec.AdditionalKernelArgs = make([]string, len(curr.Spec.AdditionalKernelArgs)) - copy(dst.Spec.AdditionalKernelArgs, curr.Spec.AdditionalKernelArgs) - } - - if curr.Spec.NUMA != nil { - dst.Spec.NUMA = new(v1.NUMA) - - if curr.Spec.NUMA.TopologyPolicy != nil { - dst.Spec.NUMA.TopologyPolicy = pointer.StringPtr(*curr.Spec.NUMA.TopologyPolicy) - } - } - - // Status - if curr.Status.Conditions != nil { - dst.Status.Conditions = make([]conditionsv1.Condition, len(curr.Status.Conditions)) - copy(dst.Status.Conditions, curr.Status.Conditions) - } - - if curr.Status.Tuned != nil { - dst.Status.Tuned = pointer.StringPtr(*curr.Status.Tuned) - } - - if curr.Status.RuntimeClass != nil { - dst.Status.RuntimeClass = pointer.StringPtr(*curr.Status.RuntimeClass) - } - - // +kubebuilder:docs-gen:collapse=rote conversion - return nil -} - -// ConvertFrom converts from the Hub version (v1) to this version. 
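
ConvertTo above is ordinary Go and can be exercised without the webhook machinery. A minimal sketch converting a v1alpha1 profile into the v1 hub by calling it directly; it assumes the v1 type satisfies conversion.Hub (its Hub() marker lives in a file outside this hunk) and that both packages are still importable.

package main

import (
	"fmt"

	paov1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v1"
	paov1alpha1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v1alpha1"
)

func main() {
	reserved := paov1alpha1.CPUSet("0-1")
	isolated := paov1alpha1.CPUSet("2-7")

	spoke := &paov1alpha1.PerformanceProfile{}
	spoke.Name = "example"
	spoke.Spec.CPU = &paov1alpha1.CPU{Reserved: &reserved, Isolated: &isolated}

	// Spoke -> hub, copied field by field exactly as implemented above.
	hub := &paov1.PerformanceProfile{}
	if err := spoke.ConvertTo(hub); err != nil {
		panic(err)
	}

	fmt.Println(hub.Name, *hub.Spec.CPU.Reserved, *hub.Spec.CPU.Isolated) // example 0-1 2-7
}
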
-func (curr *PerformanceProfile) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1.PerformanceProfile) - - // ObjectMeta - curr.ObjectMeta = src.ObjectMeta - - // Spec - if src.Spec.CPU != nil { - curr.Spec.CPU = new(CPU) - - if src.Spec.CPU.Reserved != nil { - reserved := CPUSet(*src.Spec.CPU.Reserved) - curr.Spec.CPU.Reserved = &reserved - } - if src.Spec.CPU.Isolated != nil { - isolated := CPUSet(*src.Spec.CPU.Isolated) - curr.Spec.CPU.Isolated = &isolated - } - if src.Spec.CPU.BalanceIsolated != nil { - curr.Spec.CPU.BalanceIsolated = pointer.BoolPtr(*src.Spec.CPU.BalanceIsolated) - } - } - - if src.Spec.HugePages != nil { - curr.Spec.HugePages = new(HugePages) - - if src.Spec.HugePages.DefaultHugePagesSize != nil { - defaultHugePagesSize := HugePageSize(*src.Spec.HugePages.DefaultHugePagesSize) - curr.Spec.HugePages.DefaultHugePagesSize = &defaultHugePagesSize - } - - if src.Spec.HugePages.Pages != nil { - curr.Spec.HugePages.Pages = make([]HugePage, len(src.Spec.HugePages.Pages)) - for i, p := range src.Spec.HugePages.Pages { - curr.Spec.HugePages.Pages[i] = HugePage{ - Size: HugePageSize(p.Size), Count: p.Count, - } - if p.Node != nil { - curr.Spec.HugePages.Pages[i].Node = pointer.Int32Ptr(*p.Node) - } - } - } - } - - if src.Spec.MachineConfigLabel != nil { - curr.Spec.MachineConfigLabel = make(map[string]string) - for k, v := range src.Spec.MachineConfigLabel { - curr.Spec.MachineConfigLabel[k] = v - } - } - - if src.Spec.MachineConfigPoolSelector != nil { - curr.Spec.MachineConfigPoolSelector = make(map[string]string) - for k, v := range src.Spec.MachineConfigPoolSelector { - curr.Spec.MachineConfigPoolSelector[k] = v - } - } - - if src.Spec.NodeSelector != nil { - curr.Spec.NodeSelector = make(map[string]string) - for k, v := range src.Spec.NodeSelector { - curr.Spec.NodeSelector[k] = v - } - } - - if src.Spec.RealTimeKernel != nil { - curr.Spec.RealTimeKernel = new(RealTimeKernel) - - if src.Spec.RealTimeKernel.Enabled != nil { - curr.Spec.RealTimeKernel.Enabled = pointer.BoolPtr(*src.Spec.RealTimeKernel.Enabled) - } - } - - if src.Spec.AdditionalKernelArgs != nil { - curr.Spec.AdditionalKernelArgs = make([]string, len(src.Spec.AdditionalKernelArgs)) - copy(curr.Spec.AdditionalKernelArgs, src.Spec.AdditionalKernelArgs) - } - - if src.Spec.NUMA != nil { - curr.Spec.NUMA = new(NUMA) - - if src.Spec.NUMA.TopologyPolicy != nil { - curr.Spec.NUMA.TopologyPolicy = pointer.StringPtr(*src.Spec.NUMA.TopologyPolicy) - } - } - - // Status - if src.Status.Conditions != nil { - curr.Status.Conditions = make([]conditionsv1.Condition, len(src.Status.Conditions)) - copy(curr.Status.Conditions, src.Status.Conditions) - } - - if src.Status.Tuned != nil { - curr.Status.Tuned = pointer.StringPtr(*src.Status.Tuned) - } - - if src.Status.RuntimeClass != nil { - curr.Status.RuntimeClass = pointer.StringPtr(*src.Status.RuntimeClass) - } - - // +kubebuilder:docs-gen:collapse=rote conversion - return nil -} diff --git a/pkg/apis/pao/v1alpha1/performanceprofile_types.go b/pkg/apis/pao/v1alpha1/performanceprofile_types.go deleted file mode 100644 index e56c3aabf..000000000 --- a/pkg/apis/pao/v1alpha1/performanceprofile_types.go +++ /dev/null @@ -1,163 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// PerformanceProfilePauseAnnotation allows an admin to suspend the operator's -// reconcile loop in order to perform manual changes to performance profile owned -// objects. -const PerformanceProfilePauseAnnotation = "performance.openshift.io/pause-reconcile" - -// PerformanceProfileSpec defines the desired state of PerformanceProfile. -type PerformanceProfileSpec struct { - // CPU defines a set of CPU related parameters. - CPU *CPU `json:"cpu,omitempty"` - // HugePages defines a set of huge pages related parameters. - // It is possible to set huge pages with multiple size values at the same time. - // For example, hugepages can be set with 1G and 2M, both values will be set on the node by the performance-addon-operator. - // It is important to notice that setting hugepages default size to 1G will remove all 2M related - // folders from the node and it will be impossible to configure 2M hugepages under the node. - HugePages *HugePages `json:"hugepages,omitempty"` - // MachineConfigLabel defines the label to add to the MachineConfigs the operator creates. It has to be - // used in the MachineConfigSelector of the MachineConfigPool which targets this performance profile. - // Defaults to "machineconfiguration.openshift.io/role=" - // +optional - MachineConfigLabel map[string]string `json:"machineConfigLabel,omitempty"` - // MachineConfigPoolSelector defines the MachineConfigPool label to use in the MachineConfigPoolSelector - // of resources like KubeletConfigs created by the operator. - // Defaults to "machineconfiguration.openshift.io/role=" - // +optional - MachineConfigPoolSelector map[string]string `json:"machineConfigPoolSelector,omitempty"` - // NodeSelector defines the Node label to use in the NodeSelectors of resources like Tuned created by the operator. - // It most likely should, but does not have to match the node label in the NodeSelector of the MachineConfigPool - // which targets this performance profile. - NodeSelector map[string]string `json:"nodeSelector,omitempty"` - // RealTimeKernel defines a set of real time kernel related parameters. RT kernel won't be installed when not set. - RealTimeKernel *RealTimeKernel `json:"realTimeKernel,omitempty"` - // Addional kernel arguments. - // +optional - AdditionalKernelArgs []string `json:"additionalKernelArgs,omitempty"` - // NUMA defines options related to topology aware affinities - // +optional - NUMA *NUMA `json:"numa,omitempty"` -} - -// CPUSet defines the set of CPUs(0-3,8-11). -type CPUSet string - -// CPU defines a set of CPU related features. -type CPU struct { - // Reserved defines a set of CPUs that will not be used for any container workloads initiated by kubelet. - Reserved *CPUSet `json:"reserved,omitempty"` - // Isolated defines a set of CPUs that will be used to give to application threads the most execution time possible, - // which means removing as many extraneous tasks off a CPU as possible. 
- // It is important to notice the CPU manager can choose any CPU to run the workload - // except the reserved CPUs. In order to guarantee that your workload will run on the isolated CPU: - // 1. The union of reserved CPUs and isolated CPUs should include all online CPUs - // 2. The isolated CPUs field should be the complementary to reserved CPUs field - // +optional - Isolated *CPUSet `json:"isolated,omitempty"` - // BalanceIsolated toggles whether or not the Isolated CPU set is eligible for load balancing work loads. - // When this option is set to "false", the Isolated CPU set will be static, meaning workloads have to - // explicitly assign each thread to a specific cpu in order to work across multiple CPUs. - // Setting this to "true" allows workloads to be balanced across CPUs. - // Setting this to "false" offers the most predictable performance for guaranteed workloads, but it - // offloads the complexity of cpu load balancing to the application. - // Defaults to "true" - // +optional - BalanceIsolated *bool `json:"balanceIsolated,omitempty"` -} - -// HugePageSize defines size of huge pages, can be 2M or 1G. -type HugePageSize string - -// HugePages defines a set of huge pages that we want to allocate at boot. -type HugePages struct { - // DefaultHugePagesSize defines huge pages default size under kernel boot parameters. - DefaultHugePagesSize *HugePageSize `json:"defaultHugepagesSize,omitempty"` - // Pages defines huge pages that we want to allocate at boot time. - Pages []HugePage `json:"pages,omitempty"` -} - -// HugePage defines the number of allocated huge pages of the specific size. -type HugePage struct { - // Size defines huge page size, maps to the 'hugepagesz' kernel boot parameter. - Size HugePageSize `json:"size,omitempty"` - // Count defines amount of huge pages, maps to the 'hugepages' kernel boot parameter. - Count int32 `json:"count,omitempty"` - // Node defines the NUMA node where hugepages will be allocated, - // if not specified, pages will be allocated equally between NUMA nodes - // +optional - Node *int32 `json:"node,omitempty"` -} - -// NUMA defines parameters related to topology awareness and affinity. -type NUMA struct { - // Name of the policy applied when TopologyManager is enabled - // Operator defaults to "best-effort" - // +optional - TopologyPolicy *string `json:"topologyPolicy,omitempty"` -} - -// RealTimeKernel defines the set of parameters relevant for the real time kernel. -type RealTimeKernel struct { - // Enabled defines if the real time kernel packages should be installed. Defaults to "false" - Enabled *bool `json:"enabled,omitempty"` -} - -// PerformanceProfileStatus defines the observed state of PerformanceProfile. -type PerformanceProfileStatus struct { - // Conditions represents the latest available observations of current state. - // +optional - Conditions []conditionsv1.Condition `json:"conditions,omitempty"` - // Tuned points to the Tuned custom resource object that contains the tuning values generated by this operator. - // +optional - Tuned *string `json:"tuned,omitempty"` - // RuntimeClass contains the name of the RuntimeClass resource created by the operator. 
- RuntimeClass *string `json:"runtimeClass,omitempty"` -} - -// +kubebuilder:object:root=true -// +kubebuilder:subresource:status -// +kubebuilder:resource:path=performanceprofiles,scope=Cluster -// +kubebuilder:deprecatedversion:warning="v1alpha1 is deprecated and should be removed in the next release, use v2 instead" - -// PerformanceProfile is the Schema for the performanceprofiles API -type PerformanceProfile struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec PerformanceProfileSpec `json:"spec,omitempty"` - Status PerformanceProfileStatus `json:"status,omitempty"` -} - -// +kubebuilder:object:root=true - -// PerformanceProfileList contains a list of PerformanceProfile -type PerformanceProfileList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []PerformanceProfile `json:"items"` -} - -func init() { - SchemeBuilder.Register(&PerformanceProfile{}, &PerformanceProfileList{}) -} diff --git a/pkg/apis/pao/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/pao/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index e99aecf49..000000000 --- a/pkg/apis/pao/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,296 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by controller-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "github.com/openshift/custom-resource-status/conditions/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CPU) DeepCopyInto(out *CPU) { - *out = *in - if in.Reserved != nil { - in, out := &in.Reserved, &out.Reserved - *out = new(CPUSet) - **out = **in - } - if in.Isolated != nil { - in, out := &in.Isolated, &out.Isolated - *out = new(CPUSet) - **out = **in - } - if in.BalanceIsolated != nil { - in, out := &in.BalanceIsolated, &out.BalanceIsolated - *out = new(bool) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CPU. -func (in *CPU) DeepCopy() *CPU { - if in == nil { - return nil - } - out := new(CPU) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HugePage) DeepCopyInto(out *HugePage) { - *out = *in - if in.Node != nil { - in, out := &in.Node, &out.Node - *out = new(int32) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HugePage. -func (in *HugePage) DeepCopy() *HugePage { - if in == nil { - return nil - } - out := new(HugePage) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *HugePages) DeepCopyInto(out *HugePages) { - *out = *in - if in.DefaultHugePagesSize != nil { - in, out := &in.DefaultHugePagesSize, &out.DefaultHugePagesSize - *out = new(HugePageSize) - **out = **in - } - if in.Pages != nil { - in, out := &in.Pages, &out.Pages - *out = make([]HugePage, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HugePages. -func (in *HugePages) DeepCopy() *HugePages { - if in == nil { - return nil - } - out := new(HugePages) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NUMA) DeepCopyInto(out *NUMA) { - *out = *in - if in.TopologyPolicy != nil { - in, out := &in.TopologyPolicy, &out.TopologyPolicy - *out = new(string) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NUMA. -func (in *NUMA) DeepCopy() *NUMA { - if in == nil { - return nil - } - out := new(NUMA) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PerformanceProfile) DeepCopyInto(out *PerformanceProfile) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerformanceProfile. -func (in *PerformanceProfile) DeepCopy() *PerformanceProfile { - if in == nil { - return nil - } - out := new(PerformanceProfile) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PerformanceProfile) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PerformanceProfileList) DeepCopyInto(out *PerformanceProfileList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PerformanceProfile, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerformanceProfileList. -func (in *PerformanceProfileList) DeepCopy() *PerformanceProfileList { - if in == nil { - return nil - } - out := new(PerformanceProfileList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PerformanceProfileList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PerformanceProfileSpec) DeepCopyInto(out *PerformanceProfileSpec) { - *out = *in - if in.CPU != nil { - in, out := &in.CPU, &out.CPU - *out = new(CPU) - (*in).DeepCopyInto(*out) - } - if in.HugePages != nil { - in, out := &in.HugePages, &out.HugePages - *out = new(HugePages) - (*in).DeepCopyInto(*out) - } - if in.MachineConfigLabel != nil { - in, out := &in.MachineConfigLabel, &out.MachineConfigLabel - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.MachineConfigPoolSelector != nil { - in, out := &in.MachineConfigPoolSelector, &out.MachineConfigPoolSelector - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.NodeSelector != nil { - in, out := &in.NodeSelector, &out.NodeSelector - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.RealTimeKernel != nil { - in, out := &in.RealTimeKernel, &out.RealTimeKernel - *out = new(RealTimeKernel) - (*in).DeepCopyInto(*out) - } - if in.AdditionalKernelArgs != nil { - in, out := &in.AdditionalKernelArgs, &out.AdditionalKernelArgs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.NUMA != nil { - in, out := &in.NUMA, &out.NUMA - *out = new(NUMA) - (*in).DeepCopyInto(*out) - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerformanceProfileSpec. -func (in *PerformanceProfileSpec) DeepCopy() *PerformanceProfileSpec { - if in == nil { - return nil - } - out := new(PerformanceProfileSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PerformanceProfileStatus) DeepCopyInto(out *PerformanceProfileStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]v1.Condition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Tuned != nil { - in, out := &in.Tuned, &out.Tuned - *out = new(string) - **out = **in - } - if in.RuntimeClass != nil { - in, out := &in.RuntimeClass, &out.RuntimeClass - *out = new(string) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerformanceProfileStatus. -func (in *PerformanceProfileStatus) DeepCopy() *PerformanceProfileStatus { - if in == nil { - return nil - } - out := new(PerformanceProfileStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RealTimeKernel) DeepCopyInto(out *RealTimeKernel) { - *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = new(bool) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RealTimeKernel. -func (in *RealTimeKernel) DeepCopy() *RealTimeKernel { - if in == nil { - return nil - } - out := new(RealTimeKernel) - in.DeepCopyInto(out) - return out -} diff --git a/pkg/apis/pao/v2/groupversion_info.go b/pkg/apis/pao/v2/groupversion_info.go deleted file mode 100644 index 694319047..000000000 --- a/pkg/apis/pao/v2/groupversion_info.go +++ /dev/null @@ -1,36 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package v2 contains API Schema definitions for the performance v2 API group -// +kubebuilder:object:generate=true -// +groupName=performance.openshift.io -package v2 - -import ( - "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/scheme" -) - -var ( - // GroupVersion is group version used to register these objects - GroupVersion = schema.GroupVersion{Group: "performance.openshift.io", Version: "v2"} - - // SchemeBuilder is used to add go types to the GroupVersionKind scheme - SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} - - // AddToScheme adds the types in this group-version to the given scheme. - AddToScheme = SchemeBuilder.AddToScheme -) diff --git a/pkg/apis/pao/v2/performanceprofile_conversion.go b/pkg/apis/pao/v2/performanceprofile_conversion.go deleted file mode 100644 index a2292a746..000000000 --- a/pkg/apis/pao/v2/performanceprofile_conversion.go +++ /dev/null @@ -1,293 +0,0 @@ -package v2 - -import ( - "k8s.io/utils/pointer" - - v1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v1" - conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" - - "sigs.k8s.io/controller-runtime/pkg/conversion" -) - -// ConvertTo converts this PerformanceProfile to the Hub version (v1). -func (curr *PerformanceProfile) ConvertTo(dstRaw conversion.Hub) error { - dst := dstRaw.(*v1.PerformanceProfile) - - // ObjectMeta - dst.ObjectMeta = curr.ObjectMeta - - // Spec - if curr.Spec.CPU != nil { - dst.Spec.CPU = new(v1.CPU) - - if curr.Spec.CPU.Reserved != nil { - reserved := v1.CPUSet(*curr.Spec.CPU.Reserved) - dst.Spec.CPU.Reserved = &reserved - } - if curr.Spec.CPU.Isolated != nil { - isolated := v1.CPUSet(*curr.Spec.CPU.Isolated) - dst.Spec.CPU.Isolated = &isolated - } - if curr.Spec.CPU.BalanceIsolated != nil { - dst.Spec.CPU.BalanceIsolated = pointer.BoolPtr(*curr.Spec.CPU.BalanceIsolated) - } - } - - if curr.Spec.HugePages != nil { - dst.Spec.HugePages = new(v1.HugePages) - - if curr.Spec.HugePages.DefaultHugePagesSize != nil { - defaultHugePagesSize := v1.HugePageSize(*curr.Spec.HugePages.DefaultHugePagesSize) - dst.Spec.HugePages.DefaultHugePagesSize = &defaultHugePagesSize - } - - if curr.Spec.HugePages.Pages != nil { - dst.Spec.HugePages.Pages = make([]v1.HugePage, len(curr.Spec.HugePages.Pages)) - - for i, p := range curr.Spec.HugePages.Pages { - dst.Spec.HugePages.Pages[i] = v1.HugePage{ - Size: v1.HugePageSize(p.Size), Count: p.Count, - } - if p.Node != nil { - dst.Spec.HugePages.Pages[i].Node = pointer.Int32Ptr(*p.Node) - } - } - } - } - - if curr.Spec.MachineConfigLabel != nil { - dst.Spec.MachineConfigLabel = make(map[string]string) - for k, v := range curr.Spec.MachineConfigLabel { - dst.Spec.MachineConfigLabel[k] = v - } - } - - if curr.Spec.MachineConfigPoolSelector != nil { - dst.Spec.MachineConfigPoolSelector = make(map[string]string) - for k, v := range curr.Spec.MachineConfigPoolSelector { - dst.Spec.MachineConfigPoolSelector[k] = v - } - } - - if curr.Spec.NodeSelector != nil { - dst.Spec.NodeSelector = make(map[string]string) - for k, v := range curr.Spec.NodeSelector { - dst.Spec.NodeSelector[k] = v 
- } - } - - if curr.Spec.RealTimeKernel != nil { - dst.Spec.RealTimeKernel = new(v1.RealTimeKernel) - - if curr.Spec.RealTimeKernel.Enabled != nil { - dst.Spec.RealTimeKernel.Enabled = pointer.BoolPtr(*curr.Spec.RealTimeKernel.Enabled) - } - } - - if curr.Spec.AdditionalKernelArgs != nil { - dst.Spec.AdditionalKernelArgs = make([]string, len(curr.Spec.AdditionalKernelArgs)) - copy(dst.Spec.AdditionalKernelArgs, curr.Spec.AdditionalKernelArgs) - } - - if curr.Spec.NUMA != nil { - dst.Spec.NUMA = new(v1.NUMA) - - if curr.Spec.NUMA.TopologyPolicy != nil { - dst.Spec.NUMA.TopologyPolicy = pointer.StringPtr(*curr.Spec.NUMA.TopologyPolicy) - } - } - - // Convert Net fields - if curr.Spec.Net != nil { - dst.Spec.Net = new(v1.Net) - - if curr.Spec.Net.UserLevelNetworking != nil { - dst.Spec.Net.UserLevelNetworking = pointer.BoolPtr(*curr.Spec.Net.UserLevelNetworking) - } - - if curr.Spec.Net.Devices != nil { - dst.Spec.Net.Devices = []v1.Device{} - - for _, d := range curr.Spec.Net.Devices { - device := v1.Device{} - - if d.VendorID != nil { - device.VendorID = pointer.StringPtr(*d.VendorID) - } - - if d.DeviceID != nil { - device.DeviceID = pointer.StringPtr(*d.DeviceID) - } - - if d.InterfaceName != nil { - device.InterfaceName = pointer.StringPtr(*d.InterfaceName) - } - - dst.Spec.Net.Devices = append(dst.Spec.Net.Devices, device) - } - } - } - - if curr.Spec.GloballyDisableIrqLoadBalancing != nil { - dst.Spec.GloballyDisableIrqLoadBalancing = pointer.BoolPtr(*curr.Spec.GloballyDisableIrqLoadBalancing) - } - - // Status - if curr.Status.Conditions != nil { - dst.Status.Conditions = make([]conditionsv1.Condition, len(curr.Status.Conditions)) - copy(dst.Status.Conditions, curr.Status.Conditions) - } - - if curr.Status.Tuned != nil { - dst.Status.Tuned = pointer.StringPtr(*curr.Status.Tuned) - } - - if curr.Status.RuntimeClass != nil { - dst.Status.RuntimeClass = pointer.StringPtr(*curr.Status.RuntimeClass) - } - - // +kubebuilder:docs-gen:collapse=rote conversion - return nil -} - -// ConvertFrom converts from the Hub version (v1) to this version. 
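
Throughout the converters above, pointer fields are rebuilt with pointer.BoolPtr(*curr...) or pointer.StringPtr(*d...) rather than assigned directly. A tiny sketch of the difference that makes: copying the pointer would leave the v1 and v2 objects sharing one value, while re-allocating keeps them independent.

package main

import (
	"fmt"

	"k8s.io/utils/pointer"
)

func main() {
	src := pointer.BoolPtr(true)

	aliased := src                  // both API objects would now point at the same bool
	copied := pointer.BoolPtr(*src) // the converters' approach: a fresh allocation

	*src = false // a later mutation of the source object

	fmt.Println(*aliased) // false: follows the source
	fmt.Println(*copied)  // true: unaffected
}
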
-func (curr *PerformanceProfile) ConvertFrom(srcRaw conversion.Hub) error { - src := srcRaw.(*v1.PerformanceProfile) - - // ObjectMeta - curr.ObjectMeta = src.ObjectMeta - - // Spec - if src.Spec.CPU != nil { - curr.Spec.CPU = new(CPU) - - if src.Spec.CPU.Reserved != nil { - reserved := CPUSet(*src.Spec.CPU.Reserved) - curr.Spec.CPU.Reserved = &reserved - } - if src.Spec.CPU.Isolated != nil { - isolated := CPUSet(*src.Spec.CPU.Isolated) - curr.Spec.CPU.Isolated = &isolated - } - if src.Spec.CPU.BalanceIsolated != nil { - curr.Spec.CPU.BalanceIsolated = pointer.BoolPtr(*src.Spec.CPU.BalanceIsolated) - } - } - - if src.Spec.HugePages != nil { - curr.Spec.HugePages = new(HugePages) - - if src.Spec.HugePages.DefaultHugePagesSize != nil { - defaultHugePagesSize := HugePageSize(*src.Spec.HugePages.DefaultHugePagesSize) - curr.Spec.HugePages.DefaultHugePagesSize = &defaultHugePagesSize - } - - if src.Spec.HugePages.Pages != nil { - curr.Spec.HugePages.Pages = make([]HugePage, len(src.Spec.HugePages.Pages)) - for i, p := range src.Spec.HugePages.Pages { - curr.Spec.HugePages.Pages[i] = HugePage{ - Size: HugePageSize(p.Size), Count: p.Count, - } - if p.Node != nil { - curr.Spec.HugePages.Pages[i].Node = pointer.Int32Ptr(*p.Node) - } - } - } - } - - if src.Spec.MachineConfigLabel != nil { - curr.Spec.MachineConfigLabel = make(map[string]string) - for k, v := range src.Spec.MachineConfigLabel { - curr.Spec.MachineConfigLabel[k] = v - } - } - - if src.Spec.MachineConfigPoolSelector != nil { - curr.Spec.MachineConfigPoolSelector = make(map[string]string) - for k, v := range src.Spec.MachineConfigPoolSelector { - curr.Spec.MachineConfigPoolSelector[k] = v - } - } - - if src.Spec.NodeSelector != nil { - curr.Spec.NodeSelector = make(map[string]string) - for k, v := range src.Spec.NodeSelector { - curr.Spec.NodeSelector[k] = v - } - } - - if src.Spec.RealTimeKernel != nil { - curr.Spec.RealTimeKernel = new(RealTimeKernel) - - if src.Spec.RealTimeKernel.Enabled != nil { - curr.Spec.RealTimeKernel.Enabled = pointer.BoolPtr(*src.Spec.RealTimeKernel.Enabled) - } - } - - if src.Spec.AdditionalKernelArgs != nil { - curr.Spec.AdditionalKernelArgs = make([]string, len(src.Spec.AdditionalKernelArgs)) - copy(curr.Spec.AdditionalKernelArgs, src.Spec.AdditionalKernelArgs) - } - - if src.Spec.NUMA != nil { - curr.Spec.NUMA = new(NUMA) - - if src.Spec.NUMA.TopologyPolicy != nil { - curr.Spec.NUMA.TopologyPolicy = pointer.StringPtr(*src.Spec.NUMA.TopologyPolicy) - } - } - - // Convert Net fields - if src.Spec.Net != nil { - curr.Spec.Net = new(Net) - - if src.Spec.Net.UserLevelNetworking != nil { - curr.Spec.Net.UserLevelNetworking = pointer.BoolPtr(*src.Spec.Net.UserLevelNetworking) - } - - if src.Spec.Net.Devices != nil { - curr.Spec.Net.Devices = []Device{} - - for _, d := range src.Spec.Net.Devices { - device := Device{} - - if d.VendorID != nil { - device.VendorID = pointer.StringPtr(*d.VendorID) - } - - if d.DeviceID != nil { - device.DeviceID = pointer.StringPtr(*d.DeviceID) - } - - if d.InterfaceName != nil { - device.InterfaceName = pointer.StringPtr(*d.InterfaceName) - } - - curr.Spec.Net.Devices = append(curr.Spec.Net.Devices, device) - } - } - } - - if src.Spec.GloballyDisableIrqLoadBalancing != nil { - curr.Spec.GloballyDisableIrqLoadBalancing = pointer.BoolPtr(*src.Spec.GloballyDisableIrqLoadBalancing) - } else { // set to true by default - curr.Spec.GloballyDisableIrqLoadBalancing = pointer.BoolPtr(true) - } - - // Status - if src.Status.Conditions != nil { - curr.Status.Conditions = 
make([]conditionsv1.Condition, len(src.Status.Conditions)) - copy(curr.Status.Conditions, src.Status.Conditions) - } - - if src.Status.Tuned != nil { - curr.Status.Tuned = pointer.StringPtr(*src.Status.Tuned) - } - - if src.Status.RuntimeClass != nil { - curr.Status.RuntimeClass = pointer.StringPtr(*src.Status.RuntimeClass) - } - - // +kubebuilder:docs-gen:collapse=rote conversion - return nil -} diff --git a/pkg/apis/pao/v2/performanceprofile_types.go b/pkg/apis/pao/v2/performanceprofile_types.go deleted file mode 100644 index 9022020a9..000000000 --- a/pkg/apis/pao/v2/performanceprofile_types.go +++ /dev/null @@ -1,198 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v2 - -import ( - conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// PerformanceProfilePauseAnnotation allows an admin to suspend the operator's -// reconcile loop in order to perform manual changes to performance profile owned -// objects. -const PerformanceProfilePauseAnnotation = "performance.openshift.io/pause-reconcile" - -// PerformanceProfileSpec defines the desired state of PerformanceProfile. -type PerformanceProfileSpec struct { - // CPU defines a set of CPU related parameters. - CPU *CPU `json:"cpu"` - // HugePages defines a set of huge pages related parameters. - // It is possible to set huge pages with multiple size values at the same time. - // For example, hugepages can be set with 1G and 2M, both values will be set on the node by the performance-addon-operator. - // It is important to notice that setting hugepages default size to 1G will remove all 2M related - // folders from the node and it will be impossible to configure 2M hugepages under the node. - HugePages *HugePages `json:"hugepages,omitempty"` - // MachineConfigLabel defines the label to add to the MachineConfigs the operator creates. It has to be - // used in the MachineConfigSelector of the MachineConfigPool which targets this performance profile. - // Defaults to "machineconfiguration.openshift.io/role=" - // +optional - MachineConfigLabel map[string]string `json:"machineConfigLabel,omitempty"` - // MachineConfigPoolSelector defines the MachineConfigPool label to use in the MachineConfigPoolSelector - // of resources like KubeletConfigs created by the operator. - // Defaults to "machineconfiguration.openshift.io/role=" - // +optional - MachineConfigPoolSelector map[string]string `json:"machineConfigPoolSelector,omitempty"` - // NodeSelector defines the Node label to use in the NodeSelectors of resources like Tuned created by the operator. - // It most likely should, but does not have to match the node label in the NodeSelector of the MachineConfigPool - // which targets this performance profile. - // In the case when machineConfigLabels or machineConfigPoolSelector are not set, we are expecting a certain NodeSelector format - // /: "" in order to be able to calculate the default values for the former mentioned fields. 
- NodeSelector map[string]string `json:"nodeSelector"` - // RealTimeKernel defines a set of real time kernel related parameters. RT kernel won't be installed when not set. - RealTimeKernel *RealTimeKernel `json:"realTimeKernel,omitempty"` - // Addional kernel arguments. - // +optional - AdditionalKernelArgs []string `json:"additionalKernelArgs,omitempty"` - // NUMA defines options related to topology aware affinities - // +optional - NUMA *NUMA `json:"numa,omitempty"` - // Net defines a set of network related features - // +optional - Net *Net `json:"net,omitempty"` - // GloballyDisableIrqLoadBalancing toggles whether IRQ load balancing will be disabled for the Isolated CPU set. - // When the option is set to "true" it disables IRQs load balancing for the Isolated CPU set. - // Setting the option to "false" allows the IRQs to be balanced across all CPUs, however the IRQs load balancing - // can be disabled per pod CPUs when using irq-load-balancing.crio.io/cpu-quota.crio.io annotations. - // Defaults to "false" - // +optional - GloballyDisableIrqLoadBalancing *bool `json:"globallyDisableIrqLoadBalancing,omitempty"` -} - -// CPUSet defines the set of CPUs(0-3,8-11). -type CPUSet string - -// CPU defines a set of CPU related features. -type CPU struct { - // Reserved defines a set of CPUs that will not be used for any container workloads initiated by kubelet. - Reserved *CPUSet `json:"reserved"` - // Isolated defines a set of CPUs that will be used to give to application threads the most execution time possible, - // which means removing as many extraneous tasks off a CPU as possible. - // It is important to notice the CPU manager can choose any CPU to run the workload - // except the reserved CPUs. In order to guarantee that your workload will run on the isolated CPU: - // 1. The union of reserved CPUs and isolated CPUs should include all online CPUs - // 2. The isolated CPUs field should be the complementary to reserved CPUs field - Isolated *CPUSet `json:"isolated"` - // BalanceIsolated toggles whether or not the Isolated CPU set is eligible for load balancing work loads. - // When this option is set to "false", the Isolated CPU set will be static, meaning workloads have to - // explicitly assign each thread to a specific cpu in order to work across multiple CPUs. - // Setting this to "true" allows workloads to be balanced across CPUs. - // Setting this to "false" offers the most predictable performance for guaranteed workloads, but it - // offloads the complexity of cpu load balancing to the application. - // Defaults to "true" - // +optional - BalanceIsolated *bool `json:"balanceIsolated,omitempty"` -} - -// HugePageSize defines size of huge pages, can be 2M or 1G. -type HugePageSize string - -// HugePages defines a set of huge pages that we want to allocate at boot. -type HugePages struct { - // DefaultHugePagesSize defines huge pages default size under kernel boot parameters. - DefaultHugePagesSize *HugePageSize `json:"defaultHugepagesSize,omitempty"` - // Pages defines huge pages that we want to allocate at boot time. - Pages []HugePage `json:"pages,omitempty"` -} - -// HugePage defines the number of allocated huge pages of the specific size. -type HugePage struct { - // Size defines huge page size, maps to the 'hugepagesz' kernel boot parameter. - Size HugePageSize `json:"size,omitempty"` - // Count defines amount of huge pages, maps to the 'hugepages' kernel boot parameter. 
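
GloballyDisableIrqLoadBalancing above points at the irq-load-balancing.crio.io and cpu-quota.crio.io pod annotations as the per-pod alternative. A hedged sketch of a pod carrying them: the "disable" values and the runtimeClassName are the conventional performance-addon-operator usage, assumed here rather than taken from this patch, and the pod spec is trimmed to the relevant fields.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/yaml"
)

func main() {
	runtimeClass := "performance-example" // hypothetical RuntimeClass name
	pod := corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: "latency-sensitive",
			Annotations: map[string]string{
				// Opt this pod's exclusive CPUs out of IRQ balancing and CFS quota
				// instead of disabling IRQ balancing globally in the profile.
				"irq-load-balancing.crio.io": "disable",
				"cpu-quota.crio.io":          "disable",
			},
		},
		Spec: corev1.PodSpec{
			RuntimeClassName: &runtimeClass,
		},
	}

	out, err := yaml.Marshal(pod)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}
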
- Count int32 `json:"count,omitempty"` - // Node defines the NUMA node where hugepages will be allocated, - // if not specified, pages will be allocated equally between NUMA nodes - // +optional - Node *int32 `json:"node,omitempty"` -} - -// NUMA defines parameters related to topology awareness and affinity. -type NUMA struct { - // Name of the policy applied when TopologyManager is enabled - // Operator defaults to "best-effort" - // +optional - TopologyPolicy *string `json:"topologyPolicy,omitempty"` -} - -// Net defines a set of network related features -type Net struct { - // UserLevelNetworking when enabled - sets either all or specified network devices queue size to the amount of reserved CPUs. Defaults to "false". - UserLevelNetworking *bool `json:"userLevelNetworking,omitempty"` - // Devices contains a list of network device representations that will be - // set with a netqueue count equal to CPU.Reserved . - // If no devices are specified then the default is all devices. - Devices []Device `json:"devices,omitempty"` -} - -// Device defines a way to represent a network device in several options: -// device name, vendor ID, model ID, PCI path and MAC address -type Device struct { - // Network device name to be matched. It uses a syntax of shell-style wildcards which are either positive or negative. - // +optional - InterfaceName *string `json:"interfaceName,omitempty"` - // Network device vendor ID represnted as a 16 bit Hexmadecimal number. - // +optional - VendorID *string `json:"vendorID,omitempty"` - // Network device ID (model) represnted as a 16 bit hexmadecimal number. - // +optional - DeviceID *string `json:"deviceID,omitempty"` -} - -// RealTimeKernel defines the set of parameters relevant for the real time kernel. -type RealTimeKernel struct { - // Enabled defines if the real time kernel packages should be installed. Defaults to "false" - Enabled *bool `json:"enabled,omitempty"` -} - -// PerformanceProfileStatus defines the observed state of PerformanceProfile. -type PerformanceProfileStatus struct { - // Conditions represents the latest available observations of current state. - // +optional - Conditions []conditionsv1.Condition `json:"conditions,omitempty"` - // Tuned points to the Tuned custom resource object that contains the tuning values generated by this operator. - // +optional - Tuned *string `json:"tuned,omitempty"` - // RuntimeClass contains the name of the RuntimeClass resource created by the operator. 
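
The Net and Device types above select which NICs get their queue count pinned to the reserved CPU count. A small sketch of a populated v2 Net section combining a shell-style interface wildcard with vendor and device IDs; the hex IDs are placeholders and the v2 package is assumed to still be importable despite being removed in this hunk.

package main

import (
	"fmt"

	"k8s.io/utils/pointer"

	paov2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2"
)

func main() {
	net := &paov2.Net{
		// Size the matched devices' net queues to the reserved CPU count.
		UserLevelNetworking: pointer.BoolPtr(true),
		Devices: []paov2.Device{
			{
				InterfaceName: pointer.StringPtr("ens5f*"), // shell-style wildcard, per the comment above
				VendorID:      pointer.StringPtr("0x8086"), // placeholder 16-bit hex vendor ID
				DeviceID:      pointer.StringPtr("0x158b"), // placeholder 16-bit hex device ID
			},
		},
	}
	fmt.Printf("%d device selector(s), first matches %q\n", len(net.Devices), *net.Devices[0].InterfaceName)
}
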
- RuntimeClass *string `json:"runtimeClass,omitempty"` -} - -// +kubebuilder:object:root=true -// +kubebuilder:subresource:status -// +kubebuilder:resource:path=performanceprofiles,scope=Cluster -// +kubebuilder:storageversion - -// PerformanceProfile is the Schema for the performanceprofiles API -type PerformanceProfile struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - Spec PerformanceProfileSpec `json:"spec,omitempty"` - Status PerformanceProfileStatus `json:"status,omitempty"` -} - -// +kubebuilder:object:root=true - -// PerformanceProfileList contains a list of PerformanceProfile -type PerformanceProfileList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` - Items []PerformanceProfile `json:"items"` -} - -func init() { - SchemeBuilder.Register(&PerformanceProfile{}, &PerformanceProfileList{}) -} diff --git a/pkg/apis/pao/v2/performanceprofile_validation.go b/pkg/apis/pao/v2/performanceprofile_validation.go deleted file mode 100644 index baf5601f6..000000000 --- a/pkg/apis/pao/v2/performanceprofile_validation.go +++ /dev/null @@ -1,291 +0,0 @@ -/* - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - -*/ - -package v2 - -import ( - "context" - "fmt" - "reflect" - "regexp" - - "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components" - - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/klog" - kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1" -) - -const ( - hugepagesSize2M = "2M" - hugepagesSize1G = "1G" -) - -// ValidateCreate implements webhook.Validator so a webhook will be registered for the type -func (r *PerformanceProfile) ValidateCreate() error { - klog.Infof("Create validation for the performance profile %q", r.Name) - - return r.validateCreateOrUpdate() -} - -// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type -func (r *PerformanceProfile) ValidateUpdate(old runtime.Object) error { - klog.Infof("Update validation for the performance profile %q", r.Name) - - return r.validateCreateOrUpdate() -} - -func (r *PerformanceProfile) validateCreateOrUpdate() error { - var allErrs field.ErrorList - - // validate node selector duplication - ppList := &PerformanceProfileList{} - if err := validatorClient.List(context.TODO(), ppList); err != nil { - return apierrors.NewInternalError(err) - } - - allErrs = append(allErrs, r.validateNodeSelectorDuplication(ppList)...) - - // validate basic fields - allErrs = append(allErrs, r.validateFields()...) 
- - if len(allErrs) == 0 { - return nil - } - - return apierrors.NewInvalid( - schema.GroupKind{Group: "performance.openshift.io", Kind: "PerformanceProfile"}, - r.Name, allErrs) -} - -// ValidateDelete implements webhook.Validator so a webhook will be registered for the type -func (r *PerformanceProfile) ValidateDelete() error { - klog.Infof("Delete validation for the performance profile %q", r.Name) - - // TODO(user): fill in your validation logic upon object deletion. - return nil -} - -func (r *PerformanceProfile) validateNodeSelectorDuplication(ppList *PerformanceProfileList) field.ErrorList { - var allErrs field.ErrorList - - // validate node selector duplication - for _, pp := range ppList.Items { - // exclude the current profile from the check - if pp.Name == r.Name { - continue - } - - if reflect.DeepEqual(pp.Spec.NodeSelector, r.Spec.NodeSelector) { - allErrs = append(allErrs, field.Invalid(field.NewPath("spec.nodeSelector"), r.Spec.NodeSelector, fmt.Sprintf("the profile has the same node selector as the performance profile %q", pp.Name))) - } - } - - return allErrs -} - -func (r *PerformanceProfile) validateFields() field.ErrorList { - var allErrs field.ErrorList - - allErrs = append(allErrs, r.validateCPUs()...) - allErrs = append(allErrs, r.validateSelectors()...) - allErrs = append(allErrs, r.validateHugePages()...) - allErrs = append(allErrs, r.validateNUMA()...) - allErrs = append(allErrs, r.validateNet()...) - - return allErrs -} - -func (r *PerformanceProfile) validateCPUs() field.ErrorList { - var allErrs field.ErrorList - - if r.Spec.CPU == nil { - allErrs = append(allErrs, field.Required(field.NewPath("spec.cpu"), "cpu section required")) - } else { - if r.Spec.CPU.Isolated == nil { - allErrs = append(allErrs, field.Required(field.NewPath("spec.cpu.isolated"), "isolated CPUs required")) - } - - if r.Spec.CPU.Reserved == nil { - allErrs = append(allErrs, field.Required(field.NewPath("spec.cpu.reserved"), "reserved CPUs required")) - } - - if r.Spec.CPU.Isolated != nil && r.Spec.CPU.Reserved != nil { - cpuLists, err := components.NewCPULists(string(*r.Spec.CPU.Reserved), string(*r.Spec.CPU.Isolated)) - if err != nil { - allErrs = append(allErrs, field.InternalError(field.NewPath("spec.cpu"), err)) - } - - if cpuLists != nil { - if cpuLists.CountIsolated() == 0 { - allErrs = append(allErrs, field.Invalid(field.NewPath("spec.cpu.isolated"), r.Spec.CPU.Isolated, "isolated CPUs can not be empty")) - } - - if overlap := cpuLists.Intersect(); len(overlap) != 0 { - allErrs = append(allErrs, field.Invalid(field.NewPath("spec.cpu"), r.Spec.CPU, fmt.Sprintf("reserved and isolated cpus overlap: %v", overlap))) - } - } - } - } - - return allErrs -} - -func (r *PerformanceProfile) validateSelectors() field.ErrorList { - var allErrs field.ErrorList - - if r.Spec.MachineConfigLabel != nil && len(r.Spec.MachineConfigLabel) > 1 { - allErrs = append(allErrs, field.Invalid(field.NewPath("spec.machineConfigLabel"), r.Spec.MachineConfigLabel, "you should provide only 1 MachineConfigLabel")) - } - - if r.Spec.MachineConfigPoolSelector != nil && len(r.Spec.MachineConfigPoolSelector) > 1 { - allErrs = append(allErrs, field.Invalid(field.NewPath("spec.machineConfigPoolSelector"), r.Spec.MachineConfigLabel, "you should provide only 1 MachineConfigPoolSelector")) - } - - if r.Spec.NodeSelector == nil { - allErrs = append(allErrs, field.Required(field.NewPath("spec.nodeSelector"), "the nodeSelector required")) - } - - if len(r.Spec.NodeSelector) > 1 { - allErrs = append(allErrs, 
field.Invalid(field.NewPath("spec.nodeSelector"), r.Spec.NodeSelector, "you should provide ony 1 NodeSelector")) - } - - // in case MachineConfigLabels or MachineConfigPoolSelector are not set, we expect a certain format (domain/role) - // on the NodeSelector in order to be able to calculate the default values for the former metioned fields. - if r.Spec.MachineConfigLabel == nil || r.Spec.MachineConfigPoolSelector == nil { - k, _ := components.GetFirstKeyAndValue(r.Spec.NodeSelector) - if _, _, err := components.SplitLabelKey(k); err != nil { - allErrs = append(allErrs, field.Invalid( - field.NewPath("spec.nodeSelector"), - r.Spec.NodeSelector, - "machineConfigLabels or machineConfigPoolSelector are not set, but we can not set it automatically because of an invalid NodeSelector label key that can't be split into domain/role")) - } - } - - return allErrs -} - -func (r *PerformanceProfile) validateHugePages() field.ErrorList { - var allErrs field.ErrorList - - if r.Spec.HugePages == nil { - return allErrs - } - - // validate that default hugepages size has correct value, currently we support only 2M and 1G(x86_64 architecture) - if r.Spec.HugePages.DefaultHugePagesSize != nil { - defaultSize := *r.Spec.HugePages.DefaultHugePagesSize - if defaultSize != hugepagesSize1G && defaultSize != hugepagesSize2M { - allErrs = append(allErrs, field.Invalid(field.NewPath("spec.hugepages.defaultHugepagesSize"), r.Spec.HugePages.DefaultHugePagesSize, fmt.Sprintf("hugepages default size should be equal to %q or %q", hugepagesSize1G, hugepagesSize2M))) - } - } - - for i, page := range r.Spec.HugePages.Pages { - if page.Size != hugepagesSize1G && page.Size != hugepagesSize2M { - allErrs = append(allErrs, field.Invalid(field.NewPath("spec.hugepages.pages"), r.Spec.HugePages.Pages, fmt.Sprintf("the page size should be equal to %q or %q", hugepagesSize1G, hugepagesSize2M))) - } - - allErrs = append(allErrs, r.validatePageDuplication(&page, r.Spec.HugePages.Pages[i+1:])...) 
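For readers skimming the deleted validateHugePages/validatePageDuplication pair (the latter is defined just below): two page entries with the same size only conflict when both are unpinned or both are pinned to the same NUMA node. A small self-contained sketch of that rule follows; the hugePage type is a local stand-in for the API type and the sample data is made up.

package main

import "fmt"

// hugePage is a local stand-in for the HugePage API type above.
type hugePage struct {
	Size  string
	Count int32
	Node  *int32 // nil means "not pinned to a NUMA node"
}

func hasDuplicate(pages []hugePage) bool {
	for i, a := range pages {
		for _, b := range pages[i+1:] {
			if a.Size != b.Size {
				continue
			}
			// Both unpinned, or both pinned to the same node, counts as a duplicate;
			// a pinned page never conflicts with an unpinned one of the same size.
			if a.Node == nil && b.Node == nil {
				return true
			}
			if a.Node != nil && b.Node != nil && *a.Node == *b.Node {
				return true
			}
		}
	}
	return false
}

func main() {
	node0 := int32(0)
	ok := []hugePage{{Size: "1G", Count: 4, Node: &node0}, {Size: "2M", Count: 128, Node: &node0}}
	bad := []hugePage{{Size: "1G", Count: 4, Node: &node0}, {Size: "1G", Count: 8, Node: &node0}}
	fmt.Println(hasDuplicate(ok), hasDuplicate(bad)) // false true
}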
- } - - return allErrs -} - -func (r *PerformanceProfile) validatePageDuplication(page *HugePage, pages []HugePage) field.ErrorList { - var allErrs field.ErrorList - - for _, p := range pages { - if page.Size != p.Size { - continue - } - - if page.Node != nil && p.Node == nil { - continue - } - - if page.Node == nil && p.Node != nil { - continue - } - - if page.Node == nil && p.Node == nil { - allErrs = append(allErrs, field.Invalid(field.NewPath("spec.hugepages.pages"), r.Spec.HugePages.Pages, fmt.Sprintf("the page with the size %q and without the specified NUMA node, has duplication", page.Size))) - continue - } - - if *page.Node == *p.Node { - allErrs = append(allErrs, field.Invalid(field.NewPath("spec.hugepages.pages"), r.Spec.HugePages.Pages, fmt.Sprintf("the page with the size %q and with specified NUMA node %d, has duplication", page.Size, *page.Node))) - } - } - - return allErrs -} - -func (r *PerformanceProfile) validateNUMA() field.ErrorList { - var allErrs field.ErrorList - - if r.Spec.NUMA == nil { - return allErrs - } - - // validate NUMA topology policy matches allowed values - if r.Spec.NUMA.TopologyPolicy != nil { - policy := *r.Spec.NUMA.TopologyPolicy - if policy != kubeletconfigv1beta1.NoneTopologyManagerPolicy && - policy != kubeletconfigv1beta1.BestEffortTopologyManagerPolicy && - policy != kubeletconfigv1beta1.RestrictedTopologyManagerPolicy && - policy != kubeletconfigv1beta1.SingleNumaNodeTopologyManagerPolicy { - allErrs = append(allErrs, field.Invalid(field.NewPath("spec.numa.topologyPolicy"), r.Spec.NUMA.TopologyPolicy, "unrecognized value for topologyPolicy")) - } - } - - return allErrs -} - -func (r *PerformanceProfile) validateNet() field.ErrorList { - var allErrs field.ErrorList - - if r.Spec.Net == nil { - return allErrs - } - - if r.Spec.Net.UserLevelNetworking != nil && *r.Spec.Net.UserLevelNetworking && r.Spec.CPU.Reserved == nil { - allErrs = append(allErrs, field.Invalid(field.NewPath("spec.net"), r.Spec.Net, "can not set network devices queues count without specifiying spec.cpu.reserved")) - } - - for _, device := range r.Spec.Net.Devices { - if device.InterfaceName != nil && *device.InterfaceName == "" { - allErrs = append(allErrs, field.Invalid(field.NewPath("spec.net.devices"), r.Spec.Net.Devices, "device name cannot be empty")) - } - if device.VendorID != nil && !isValid16bitsHexID(*device.VendorID) { - allErrs = append(allErrs, field.Invalid(field.NewPath("spec.net.devices"), r.Spec.Net.Devices, fmt.Sprintf("device vendor ID %s has an invalid format. Vendor ID should be represented as 0x<4 hexadecimal digits> (16 bit representation)", *device.VendorID))) - } - if device.DeviceID != nil && !isValid16bitsHexID(*device.DeviceID) { - allErrs = append(allErrs, field.Invalid(field.NewPath("spec.net.devices"), r.Spec.Net.Devices, fmt.Sprintf("device model ID %s has an invalid format. 
Model ID should be represented as 0x<4 hexadecimal digits> (16 bit representation)", *device.DeviceID))) - } - if device.DeviceID != nil && device.VendorID == nil { - allErrs = append(allErrs, field.Invalid(field.NewPath("spec.net.devices"), r.Spec.Net.Devices, fmt.Sprintf("device model ID can not be used without specifying the device vendor ID."))) - } - } - return allErrs -} - -func isValid16bitsHexID(v string) bool { - re := regexp.MustCompile("^0x[0-9a-fA-F]+$") - return re.MatchString(v) && len(v) < 7 -} diff --git a/pkg/apis/pao/v2/performanceprofile_validation_suite_test.go b/pkg/apis/pao/v2/performanceprofile_validation_suite_test.go deleted file mode 100644 index 7615c7990..000000000 --- a/pkg/apis/pao/v2/performanceprofile_validation_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package v2 - -import ( - "testing" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" -) - -func TestProfile(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Profile Suite") -} diff --git a/pkg/apis/pao/v2/performanceprofile_validation_test.go b/pkg/apis/pao/v2/performanceprofile_validation_test.go deleted file mode 100644 index 5ae98e495..000000000 --- a/pkg/apis/pao/v2/performanceprofile_validation_test.go +++ /dev/null @@ -1,322 +0,0 @@ -package v2 - -import ( - "fmt" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/pointer" -) - -const ( - NodeSelectorRole = "barRole" -) - -const ( - // HugePageSize defines the huge page size used for tests - HugePageSize1G = HugePageSize("1G") - // HugePagesCount defines the huge page count used for tests - HugePagesCount = 4 - // IsolatedCPUs defines the isolated CPU set used for tests - IsolatedCPUs = CPUSet("4-7") - // ReservedCPUs defines the reserved CPU set used for tests - ReservedCPUs = CPUSet("0-3") - // SingleNUMAPolicy defines the topologyManager policy used for tests - SingleNUMAPolicy = "single-numa-node" - - //MachineConfigLabelKey defines the MachineConfig label key of the test profile - MachineConfigLabelKey = "mcKey" - //MachineConfigLabelValue defines the MachineConfig label value of the test profile - MachineConfigLabelValue = "mcValue" - //MachineConfigPoolLabelKey defines the MachineConfigPool label key of the test profile - MachineConfigPoolLabelKey = "mcpKey" - //MachineConfigPoolLabelValue defines the MachineConfigPool label value of the test profile - MachineConfigPoolLabelValue = "mcpValue" - - //NetDeviceName defines a net device name for the test profile - NetDeviceName = "enp0s4" - //NetDeviceVendorID defines a net device vendor ID for the test profile - NetDeviceVendorID = "0x1af4" - //NetDeviceModelID defines a net device model ID for the test profile - NetDeviceModelID = "0x1000" -) - -// NewPerformanceProfile returns new performance profile object that used for tests -func NewPerformanceProfile(name string) *PerformanceProfile { - size := HugePageSize1G - isolatedCPUs := IsolatedCPUs - reservedCPUs := ReservedCPUs - numaPolicy := SingleNUMAPolicy - - netDeviceName := NetDeviceName - netDeviceVendorID := NetDeviceVendorID - netDeviceModelID := NetDeviceModelID - - return &PerformanceProfile{ - TypeMeta: metav1.TypeMeta{Kind: "PerformanceProfile"}, - ObjectMeta: metav1.ObjectMeta{ - Name: name, - UID: "11111111-1111-1111-1111-1111111111111", - }, - Spec: PerformanceProfileSpec{ - CPU: &CPU{ - Isolated: &isolatedCPUs, - Reserved: &reservedCPUs, - }, - HugePages: &HugePages{ - DefaultHugePagesSize: &size, - Pages: []HugePage{ - { - Count: 
HugePagesCount, - Size: size, - }, - }, - }, - RealTimeKernel: &RealTimeKernel{ - Enabled: pointer.BoolPtr(true), - }, - NUMA: &NUMA{ - TopologyPolicy: &numaPolicy, - }, - Net: &Net{ - UserLevelNetworking: pointer.BoolPtr(true), - Devices: []Device{ - { - InterfaceName: &netDeviceName, - VendorID: &netDeviceVendorID, - DeviceID: &netDeviceModelID, - }, - }, - }, - MachineConfigLabel: map[string]string{ - MachineConfigLabelKey: MachineConfigLabelValue, - }, - MachineConfigPoolSelector: map[string]string{ - MachineConfigPoolLabelKey: MachineConfigPoolLabelValue, - }, - NodeSelector: map[string]string{ - "nodekey": "nodeValue", - }, - }, - } -} - -var _ = Describe("PerformanceProfile", func() { - var profile *PerformanceProfile - - BeforeEach(func() { - profile = NewPerformanceProfile("test") - }) - - Describe("CPU validation", func() { - It("should have CPU fields populated", func() { - errors := profile.validateCPUs() - Expect(errors).To(BeEmpty(), "should not have validation errors with populated CPU fields") - - profile.Spec.CPU.Isolated = nil - errors = profile.validateCPUs() - Expect(errors).NotTo(BeEmpty(), "should have validation error with missing CPU Isolated field") - Expect(errors[0].Error()).To(ContainSubstring("isolated CPUs required")) - - cpus := CPUSet("0") - profile.Spec.CPU.Isolated = &cpus - profile.Spec.CPU.Reserved = nil - errors = profile.validateCPUs() - Expect(errors).NotTo(BeEmpty(), "should have validation error with missing CPU reserved field") - Expect(errors[0].Error()).To(ContainSubstring("reserved CPUs required")) - - invalidCPUs := CPUSet("bla") - profile.Spec.CPU.Isolated = &invalidCPUs - errors = profile.validateCPUs() - Expect(errors).NotTo(BeEmpty(), "should have validation error when isolated CPUs has invalid format") - - profile.Spec.CPU = nil - errors = profile.validateCPUs() - Expect(errors).NotTo(BeEmpty(), "should have validation error with missing CPU") - Expect(errors[0].Error()).To(ContainSubstring("cpu section required")) - }) - - It("should allow cpus allocation with no reserved CPUs", func() { - reservedCPUs := CPUSet("") - isolatedCPUs := CPUSet("0-7") - profile.Spec.CPU.Reserved = &reservedCPUs - profile.Spec.CPU.Isolated = &isolatedCPUs - errors := profile.validateCPUs() - Expect(errors).To(BeEmpty()) - }) - - It("should reject cpus allocation with no isolated CPUs", func() { - reservedCPUs := CPUSet("0-3") - isolatedCPUs := CPUSet("") - profile.Spec.CPU.Reserved = &reservedCPUs - profile.Spec.CPU.Isolated = &isolatedCPUs - errors := profile.validateCPUs() - Expect(errors).NotTo(BeEmpty()) - Expect(errors[0].Error()).To(ContainSubstring("isolated CPUs can not be empty")) - }) - - It("should reject cpus allocation with overlapping sets", func() { - reservedCPUs := CPUSet("0-7") - isolatedCPUs := CPUSet("0-15") - profile.Spec.CPU.Reserved = &reservedCPUs - profile.Spec.CPU.Isolated = &isolatedCPUs - errors := profile.validateCPUs() - Expect(errors).NotTo(BeEmpty(), "should have validation error when reserved and isolation CPUs have overlap") - Expect(errors[0].Error()).To(ContainSubstring("reserved and isolated cpus overlap")) - }) - }) - - Describe("Label selectors validation", func() { - It("should have 0 or 1 MachineConfigLabels", func() { - errors := profile.validateSelectors() - Expect(errors).To(BeEmpty(), "should not have validation errors when the profile has only 1 MachineConfigSelector") - - profile.Spec.MachineConfigLabel["foo"] = "bar" - errors = profile.validateSelectors() - Expect(errors).NotTo(BeEmpty(), "should have validation 
error when the profile has two machine config selectors") - Expect(errors[0].Error()).To(ContainSubstring("you should provide only 1 MachineConfigLabel")) - - profile.Spec.MachineConfigLabel = nil - setValidNodeSelector(profile) - - errors = profile.validateSelectors() - Expect(profile.validateSelectors()).To(BeEmpty(), "should not have validation errors when machine config selector nil") - }) - - It("should should have 0 or 1 MachineConfigPoolSelector labels", func() { - errors := profile.validateSelectors() - Expect(errors).To(BeEmpty(), "should not have validation errors when the profile has only 1 MachineConfigPoolSelector") - - profile.Spec.MachineConfigPoolSelector["foo"] = "bar" - errors = profile.validateSelectors() - Expect(errors).NotTo(BeEmpty(), "should have validation error when the profile has two machine config pool selectors") - Expect(errors[0].Error()).To(ContainSubstring("you should provide only 1 MachineConfigPoolSelector")) - - profile.Spec.MachineConfigPoolSelector = nil - setValidNodeSelector(profile) - - errors = profile.validateSelectors() - Expect(profile.validateSelectors()).To(BeEmpty(), "should not have validation errors when machine config pool selector nil") - }) - - It("should have sensible NodeSelector in case MachineConfigLabel or MachineConfigPoolSelector is empty", func() { - profile.Spec.MachineConfigLabel = nil - errors := profile.validateSelectors() - Expect(errors).NotTo(BeEmpty(), "should have validation error with invalid NodeSelector") - Expect(errors[0].Error()).To(ContainSubstring("invalid NodeSelector label key that can't be split into domain/role")) - - setValidNodeSelector(profile) - errors = profile.validateSelectors() - Expect(errors).To(BeEmpty(), "should not have validation errors when the node selector has correct format") - }) - }) - - Describe("Hugepages validation", func() { - It("should reject on incorrect default hugepages size", func() { - incorrectDefaultSize := HugePageSize("!#@") - profile.Spec.HugePages.DefaultHugePagesSize = &incorrectDefaultSize - - errors := profile.validateHugePages() - Expect(errors).NotTo(BeEmpty(), "should have validation error when default huge pages size has invalid value") - Expect(errors[0].Error()).To(ContainSubstring("hugepages default size should be equal")) - }) - - It("should reject hugepages allocation with unexpected page size", func() { - profile.Spec.HugePages.Pages = append(profile.Spec.HugePages.Pages, HugePage{ - Count: 128, - Node: pointer.Int32Ptr(0), - Size: "14M", - }) - errors := profile.validateHugePages() - Expect(errors).NotTo(BeEmpty(), "should have validation error when page with invalid format presents") - Expect(errors[0].Error()).To(ContainSubstring(fmt.Sprintf("the page size should be equal to %q or %q", hugepagesSize1G, hugepagesSize2M))) - }) - - When("pages have duplication", func() { - Context("with specified NUMA node", func() { - It("should raise the validation error", func() { - profile.Spec.HugePages.Pages = append(profile.Spec.HugePages.Pages, HugePage{ - Count: 128, - Size: hugepagesSize1G, - Node: pointer.Int32Ptr(0), - }) - profile.Spec.HugePages.Pages = append(profile.Spec.HugePages.Pages, HugePage{ - Count: 64, - Size: hugepagesSize1G, - Node: pointer.Int32Ptr(0), - }) - errors := profile.validateHugePages() - Expect(errors).NotTo(BeEmpty()) - Expect(errors[0].Error()).To(ContainSubstring(fmt.Sprintf("the page with the size %q and with specified NUMA node 0, has duplication", hugepagesSize1G))) - }) - }) - - Context("without specified NUMA node", func() { - 
It("should raise the validation error", func() { - profile.Spec.HugePages.Pages = append(profile.Spec.HugePages.Pages, HugePage{ - Count: 128, - Size: hugepagesSize1G, - }) - errors := profile.validateHugePages() - Expect(errors).NotTo(BeEmpty()) - Expect(errors[0].Error()).To(ContainSubstring(fmt.Sprintf("the page with the size %q and without the specified NUMA node, has duplication", hugepagesSize1G))) - }) - }) - - Context("with not sequentially duplication blocks", func() { - It("should raise the validation error", func() { - profile.Spec.HugePages.Pages = append(profile.Spec.HugePages.Pages, HugePage{ - Count: 128, - Size: hugepagesSize2M, - }) - profile.Spec.HugePages.Pages = append(profile.Spec.HugePages.Pages, HugePage{ - Count: 128, - Size: hugepagesSize1G, - }) - errors := profile.validateHugePages() - Expect(errors).NotTo(BeEmpty()) - Expect(errors[0].Error()).To(ContainSubstring(fmt.Sprintf("the page with the size %q and without the specified NUMA node, has duplication", hugepagesSize1G))) - }) - }) - }) - }) - - Describe("Net validation", func() { - Context("with properly populated fields", func() { - It("should have net fields properly populated", func() { - errors := profile.validateNet() - Expect(errors).To(BeEmpty(), "should not have validation errors with properly populated net devices fields") - }) - }) - Context("with misconfigured fields", func() { - It("should raise the validation syntax errors", func() { - invalidVendor := "123" - invalidDevice := "0x12345" - profile.Spec.Net.Devices[0].InterfaceName = pointer.StringPtr("") - profile.Spec.Net.Devices[0].VendorID = pointer.StringPtr(invalidVendor) - profile.Spec.Net.Devices[0].DeviceID = pointer.StringPtr(invalidDevice) - errors := profile.validateNet() - Expect(len(errors)).To(Equal(3)) - Expect(errors[0].Error()).To(ContainSubstring(fmt.Sprintf("device name cannot be empty"))) - Expect(errors[1].Error()).To(ContainSubstring(fmt.Sprintf("device vendor ID %s has an invalid format. Vendor ID should be represented as 0x<4 hexadecimal digits> (16 bit representation)", invalidVendor))) - Expect(errors[2].Error()).To(ContainSubstring(fmt.Sprintf("device model ID %s has an invalid format. 
Model ID should be represented as 0x<4 hexadecimal digits> (16 bit representation)", invalidDevice))) - - }) - It("should raise the validation errors for missing fields", func() { - profile.Spec.Net.Devices[0].VendorID = nil - profile.Spec.Net.Devices[0].DeviceID = pointer.StringPtr("0x1") - errors := profile.validateNet() - Expect(errors).NotTo(BeEmpty()) - Expect(errors[0].Error()).To(ContainSubstring(fmt.Sprintf("device model ID can not be used without specifying the device vendor ID."))) - }) - }) - }) -}) - -func setValidNodeSelector(profile *PerformanceProfile) { - selector := make(map[string]string) - selector["fooDomain/"+NodeSelectorRole] = "" - profile.Spec.NodeSelector = selector -} diff --git a/pkg/apis/pao/v2/performanceprofile_webhook.go b/pkg/apis/pao/v2/performanceprofile_webhook.go deleted file mode 100644 index 3f6ebba34..000000000 --- a/pkg/apis/pao/v2/performanceprofile_webhook.go +++ /dev/null @@ -1,23 +0,0 @@ -package v2 - -import ( - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/webhook" -) - -var _ webhook.Validator = &PerformanceProfile{} - -// we need this variable only because our validate methods should have access to the client -var validatorClient client.Client - -// SetupWebhookWithManager enables Webhooks - needed for version conversion -func (r *PerformanceProfile) SetupWebhookWithManager(mgr ctrl.Manager) error { - if validatorClient == nil { - validatorClient = mgr.GetClient() - } - - return ctrl.NewWebhookManagedBy(mgr). - For(r). - Complete() -} diff --git a/pkg/apis/pao/v2/zz_generated.deepcopy.go b/pkg/apis/pao/v2/zz_generated.deepcopy.go deleted file mode 100644 index 79b55102e..000000000 --- a/pkg/apis/pao/v2/zz_generated.deepcopy.go +++ /dev/null @@ -1,363 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by controller-gen. DO NOT EDIT. - -package v2 - -import ( - "github.com/openshift/custom-resource-status/conditions/v1" - "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CPU) DeepCopyInto(out *CPU) { - *out = *in - if in.Reserved != nil { - in, out := &in.Reserved, &out.Reserved - *out = new(CPUSet) - **out = **in - } - if in.Isolated != nil { - in, out := &in.Isolated, &out.Isolated - *out = new(CPUSet) - **out = **in - } - if in.BalanceIsolated != nil { - in, out := &in.BalanceIsolated, &out.BalanceIsolated - *out = new(bool) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CPU. -func (in *CPU) DeepCopy() *CPU { - if in == nil { - return nil - } - out := new(CPU) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Device) DeepCopyInto(out *Device) { - *out = *in - if in.InterfaceName != nil { - in, out := &in.InterfaceName, &out.InterfaceName - *out = new(string) - **out = **in - } - if in.VendorID != nil { - in, out := &in.VendorID, &out.VendorID - *out = new(string) - **out = **in - } - if in.DeviceID != nil { - in, out := &in.DeviceID, &out.DeviceID - *out = new(string) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Device. -func (in *Device) DeepCopy() *Device { - if in == nil { - return nil - } - out := new(Device) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HugePage) DeepCopyInto(out *HugePage) { - *out = *in - if in.Node != nil { - in, out := &in.Node, &out.Node - *out = new(int32) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HugePage. -func (in *HugePage) DeepCopy() *HugePage { - if in == nil { - return nil - } - out := new(HugePage) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HugePages) DeepCopyInto(out *HugePages) { - *out = *in - if in.DefaultHugePagesSize != nil { - in, out := &in.DefaultHugePagesSize, &out.DefaultHugePagesSize - *out = new(HugePageSize) - **out = **in - } - if in.Pages != nil { - in, out := &in.Pages, &out.Pages - *out = make([]HugePage, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HugePages. -func (in *HugePages) DeepCopy() *HugePages { - if in == nil { - return nil - } - out := new(HugePages) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NUMA) DeepCopyInto(out *NUMA) { - *out = *in - if in.TopologyPolicy != nil { - in, out := &in.TopologyPolicy, &out.TopologyPolicy - *out = new(string) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NUMA. -func (in *NUMA) DeepCopy() *NUMA { - if in == nil { - return nil - } - out := new(NUMA) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Net) DeepCopyInto(out *Net) { - *out = *in - if in.UserLevelNetworking != nil { - in, out := &in.UserLevelNetworking, &out.UserLevelNetworking - *out = new(bool) - **out = **in - } - if in.Devices != nil { - in, out := &in.Devices, &out.Devices - *out = make([]Device, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Net. -func (in *Net) DeepCopy() *Net { - if in == nil { - return nil - } - out := new(Net) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PerformanceProfile) DeepCopyInto(out *PerformanceProfile) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerformanceProfile. -func (in *PerformanceProfile) DeepCopy() *PerformanceProfile { - if in == nil { - return nil - } - out := new(PerformanceProfile) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PerformanceProfile) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PerformanceProfileList) DeepCopyInto(out *PerformanceProfileList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PerformanceProfile, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerformanceProfileList. -func (in *PerformanceProfileList) DeepCopy() *PerformanceProfileList { - if in == nil { - return nil - } - out := new(PerformanceProfileList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PerformanceProfileList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PerformanceProfileSpec) DeepCopyInto(out *PerformanceProfileSpec) { - *out = *in - if in.CPU != nil { - in, out := &in.CPU, &out.CPU - *out = new(CPU) - (*in).DeepCopyInto(*out) - } - if in.HugePages != nil { - in, out := &in.HugePages, &out.HugePages - *out = new(HugePages) - (*in).DeepCopyInto(*out) - } - if in.MachineConfigLabel != nil { - in, out := &in.MachineConfigLabel, &out.MachineConfigLabel - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.MachineConfigPoolSelector != nil { - in, out := &in.MachineConfigPoolSelector, &out.MachineConfigPoolSelector - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.NodeSelector != nil { - in, out := &in.NodeSelector, &out.NodeSelector - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.RealTimeKernel != nil { - in, out := &in.RealTimeKernel, &out.RealTimeKernel - *out = new(RealTimeKernel) - (*in).DeepCopyInto(*out) - } - if in.AdditionalKernelArgs != nil { - in, out := &in.AdditionalKernelArgs, &out.AdditionalKernelArgs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.NUMA != nil { - in, out := &in.NUMA, &out.NUMA - *out = new(NUMA) - (*in).DeepCopyInto(*out) - } - if in.Net != nil { - in, out := &in.Net, &out.Net - *out = new(Net) - (*in).DeepCopyInto(*out) - } - if in.GloballyDisableIrqLoadBalancing != nil { - in, out := &in.GloballyDisableIrqLoadBalancing, &out.GloballyDisableIrqLoadBalancing - *out = new(bool) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerformanceProfileSpec. -func (in *PerformanceProfileSpec) DeepCopy() *PerformanceProfileSpec { - if in == nil { - return nil - } - out := new(PerformanceProfileSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PerformanceProfileStatus) DeepCopyInto(out *PerformanceProfileStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]v1.Condition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Tuned != nil { - in, out := &in.Tuned, &out.Tuned - *out = new(string) - **out = **in - } - if in.RuntimeClass != nil { - in, out := &in.RuntimeClass, &out.RuntimeClass - *out = new(string) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerformanceProfileStatus. -func (in *PerformanceProfileStatus) DeepCopy() *PerformanceProfileStatus { - if in == nil { - return nil - } - out := new(PerformanceProfileStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RealTimeKernel) DeepCopyInto(out *RealTimeKernel) { - *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = new(bool) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RealTimeKernel. 
-func (in *RealTimeKernel) DeepCopy() *RealTimeKernel { - if in == nil { - return nil - } - out := new(RealTimeKernel) - in.DeepCopyInto(out) - return out -} diff --git a/pkg/pao/cmd/render/render.go b/pkg/pao/cmd/render/render.go deleted file mode 100644 index 2aceeb849..000000000 --- a/pkg/pao/cmd/render/render.go +++ /dev/null @@ -1,165 +0,0 @@ -/* - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package render - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "github.com/ghodss/yaml" - performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2" - "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components" - "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components/manifestset" - "github.com/spf13/cobra" - "github.com/spf13/pflag" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/klog" -) - -type renderOpts struct { - performanceProfileInputFiles performanceProfileFiles - assetsInDir string - assetsOutDir string -} - -type performanceProfileFiles []string - -func (ppf *performanceProfileFiles) String() string { - return fmt.Sprint(*ppf) -} - -func (ppf *performanceProfileFiles) Type() string { - return "performanceProfileFiles" -} - -// Set parse performance-profile-input-files flag and store it in ppf -func (ppf *performanceProfileFiles) Set(value string) error { - if len(*ppf) > 0 { - return errors.New("performance-profile-input-files flag already set") - } - - for _, s := range strings.Split(value, ",") { - *ppf = append(*ppf, s) - } - return nil -} - -//NewRenderCommand creates a render command. 
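The performanceProfileFiles type above is essentially a pflag.Value implementation for a set-once, comma-separated flag. Here is a standalone sketch of the same idea using the standard library's flag.Value interface (which needs only String and Set, whereas pflag also wants Type); the fileList name, flag usage string, and error text are illustrative only.

package main

import (
	"errors"
	"flag"
	"fmt"
	"strings"
)

// fileList is a local stand-in for performanceProfileFiles.
type fileList []string

func (f *fileList) String() string { return fmt.Sprint(*f) }

// Set rejects repeated use of the flag and splits a comma-separated value,
// mirroring performanceProfileFiles.Set above.
func (f *fileList) Set(value string) error {
	if len(*f) > 0 {
		return errors.New("flag already set")
	}
	*f = append(*f, strings.Split(value, ",")...)
	return nil
}

func main() {
	var files fileList
	flag.Var(&files, "performance-profile-input-files", "comma-separated list of manifests")
	flag.Parse()
	fmt.Println(files)
}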
-func NewRenderCommand() *cobra.Command { - renderOpts := renderOpts{} - - cmd := &cobra.Command{ - Use: "render", - Short: "Render performance-addon-operator manifests", - Run: func(cmd *cobra.Command, args []string) { - - if err := renderOpts.Validate(); err != nil { - klog.Fatal(err) - } - - if err := renderOpts.Run(); err != nil { - klog.Fatal(err) - } - }, - } - - renderOpts.AddFlags(cmd.Flags()) - - return cmd -} - -func (r *renderOpts) AddFlags(fs *pflag.FlagSet) { - fs.Var(&r.performanceProfileInputFiles, "performance-profile-input-files", "A comma-separated list of performance-profile manifests.") - fs.StringVar(&r.assetsInDir, "asset-input-dir", components.AssetsDir, "Input path for the assets directory.") - fs.StringVar(&r.assetsOutDir, "asset-output-dir", r.assetsOutDir, "Output path for the rendered manifests.") - // environment variables has precedence over standard input - r.readFlagsFromEnv() -} - -func (r *renderOpts) readFlagsFromEnv() { - if ppInFiles := os.Getenv("PERFORMANCE_PROFILE_INPUT_FILES"); len(ppInFiles) > 0 { - r.performanceProfileInputFiles.Set(ppInFiles) - } - - if assetInDir := os.Getenv("ASSET_INPUT_DIR"); len(assetInDir) > 0 { - r.assetsInDir = assetInDir - } - - if assetsOutDir := os.Getenv("ASSET_OUTPUT_DIR"); len(assetsOutDir) > 0 { - r.assetsOutDir = assetsOutDir - } -} - -func (r *renderOpts) Validate() error { - if len(r.performanceProfileInputFiles) == 0 { - return fmt.Errorf("performance-profile-input-files must be specified") - } - - if len(r.assetsOutDir) == 0 { - return fmt.Errorf("asset-output-dir must be specified") - } - - return nil -} - -func (r *renderOpts) Run() error { - for _, pp := range r.performanceProfileInputFiles { - b, err := ioutil.ReadFile(pp) - if err != nil { - return err - } - - profile := &performancev2.PerformanceProfile{} - err = yaml.Unmarshal(b, profile) - if err != nil { - return err - } - - components, err := manifestset.GetNewComponents(profile, nil) - if err != nil { - return err - } - or := []v1.OwnerReference{ - { - Kind: profile.Kind, - Name: profile.Name, - }, - } - - for _, componentObj := range components.ToObjects() { - componentObj.SetOwnerReferences(or) - } - - for kind, manifest := range components.ToManifestTable() { - b, err := yaml.Marshal(manifest) - if err != nil { - return err - } - - fileName := fmt.Sprintf("%s_%s.yaml", profile.Name, strings.ToLower(kind)) - err = ioutil.WriteFile(filepath.Join(r.assetsOutDir, fileName), b, 0644) - if err != nil { - return err - } - } - } - - return nil -} diff --git a/pkg/pao/controller/performanceprofile/components/consts.go b/pkg/pao/controller/performanceprofile/components/consts.go deleted file mode 100644 index 9318baf14..000000000 --- a/pkg/pao/controller/performanceprofile/components/consts.go +++ /dev/null @@ -1,31 +0,0 @@ -package components - -const ( - // AssetsDir defines the directory with assets under the operator image - AssetsDir = "/assets" -) - -const ( - // ComponentNamePrefix defines the worker role for performance sensitive workflows - // TODO: change it back to longer name once https://bugzilla.redhat.com/show_bug.cgi?id=1787907 fixed - // ComponentNamePrefix = "worker-performance" - ComponentNamePrefix = "performance" - // MachineConfigRoleLabelKey is the label key to use as label and in MachineConfigSelector of MCP which targets the performance profile - MachineConfigRoleLabelKey = "machineconfiguration.openshift.io/role" - // NodeRoleLabelPrefix is the prefix for the role label of a node - NodeRoleLabelPrefix = "node-role.kubernetes.io/" 
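For orientation, the render command's Run method removed above reduces to: read each PerformanceProfile manifest, generate the component objects, and write them out as "<profile-name>_<kind>.yaml" in the output directory. The standalone sketch below illustrates that loop under stated assumptions — miniProfile, the placeholder manifest table, and the file paths are all illustrative, and the real command derives the objects via manifestset.GetNewComponents rather than hand-building them.

package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"
	"strings"

	"github.com/ghodss/yaml"
)

// miniProfile is a placeholder that only captures the metadata.name field.
type miniProfile struct {
	Metadata struct {
		Name string `json:"name"`
	} `json:"metadata"`
}

func main() {
	inputFile, outDir := "profile.yaml", "out" // placeholder paths

	b, err := os.ReadFile(inputFile)
	if err != nil {
		log.Fatal(err)
	}

	var profile miniProfile
	if err := yaml.Unmarshal(b, &profile); err != nil {
		log.Fatal(err)
	}

	// Stand-in for the rendered component table (kind -> object).
	manifests := map[string]interface{}{
		"KubeletConfig": map[string]string{"ownedBy": profile.Metadata.Name},
		"MachineConfig": map[string]string{"ownedBy": profile.Metadata.Name},
	}

	for kind, obj := range manifests {
		out, err := yaml.Marshal(obj)
		if err != nil {
			log.Fatal(err)
		}
		name := fmt.Sprintf("%s_%s.yaml", profile.Metadata.Name, strings.ToLower(kind))
		if err := os.WriteFile(filepath.Join(outDir, name), out, 0644); err != nil {
			log.Fatal(err)
		}
	}
}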
-) - -const ( - // NamespaceNodeTuningOperator defines the tuned profiles namespace - NamespaceNodeTuningOperator = "openshift-cluster-node-tuning-operator" - // ProfileNamePerformance defines the performance tuned profile name - ProfileNamePerformance = "openshift-node-performance" -) - -const ( - // HugepagesSize2M contains the size of 2M hugepages - HugepagesSize2M = "2M" - // HugepagesSize1G contains the size of 1G hugepages - HugepagesSize1G = "1G" -) diff --git a/pkg/pao/controller/performanceprofile/components/kubeletconfig/kubeletconfig.go b/pkg/pao/controller/performanceprofile/components/kubeletconfig/kubeletconfig.go deleted file mode 100644 index 45d20d24e..000000000 --- a/pkg/pao/controller/performanceprofile/components/kubeletconfig/kubeletconfig.go +++ /dev/null @@ -1,167 +0,0 @@ -package kubeletconfig - -import ( - "encoding/json" - "time" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1" - - performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2" - "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components" - machineconfigv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" -) - -const ( - // experimentalKubeletSnippetAnnotation contains the annotation key that should be used to provide a KubeletConfig snippet with additional - // configurations you want to apply on top of the generated KubeletConfig resource. - // To find the specific argument see https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/. - // By default, the performance-addon-operator will override: - // 1. CPU manager policy - // 2. CPU manager reconcile period - // 3. Topology manager policy - // 4. Reserved CPUs - // 5. Memory manager policy - // Please avoid specifying them and use the relevant API to configure these parameters. 
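Since the comment above describes the kubeletconfig.experimental annotation only in prose, a short sketch of how such a snippet is consumed by New() below may help: the JSON value seeds a KubeletConfiguration, after which the operator-owned fields (cpuManagerPolicy, topologyManagerPolicy, reservedSystemCPUs, and so on) are overwritten from the PerformanceProfile. The annotation value used here is just an example, not a recommendation.

package main

import (
	"encoding/json"
	"fmt"
	"log"

	kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1"
)

func main() {
	// Example annotation value; only fields the operator does not manage survive.
	annotation := `{"allowedUnsafeSysctls": ["net.core.somaxconn"], "cpuManagerPolicy": "none"}`

	kubeletConfig := &kubeletconfigv1beta1.KubeletConfiguration{}
	if err := json.Unmarshal([]byte(annotation), kubeletConfig); err != nil {
		log.Fatal(err)
	}

	// The user-supplied sysctl survives ...
	fmt.Println(kubeletConfig.AllowedUnsafeSysctls) // [net.core.somaxconn]

	// ... but operator-owned fields are then set from the profile, so the
	// "none" policy from the annotation does not reach the final object.
	kubeletConfig.CPUManagerPolicy = "static"
	fmt.Println(kubeletConfig.CPUManagerPolicy) // static
}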
- experimentalKubeletSnippetAnnotation = "kubeletconfig.experimental" - cpuManagerPolicyStatic = "static" - cpuManagerPolicyOptionFullPCPUsOnly = "full-pcpus-only" - memoryManagerPolicyStatic = "Static" - defaultKubeReservedMemory = "500Mi" - defaultSystemReservedMemory = "500Mi" - defaultHardEvictionThreshold = "100Mi" - evictionHardMemoryAvailable = "memory.available" -) - -// New returns new KubeletConfig object for performance sensetive workflows -func New(profile *performancev2.PerformanceProfile, profileMCPLabels map[string]string) (*machineconfigv1.KubeletConfig, error) { - name := components.GetComponentName(profile.Name, components.ComponentNamePrefix) - kubeletConfig := &kubeletconfigv1beta1.KubeletConfiguration{} - if v, ok := profile.Annotations[experimentalKubeletSnippetAnnotation]; ok { - if err := json.Unmarshal([]byte(v), kubeletConfig); err != nil { - return nil, err - } - } - - kubeletConfig.TypeMeta = metav1.TypeMeta{ - APIVersion: kubeletconfigv1beta1.SchemeGroupVersion.String(), - Kind: "KubeletConfiguration", - } - - kubeletConfig.CPUManagerPolicy = cpuManagerPolicyStatic - kubeletConfig.CPUManagerReconcilePeriod = metav1.Duration{Duration: 5 * time.Second} - kubeletConfig.TopologyManagerPolicy = kubeletconfigv1beta1.BestEffortTopologyManagerPolicy - - // set the default hard eviction memory threshold - if kubeletConfig.EvictionHard == nil { - kubeletConfig.EvictionHard = map[string]string{} - } - if _, ok := kubeletConfig.EvictionHard[evictionHardMemoryAvailable]; !ok { - kubeletConfig.EvictionHard[evictionHardMemoryAvailable] = defaultHardEvictionThreshold - } - - // set the default memory kube-reserved - if kubeletConfig.KubeReserved == nil { - kubeletConfig.KubeReserved = map[string]string{} - } - if _, ok := kubeletConfig.KubeReserved[string(corev1.ResourceMemory)]; !ok { - kubeletConfig.KubeReserved[string(corev1.ResourceMemory)] = defaultKubeReservedMemory - } - - // set the default memory system-reserved - if kubeletConfig.SystemReserved == nil { - kubeletConfig.SystemReserved = map[string]string{} - } - if _, ok := kubeletConfig.SystemReserved[string(corev1.ResourceMemory)]; !ok { - kubeletConfig.SystemReserved[string(corev1.ResourceMemory)] = defaultSystemReservedMemory - } - - if profile.Spec.CPU != nil && profile.Spec.CPU.Reserved != nil { - kubeletConfig.ReservedSystemCPUs = string(*profile.Spec.CPU.Reserved) - } - - if profile.Spec.NUMA != nil { - if profile.Spec.NUMA.TopologyPolicy != nil { - topologyPolicy := *profile.Spec.NUMA.TopologyPolicy - kubeletConfig.TopologyManagerPolicy = topologyPolicy - - // set the memory manager policy to static only when the topology policy is - // restricted or single NUMA node - if topologyPolicy == kubeletconfigv1beta1.RestrictedTopologyManagerPolicy || - topologyPolicy == kubeletconfigv1beta1.SingleNumaNodeTopologyManagerPolicy { - kubeletConfig.MemoryManagerPolicy = memoryManagerPolicyStatic - - if kubeletConfig.ReservedMemory == nil { - reservedMemory := resource.NewQuantity(0, resource.DecimalSI) - if err := addStringToQuantity(reservedMemory, kubeletConfig.KubeReserved[string(corev1.ResourceMemory)]); err != nil { - return nil, err - } - if err := addStringToQuantity(reservedMemory, kubeletConfig.SystemReserved[string(corev1.ResourceMemory)]); err != nil { - return nil, err - } - if err := addStringToQuantity(reservedMemory, kubeletConfig.EvictionHard[evictionHardMemoryAvailable]); err != nil { - return nil, err - } - - kubeletConfig.ReservedMemory = []kubeletconfigv1beta1.MemoryReservation{ - { - // the NUMA 
node 0 is the only safe choice for non NUMA machines - // in the future we can extend our API to get this information from a user - NumaNode: 0, - Limits: map[corev1.ResourceName]resource.Quantity{ - corev1.ResourceMemory: *reservedMemory, - }, - }, - } - } - - // require full physical CPUs only to ensure maximum isolation - if topologyPolicy == kubeletconfigv1beta1.SingleNumaNodeTopologyManagerPolicy { - if kubeletConfig.CPUManagerPolicyOptions == nil { - kubeletConfig.CPUManagerPolicyOptions = make(map[string]string) - } - - if _, ok := kubeletConfig.CPUManagerPolicyOptions[cpuManagerPolicyOptionFullPCPUsOnly]; !ok { - kubeletConfig.CPUManagerPolicyOptions[cpuManagerPolicyOptionFullPCPUsOnly] = "true" - } - } - } - } - } - - raw, err := json.Marshal(kubeletConfig) - if err != nil { - return nil, err - } - - return &machineconfigv1.KubeletConfig{ - TypeMeta: metav1.TypeMeta{ - APIVersion: machineconfigv1.GroupVersion.String(), - Kind: "KubeletConfig", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Spec: machineconfigv1.KubeletConfigSpec{ - MachineConfigPoolSelector: &metav1.LabelSelector{ - MatchLabels: profileMCPLabels, - }, - KubeletConfig: &runtime.RawExtension{ - Raw: raw, - }, - }, - }, nil -} - -func addStringToQuantity(q *resource.Quantity, value string) error { - v, err := resource.ParseQuantity(value) - if err != nil { - return err - } - q.Add(v) - - return nil -} diff --git a/pkg/pao/controller/performanceprofile/components/kubeletconfig/kubeletconfig_suite_test.go b/pkg/pao/controller/performanceprofile/components/kubeletconfig/kubeletconfig_suite_test.go deleted file mode 100644 index 39830753d..000000000 --- a/pkg/pao/controller/performanceprofile/components/kubeletconfig/kubeletconfig_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package kubeletconfig - -import ( - "testing" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" -) - -func TestKubeletConfig(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Kubelet Config Suite") -} diff --git a/pkg/pao/controller/performanceprofile/components/kubeletconfig/kubeletconfig_test.go b/pkg/pao/controller/performanceprofile/components/kubeletconfig/kubeletconfig_test.go deleted file mode 100644 index df519651d..000000000 --- a/pkg/pao/controller/performanceprofile/components/kubeletconfig/kubeletconfig_test.go +++ /dev/null @@ -1,169 +0,0 @@ -package kubeletconfig - -import ( - "fmt" - - "github.com/ghodss/yaml" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1" - "k8s.io/utils/pointer" - - "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components" - testutils "github.com/openshift/cluster-node-tuning-operator/pkg/pao/utils/testing" -) - -const testReservedMemory = `reservedMemory: - - limits: - memory: 1100Mi - numaNode: 0` - -var _ = Describe("Kubelet Config", func() { - It("should generate yaml with expected parameters", func() { - profile := testutils.NewPerformanceProfile("test") - selectorKey, selectorValue := components.GetFirstKeyAndValue(profile.Spec.MachineConfigPoolSelector) - kc, err := New(profile, map[string]string{selectorKey: selectorValue}) - Expect(err).ToNot(HaveOccurred()) - - y, err := yaml.Marshal(kc) - Expect(err).ToNot(HaveOccurred()) - - manifest := string(y) - - Expect(manifest).To(ContainSubstring(fmt.Sprintf("%s: %s", selectorKey, selectorValue))) - Expect(manifest).To(ContainSubstring("reservedSystemCPUs: 0-3")) - Expect(manifest).To(ContainSubstring("topologyManagerPolicy: single-numa-node")) - Expect(manifest).To(ContainSubstring("cpuManagerPolicy: static")) - Expect(manifest).To(ContainSubstring("memoryManagerPolicy: Static")) - Expect(manifest).To(ContainSubstring("cpuManagerPolicyOptions")) - Expect(manifest).To(ContainSubstring(testReservedMemory)) - }) - - Context("with topology manager restricted policy", func() { - It("should have the memory manager related parameters", func() { - profile := testutils.NewPerformanceProfile("test") - profile.Spec.NUMA.TopologyPolicy = pointer.String(kubeletconfigv1beta1.RestrictedTopologyManagerPolicy) - selectorKey, selectorValue := components.GetFirstKeyAndValue(profile.Spec.MachineConfigPoolSelector) - kc, err := New(profile, map[string]string{selectorKey: selectorValue}) - Expect(err).ToNot(HaveOccurred()) - - y, err := yaml.Marshal(kc) - Expect(err).ToNot(HaveOccurred()) - - manifest := string(y) - Expect(manifest).To(ContainSubstring("memoryManagerPolicy: Static")) - Expect(manifest).To(ContainSubstring(testReservedMemory)) - }) - - It("should not have the cpumanager policy options set", func() { - profile := testutils.NewPerformanceProfile("test") - profile.Spec.NUMA.TopologyPolicy = pointer.String(kubeletconfigv1beta1.RestrictedTopologyManagerPolicy) - selectorKey, selectorValue := components.GetFirstKeyAndValue(profile.Spec.MachineConfigPoolSelector) - kc, err := New(profile, map[string]string{selectorKey: selectorValue}) - Expect(err).ToNot(HaveOccurred()) - - y, err := yaml.Marshal(kc) - Expect(err).ToNot(HaveOccurred()) - - manifest := string(y) - Expect(manifest).ToNot(ContainSubstring("cpuManagerPolicyOptions")) - }) - - }) - - Context("with topology manager best-effort policy", func() { - It("should not have the memory manager related parameters", func() { - profile := testutils.NewPerformanceProfile("test") - profile.Spec.NUMA.TopologyPolicy = pointer.String(kubeletconfigv1beta1.BestEffortTopologyManagerPolicy) - selectorKey, selectorValue := components.GetFirstKeyAndValue(profile.Spec.MachineConfigPoolSelector) - kc, err := New(profile, map[string]string{selectorKey: selectorValue}) - Expect(err).ToNot(HaveOccurred()) - - y, err := yaml.Marshal(kc) - Expect(err).ToNot(HaveOccurred()) - - manifest := string(y) - Expect(manifest).ToNot(ContainSubstring("memoryManagerPolicy: Static")) - Expect(manifest).ToNot(ContainSubstring(testReservedMemory)) - }) - }) - - Context("with additional kubelet arguments", func() { - It("should not override CPU 
manager parameters", func() { - profile := testutils.NewPerformanceProfile("test") - profile.Annotations = map[string]string{ - experimentalKubeletSnippetAnnotation: `{"cpuManagerPolicy": "none", "cpuManagerReconcilePeriod": "10s", "reservedSystemCPUs": "4,5"}`, - } - selectorKey, selectorValue := components.GetFirstKeyAndValue(profile.Spec.MachineConfigPoolSelector) - kc, err := New(profile, map[string]string{selectorKey: selectorValue}) - y, err := yaml.Marshal(kc) - Expect(err).ToNot(HaveOccurred()) - - manifest := string(y) - Expect(manifest).ToNot(ContainSubstring("cpuManagerPolicy: none")) - Expect(manifest).ToNot(ContainSubstring("cpuManagerReconcilePeriod: 10s")) - Expect(manifest).ToNot(ContainSubstring("reservedSystemCPUs: 4-5")) - }) - - It("should not override topology manager parameters", func() { - profile := testutils.NewPerformanceProfile("test") - profile.Annotations = map[string]string{ - experimentalKubeletSnippetAnnotation: `{"topologyManagerPolicy": "none"}`, - } - selectorKey, selectorValue := components.GetFirstKeyAndValue(profile.Spec.MachineConfigPoolSelector) - kc, err := New(profile, map[string]string{selectorKey: selectorValue}) - y, err := yaml.Marshal(kc) - Expect(err).ToNot(HaveOccurred()) - - manifest := string(y) - Expect(manifest).ToNot(ContainSubstring("topologyManagerPolicy: none")) - }) - - It("should not override memory manager policy", func() { - profile := testutils.NewPerformanceProfile("test") - - profile.Annotations = map[string]string{ - experimentalKubeletSnippetAnnotation: `{"memoryManagerPolicy": "None", "reservedMemory": [{"numaNode": 10, "limits": {"test": "1024"}}]}`, - } - selectorKey, selectorValue := components.GetFirstKeyAndValue(profile.Spec.MachineConfigPoolSelector) - kc, err := New(profile, map[string]string{selectorKey: selectorValue}) - y, err := yaml.Marshal(kc) - Expect(err).ToNot(HaveOccurred()) - - manifest := string(y) - Expect(manifest).ToNot(ContainSubstring("memoryManagerPolicy: None")) - Expect(manifest).To(ContainSubstring("numaNode: 10")) - }) - - It("should set the kubelet config accordingly", func() { - profile := testutils.NewPerformanceProfile("test") - profile.Annotations = map[string]string{ - experimentalKubeletSnippetAnnotation: `{"allowedUnsafeSysctls": ["net.core.somaxconn"], "evictionHard": {"memory.available": "200Mi"}}`, - } - selectorKey, selectorValue := components.GetFirstKeyAndValue(profile.Spec.MachineConfigPoolSelector) - kc, err := New(profile, map[string]string{selectorKey: selectorValue}) - y, err := yaml.Marshal(kc) - Expect(err).ToNot(HaveOccurred()) - - manifest := string(y) - Expect(manifest).To(ContainSubstring("net.core.somaxconn")) - Expect(manifest).To(ContainSubstring("memory.available: 200Mi")) - }) - - It("should allow to override the cpumanager policy options and update the kubelet config accordingly", func() { - profile := testutils.NewPerformanceProfile("test") - profile.Annotations = map[string]string{ - experimentalKubeletSnippetAnnotation: `{"allowedUnsafeSysctls": ["net.core.somaxconn"], "cpuManagerPolicyOptions": {"full-pcpus-only": "false"}}`, - } - selectorKey, selectorValue := components.GetFirstKeyAndValue(profile.Spec.MachineConfigPoolSelector) - kc, err := New(profile, map[string]string{selectorKey: selectorValue}) - data, err := yaml.Marshal(kc) - Expect(err).ToNot(HaveOccurred()) - - manifest := string(data) - Expect(manifest).To(ContainSubstring("net.core.somaxconn")) - Expect(manifest).To(ContainSubstring(`full-pcpus-only: "false"`)) - }) - - }) -}) diff --git 
a/pkg/pao/controller/performanceprofile/components/machineconfig/machineconfig.go b/pkg/pao/controller/performanceprofile/components/machineconfig/machineconfig.go deleted file mode 100644 index f4b2ef079..000000000 --- a/pkg/pao/controller/performanceprofile/components/machineconfig/machineconfig.go +++ /dev/null @@ -1,354 +0,0 @@ -package machineconfig - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "fmt" - "io/ioutil" - "path/filepath" - "text/template" - - assets "github.com/openshift/cluster-node-tuning-operator/assets/pao" - - "github.com/coreos/go-systemd/unit" - igntypes "github.com/coreos/ignition/v2/config/v3_2/types" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/utils/pointer" - - performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2" - "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components" - profilecomponent "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components/profile" - machineconfigv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" -) - -const ( - defaultIgnitionVersion = "3.2.0" - defaultIgnitionContentSource = "data:text/plain;charset=utf-8;base64" -) - -const ( - // MCKernelRT is the value of the kernel setting in MachineConfig for the RT kernel - MCKernelRT = "realtime" - // MCKernelDefault is the value of the kernel setting in MachineConfig for the default kernel - MCKernelDefault = "default" - // HighPerformanceRuntime contains the name of the high-performance runtime - HighPerformanceRuntime = "high-performance" - - bashScriptsDir = "/usr/local/bin" - crioConfd = "/etc/crio/crio.conf.d" - crioRuntimesConfig = "99-runtimes.conf" - // OCIHooksConfigDir is the default directory for the OCI hooks - OCIHooksConfigDir = "/etc/containers/oci/hooks.d" - // OCIHooksConfig file contains the low latency hooks configuration - OCIHooksConfig = "99-low-latency-hooks.json" - ociTemplateRPSMask = "RPSMask" - udevRulesDir = "/etc/udev/rules.d" - udevRpsRules = "99-netdev-rps.rules" - // scripts - hugepagesAllocation = "hugepages-allocation" - ociHooks = "low-latency-hooks" - setRPSMask = "set-rps-mask" -) - -const ( - systemdSectionUnit = "Unit" - systemdSectionService = "Service" - systemdSectionInstall = "Install" - systemdDescription = "Description" - systemdBefore = "Before" - systemdEnvironment = "Environment" - systemdType = "Type" - systemdRemainAfterExit = "RemainAfterExit" - systemdExecStart = "ExecStart" - systemdWantedBy = "WantedBy" -) - -const ( - systemdServiceKubelet = "kubelet.service" - systemdServiceTypeOneshot = "oneshot" - systemdTargetMultiUser = "multi-user.target" - systemdTrue = "true" -) - -const ( - environmentHugepagesSize = "HUGEPAGES_SIZE" - environmentHugepagesCount = "HUGEPAGES_COUNT" - environmentNUMANode = "NUMA_NODE" -) - -const ( - templateReservedCpus = "ReservedCpus" -) - -// New returns new machine configuration object for performance sensitive workloads -func New(profile *performancev2.PerformanceProfile) (*machineconfigv1.MachineConfig, error) { - name := GetMachineConfigName(profile) - mc := &machineconfigv1.MachineConfig{ - TypeMeta: metav1.TypeMeta{ - APIVersion: machineconfigv1.GroupVersion.String(), - Kind: "MachineConfig", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: profilecomponent.GetMachineConfigLabel(profile), - }, - Spec: machineconfigv1.MachineConfigSpec{}, - } - - ignitionConfig, err := 
getIgnitionConfig(profile) - if err != nil { - return nil, err - } - - rawIgnition, err := json.Marshal(ignitionConfig) - if err != nil { - return nil, err - } - mc.Spec.Config = runtime.RawExtension{Raw: rawIgnition} - - enableRTKernel := profile.Spec.RealTimeKernel != nil && - profile.Spec.RealTimeKernel.Enabled != nil && - *profile.Spec.RealTimeKernel.Enabled - - if enableRTKernel { - mc.Spec.KernelType = MCKernelRT - } else { - mc.Spec.KernelType = MCKernelDefault - } - - return mc, nil -} - -// GetMachineConfigName generates machine config name from the performance profile -func GetMachineConfigName(profile *performancev2.PerformanceProfile) string { - name := components.GetComponentName(profile.Name, components.ComponentNamePrefix) - return fmt.Sprintf("50-%s", name) -} - -func getIgnitionConfig(profile *performancev2.PerformanceProfile) (*igntypes.Config, error) { - ignitionConfig := &igntypes.Config{ - Ignition: igntypes.Ignition{ - Version: defaultIgnitionVersion, - }, - Storage: igntypes.Storage{ - Files: []igntypes.File{}, - }, - } - - // add script files under the node /usr/local/bin directory - mode := 0700 - for _, script := range []string{hugepagesAllocation, ociHooks, setRPSMask} { - dst := getBashScriptPath(script) - content, err := assets.Scripts.ReadFile(fmt.Sprintf("scripts/%s.sh", script)) - if err != nil { - return nil, err - } - addContent(ignitionConfig, content, dst, &mode) - } - - // add crio config snippet under the node /etc/crio/crio.conf.d/ directory - crioConfdRuntimesMode := 0644 - crioConfigSnippetContent, err := renderCrioConfigSnippet(profile, filepath.Join("configs", crioRuntimesConfig)) - if err != nil { - return nil, err - } - crioConfSnippetDst := filepath.Join(crioConfd, crioRuntimesConfig) - addContent(ignitionConfig, crioConfigSnippetContent, crioConfSnippetDst, &crioConfdRuntimesMode) - - // add crio hooks config under the node cri-o hook directory - crioHooksConfigsMode := 0644 - ociHooksConfigContent, err := GetOCIHooksConfigContent(OCIHooksConfig, profile) - if err != nil { - return nil, err - } - ociHookConfigDst := filepath.Join(OCIHooksConfigDir, OCIHooksConfig) - addContent(ignitionConfig, ociHooksConfigContent, ociHookConfigDst, &crioHooksConfigsMode) - - // add rps udev rule - rpsRulesMode := 0644 - rpsRulesContent, err := assets.Configs.ReadFile(filepath.Join("configs", udevRpsRules)) - if err != nil { - return nil, err - } - rpsRulesDst := filepath.Join(udevRulesDir, udevRpsRules) - addContent(ignitionConfig, rpsRulesContent, rpsRulesDst, &rpsRulesMode) - - if profile.Spec.HugePages != nil { - for _, page := range profile.Spec.HugePages.Pages { - // we already allocated non NUMA specific hugepages via kernel arguments - if page.Node == nil { - continue - } - - hugepagesSize, err := GetHugepagesSizeKilobytes(page.Size) - if err != nil { - return nil, err - } - - hugepagesService, err := getSystemdContent(getHugepagesAllocationUnitOptions( - hugepagesSize, - page.Count, - *page.Node, - )) - if err != nil { - return nil, err - } - - ignitionConfig.Systemd.Units = append(ignitionConfig.Systemd.Units, igntypes.Unit{ - Contents: &hugepagesService, - Enabled: pointer.BoolPtr(true), - Name: getSystemdService(fmt.Sprintf("%s-%skB-NUMA%d", hugepagesAllocation, hugepagesSize, *page.Node)), - }) - } - } - - if profile.Spec.CPU != nil && profile.Spec.CPU.Reserved != nil { - rpsMask, err := components.CPUListToMaskList(string(*profile.Spec.CPU.Reserved)) - if err != nil { - return nil, err - } - - rpsService, err := 
getSystemdContent(getRPSUnitOptions(rpsMask)) - if err != nil { - return nil, err - } - - ignitionConfig.Systemd.Units = append(ignitionConfig.Systemd.Units, igntypes.Unit{ - Contents: &rpsService, - Name: getSystemdService("update-rps@"), - }) - } - - return ignitionConfig, nil -} - -func getBashScriptPath(scriptName string) string { - return fmt.Sprintf("%s/%s.sh", bashScriptsDir, scriptName) -} - -func getSystemdEnvironment(key string, value string) string { - return fmt.Sprintf("%s=%s", key, value) -} - -func getSystemdService(serviceName string) string { - return fmt.Sprintf("%s.service", serviceName) -} - -func getSystemdContent(options []*unit.UnitOption) (string, error) { - outReader := unit.Serialize(options) - outBytes, err := ioutil.ReadAll(outReader) - if err != nil { - return "", err - } - return string(outBytes), nil -} - -// GetOCIHooksConfigContent reads and returns the content of the OCI hook file -func GetOCIHooksConfigContent(configFile string, profile *performancev2.PerformanceProfile) ([]byte, error) { - ociHookConfigTemplate, err := template.ParseFS(assets.Configs, filepath.Join("configs", configFile)) - if err != nil { - return nil, err - } - - rpsMask := "0" // RPS disabled - if profile.Spec.CPU != nil && profile.Spec.CPU.Reserved != nil { - rpsMask, err = components.CPUListToMaskList(string(*profile.Spec.CPU.Reserved)) - if err != nil { - return nil, err - } - } - - outContent := &bytes.Buffer{} - templateArgs := map[string]string{ociTemplateRPSMask: rpsMask} - if err := ociHookConfigTemplate.Execute(outContent, templateArgs); err != nil { - return nil, err - } - - return outContent.Bytes(), nil -} - -// GetHugepagesSizeKilobytes retruns hugepages size in kilobytes -func GetHugepagesSizeKilobytes(hugepagesSize performancev2.HugePageSize) (string, error) { - switch hugepagesSize { - case "1G": - return "1048576", nil - case "2M": - return "2048", nil - default: - return "", fmt.Errorf("can not convert size %q to kilobytes", hugepagesSize) - } -} - -func getHugepagesAllocationUnitOptions(hugepagesSize string, hugepagesCount int32, numaNode int32) []*unit.UnitOption { - return []*unit.UnitOption{ - // [Unit] - // Description - unit.NewUnitOption(systemdSectionUnit, systemdDescription, fmt.Sprintf("Hugepages-%skB allocation on the node %d", hugepagesSize, numaNode)), - // Before - unit.NewUnitOption(systemdSectionUnit, systemdBefore, systemdServiceKubelet), - // [Service] - // Environment - unit.NewUnitOption(systemdSectionService, systemdEnvironment, getSystemdEnvironment(environmentHugepagesCount, fmt.Sprint(hugepagesCount))), - unit.NewUnitOption(systemdSectionService, systemdEnvironment, getSystemdEnvironment(environmentHugepagesSize, hugepagesSize)), - unit.NewUnitOption(systemdSectionService, systemdEnvironment, getSystemdEnvironment(environmentNUMANode, fmt.Sprint(numaNode))), - // Type - unit.NewUnitOption(systemdSectionService, systemdType, systemdServiceTypeOneshot), - // RemainAfterExit - unit.NewUnitOption(systemdSectionService, systemdRemainAfterExit, systemdTrue), - // ExecStart - unit.NewUnitOption(systemdSectionService, systemdExecStart, getBashScriptPath(hugepagesAllocation)), - // [Install] - // WantedBy - unit.NewUnitOption(systemdSectionInstall, systemdWantedBy, systemdTargetMultiUser), - } -} - -func getRPSUnitOptions(rpsMask string) []*unit.UnitOption { - cmd := fmt.Sprintf("%s %%i %s", getBashScriptPath(setRPSMask), rpsMask) - return []*unit.UnitOption{ - // [Unit] - // Description - unit.NewUnitOption(systemdSectionUnit, systemdDescription, 
"Sets network devices RPS mask"), - // [Service] - // Type - unit.NewUnitOption(systemdSectionService, systemdType, systemdServiceTypeOneshot), - // ExecStart - unit.NewUnitOption(systemdSectionService, systemdExecStart, cmd), - } -} - -func addContent(ignitionConfig *igntypes.Config, content []byte, dst string, mode *int) { - contentBase64 := base64.StdEncoding.EncodeToString(content) - ignitionConfig.Storage.Files = append(ignitionConfig.Storage.Files, igntypes.File{ - Node: igntypes.Node{ - Path: dst, - }, - FileEmbedded1: igntypes.FileEmbedded1{ - Contents: igntypes.Resource{ - Source: pointer.StringPtr(fmt.Sprintf("%s,%s", defaultIgnitionContentSource, contentBase64)), - }, - Mode: mode, - }, - }) -} - -func renderCrioConfigSnippet(profile *performancev2.PerformanceProfile, src string) ([]byte, error) { - templateArgs := make(map[string]string) - if profile.Spec.CPU.Reserved != nil { - templateArgs[templateReservedCpus] = string(*profile.Spec.CPU.Reserved) - } - - profileTemplate, err := template.ParseFS(assets.Configs, src) - if err != nil { - return nil, err - } - - crioConfig := &bytes.Buffer{} - if err := profileTemplate.Execute(crioConfig, templateArgs); err != nil { - return nil, err - } - - return crioConfig.Bytes(), nil -} diff --git a/pkg/pao/controller/performanceprofile/components/machineconfig/machineconfig_suite_test.go b/pkg/pao/controller/performanceprofile/components/machineconfig/machineconfig_suite_test.go deleted file mode 100644 index 84ce7ad85..000000000 --- a/pkg/pao/controller/performanceprofile/components/machineconfig/machineconfig_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package machineconfig - -import ( - "testing" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" -) - -func TestMachineConfig(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Machine Config Suite") -} diff --git a/pkg/pao/controller/performanceprofile/components/machineconfig/machineconfig_test.go b/pkg/pao/controller/performanceprofile/components/machineconfig/machineconfig_test.go deleted file mode 100644 index e88e1df92..000000000 --- a/pkg/pao/controller/performanceprofile/components/machineconfig/machineconfig_test.go +++ /dev/null @@ -1,77 +0,0 @@ -package machineconfig - -import ( - "fmt" - - "k8s.io/utils/pointer" - - "github.com/ghodss/yaml" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - - "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components" - testutils "github.com/openshift/cluster-node-tuning-operator/pkg/pao/utils/testing" -) - -const hugepagesAllocationService = ` - - contents: | - [Unit] - Description=Hugepages-1048576kB allocation on the node 0 - Before=kubelet.service - - [Service] - Environment=HUGEPAGES_COUNT=4 - Environment=HUGEPAGES_SIZE=1048576 - Environment=NUMA_NODE=0 - Type=oneshot - RemainAfterExit=true - ExecStart=/usr/local/bin/hugepages-allocation.sh - - [Install] - WantedBy=multi-user.target - enabled: true - name: hugepages-allocation-1048576kB-NUMA0.service -` - -var _ = Describe("Machine Config", func() { - - Context("machine config creation ", func() { - It("should create machine config with valid assets", func() { - profile := testutils.NewPerformanceProfile("test") - profile.Spec.HugePages.Pages[0].Node = pointer.Int32Ptr(0) - - _, err := New(profile) - Expect(err).ToNot(HaveOccurred()) - }) - }) - - Context("with hugepages with specified NUMA node", func() { - var manifest string - - BeforeEach(func() { - profile := testutils.NewPerformanceProfile("test") - profile.Spec.HugePages.Pages[0].Node = pointer.Int32Ptr(0) - - labelKey, labelValue := components.GetFirstKeyAndValue(profile.Spec.MachineConfigLabel) - mc, err := New(profile) - Expect(err).ToNot(HaveOccurred()) - Expect(mc.Spec.KernelType).To(Equal(MCKernelRT)) - - y, err := yaml.Marshal(mc) - Expect(err).ToNot(HaveOccurred()) - - manifest = string(y) - Expect(manifest).To(ContainSubstring(fmt.Sprintf("%s: %s", labelKey, labelValue))) - }) - - It("should not add hugepages kernel boot parameters", func() { - Expect(manifest).ToNot(ContainSubstring("- hugepagesz=1G")) - Expect(manifest).ToNot(ContainSubstring("- hugepages=4")) - }) - - It("should add systemd unit to allocate hugepages", func() { - Expect(manifest).To(ContainSubstring(hugepagesAllocationService)) - }) - - }) -}) diff --git a/pkg/pao/controller/performanceprofile/components/manifestset/manifestset.go b/pkg/pao/controller/performanceprofile/components/manifestset/manifestset.go deleted file mode 100644 index e1eab5518..000000000 --- a/pkg/pao/controller/performanceprofile/components/manifestset/manifestset.go +++ /dev/null @@ -1,79 +0,0 @@ -package manifestset - -import ( - performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2" - tunedv1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/tuned/v1" - "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components/kubeletconfig" - "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components/machineconfig" - profilecomponent "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components/profile" - "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components/runtimeclass" - "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components/tuned" - mcov1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" - - nodev1beta1 "k8s.io/api/node/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// ManifestResultSet contains all component's instances that should be created according to performance-profile -type ManifestResultSet struct { - MachineConfig *mcov1.MachineConfig - KubeletConfig *mcov1.KubeletConfig - Tuned *tunedv1.Tuned - RuntimeClass 
*nodev1beta1.RuntimeClass -} - -// ManifestTable is map with Kind name as key and component's instance as value -type ManifestTable map[string]interface{} - -// ToObjects return a list of all manifests converted to objects -func (ms *ManifestResultSet) ToObjects() []metav1.Object { - objs := make([]metav1.Object, 0) - - objs = append(objs, - ms.MachineConfig.GetObjectMeta(), - ms.KubeletConfig.GetObjectMeta(), - ms.Tuned.GetObjectMeta(), - ms.RuntimeClass.GetObjectMeta(), - ) - return objs -} - -// ToManifestTable return a map with Kind name as key and component's instance as value -func (ms *ManifestResultSet) ToManifestTable() ManifestTable { - manifests := make(map[string]interface{}, 0) - manifests[ms.MachineConfig.Kind] = ms.MachineConfig - manifests[ms.KubeletConfig.Kind] = ms.KubeletConfig - manifests[ms.Tuned.Kind] = ms.Tuned - manifests[ms.RuntimeClass.Kind] = ms.RuntimeClass - return manifests -} - -// GetNewComponents return a list of all component's instances that should be created according to profile -func GetNewComponents(profile *performancev2.PerformanceProfile, profileMCP *mcov1.MachineConfigPool) (*ManifestResultSet, error) { - machineConfigPoolSelector := profilecomponent.GetMachineConfigPoolSelector(profile, profileMCP) - - mc, err := machineconfig.New(profile) - if err != nil { - return nil, err - } - - kc, err := kubeletconfig.New(profile, machineConfigPoolSelector) - if err != nil { - return nil, err - } - - performanceTuned, err := tuned.NewNodePerformance(profile) - if err != nil { - return nil, err - } - - runtimeClass := runtimeclass.New(profile, machineconfig.HighPerformanceRuntime) - - manifestResultSet := ManifestResultSet{ - MachineConfig: mc, - KubeletConfig: kc, - Tuned: performanceTuned, - RuntimeClass: runtimeClass, - } - return &manifestResultSet, nil -} diff --git a/pkg/pao/controller/performanceprofile/components/profile/profile.go b/pkg/pao/controller/performanceprofile/components/profile/profile.go deleted file mode 100644 index 7c2d1134b..000000000 --- a/pkg/pao/controller/performanceprofile/components/profile/profile.go +++ /dev/null @@ -1,57 +0,0 @@ -package profile - -import ( - performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2" - "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components" - - mcov1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" -) - -// GetMachineConfigPoolSelector returns the MachineConfigPoolSelector from the CR or a default value calculated based on NodeSelector -func GetMachineConfigPoolSelector(profile *performancev2.PerformanceProfile, profileMCP *mcov1.MachineConfigPool) map[string]string { - // we do not really need profile.spec.machineConfigPoolSelector anymore, but we should use it for backward compatability - if profile.Spec.MachineConfigPoolSelector != nil { - return profile.Spec.MachineConfigPoolSelector - } - - if profileMCP != nil { - return profileMCP.Labels - } - - // we still need to construct the machineConfigPoolSelector when the command called from the render command - return getDefaultLabel(profile) -} - -// GetMachineConfigLabel returns the MachineConfigLabels from the CR or a default value calculated based on NodeSelector -func GetMachineConfigLabel(profile *performancev2.PerformanceProfile) map[string]string { - if profile.Spec.MachineConfigLabel != nil { - return profile.Spec.MachineConfigLabel - } - - return getDefaultLabel(profile) -} - -func getDefaultLabel(profile 
*performancev2.PerformanceProfile) map[string]string { - nodeSelectorKey, _ := components.GetFirstKeyAndValue(profile.Spec.NodeSelector) - // no error handling needed, it's validated already - _, nodeRole, _ := components.SplitLabelKey(nodeSelectorKey) - - labels := make(map[string]string) - labels[components.MachineConfigRoleLabelKey] = nodeRole - - return labels -} - -// IsPaused returns whether or not a performance profile's reconcile loop is paused -func IsPaused(profile *performancev2.PerformanceProfile) bool { - if profile.Annotations == nil { - return false - } - - isPaused, ok := profile.Annotations[performancev2.PerformanceProfilePauseAnnotation] - if ok && isPaused == "true" { - return true - } - - return false -} diff --git a/pkg/pao/controller/performanceprofile/components/profile/profile_suite_test.go b/pkg/pao/controller/performanceprofile/components/profile/profile_suite_test.go deleted file mode 100644 index 06be7f26b..000000000 --- a/pkg/pao/controller/performanceprofile/components/profile/profile_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package profile - -import ( - "testing" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" -) - -func TestProfile(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Profile Suite") -} diff --git a/pkg/pao/controller/performanceprofile/components/profile/profile_test.go b/pkg/pao/controller/performanceprofile/components/profile/profile_test.go deleted file mode 100644 index b83639813..000000000 --- a/pkg/pao/controller/performanceprofile/components/profile/profile_test.go +++ /dev/null @@ -1,67 +0,0 @@ -package profile - -import ( - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2" - "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components" - - testutils "github.com/openshift/cluster-node-tuning-operator/pkg/pao/utils/testing" -) - -const ( - NodeSelectorRole = "barRole" -) - -var _ = Describe("PerformanceProfile", func() { - var profile *performancev2.PerformanceProfile - - BeforeEach(func() { - profile = testutils.NewPerformanceProfile("test") - }) - - Describe("Defaulting", func() { - It("should return given MachineConfigLabel", func() { - labels := GetMachineConfigLabel(profile) - k, v := components.GetFirstKeyAndValue(labels) - Expect(k).To(Equal(testutils.MachineConfigLabelKey)) - Expect(v).To(Equal(testutils.MachineConfigLabelValue)) - - }) - - It("should return given MachineConfigPoolSelector", func() { - labels := GetMachineConfigPoolSelector(profile, nil) - k, v := components.GetFirstKeyAndValue(labels) - Expect(k).To(Equal(testutils.MachineConfigPoolLabelKey)) - Expect(v).To(Equal(testutils.MachineConfigPoolLabelValue)) - }) - - It("should return default MachineConfigLabels", func() { - profile.Spec.MachineConfigLabel = nil - setValidNodeSelector(profile) - - labels := GetMachineConfigLabel(profile) - k, v := components.GetFirstKeyAndValue(labels) - Expect(k).To(Equal(components.MachineConfigRoleLabelKey)) - Expect(v).To(Equal(NodeSelectorRole)) - - }) - - It("should return default MachineConfigPoolSelector", func() { - profile.Spec.MachineConfigPoolSelector = nil - setValidNodeSelector(profile) - - labels := GetMachineConfigPoolSelector(profile, nil) - k, v := components.GetFirstKeyAndValue(labels) - Expect(k).To(Equal(components.MachineConfigRoleLabelKey)) - Expect(v).To(Equal(NodeSelectorRole)) - - }) - }) -}) - -func setValidNodeSelector(profile 
*performancev2.PerformanceProfile) { - selector := make(map[string]string) - selector["fooDomain/"+NodeSelectorRole] = "" - profile.Spec.NodeSelector = selector -} diff --git a/pkg/pao/controller/performanceprofile/components/runtimeclass/runtimeclass.go b/pkg/pao/controller/performanceprofile/components/runtimeclass/runtimeclass.go deleted file mode 100644 index dcac37736..000000000 --- a/pkg/pao/controller/performanceprofile/components/runtimeclass/runtimeclass.go +++ /dev/null @@ -1,27 +0,0 @@ -package runtimeclass - -import ( - performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2" - "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components" - - nodev1beta1 "k8s.io/api/node/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// New returns a new RuntimeClass object -func New(profile *performancev2.PerformanceProfile, handler string) *nodev1beta1.RuntimeClass { - name := components.GetComponentName(profile.Name, components.ComponentNamePrefix) - return &nodev1beta1.RuntimeClass{ - TypeMeta: metav1.TypeMeta{ - Kind: "RuntimeClass", - APIVersion: "node.k8s.io/v1beta1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Handler: handler, - Scheduling: &nodev1beta1.Scheduling{ - NodeSelector: profile.Spec.NodeSelector, - }, - } -} diff --git a/pkg/pao/controller/performanceprofile/components/tuned/tuned.go b/pkg/pao/controller/performanceprofile/components/tuned/tuned.go deleted file mode 100644 index 5268949ab..000000000 --- a/pkg/pao/controller/performanceprofile/components/tuned/tuned.go +++ /dev/null @@ -1,204 +0,0 @@ -package tuned - -import ( - "bytes" - "fmt" - "path/filepath" - "strconv" - "strings" - "text/template" - - tunedv1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/tuned/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" - "k8s.io/utils/pointer" - - assets "github.com/openshift/cluster-node-tuning-operator/assets/pao" - performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2" - "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components" - profilecomponent "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components/profile" -) - -const ( - cmdlineDelimiter = " " - templateIsolatedCpus = "IsolatedCpus" - templateStaticIsolation = "StaticIsolation" - templateDefaultHugepagesSize = "DefaultHugepagesSize" - templateHugepages = "Hugepages" - templateAdditionalArgs = "AdditionalArgs" - templateGloballyDisableIrqLoadBalancing = "GloballyDisableIrqLoadBalancing" - templateNetDevices = "NetDevices" - nfConntrackHashsize = "nf_conntrack_hashsize=131072" -) - -func new(name string, profiles []tunedv1.TunedProfile, recommends []tunedv1.TunedRecommend) *tunedv1.Tuned { - return &tunedv1.Tuned{ - TypeMeta: metav1.TypeMeta{ - APIVersion: tunedv1.SchemeGroupVersion.String(), - Kind: "Tuned", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: components.NamespaceNodeTuningOperator, - }, - Spec: tunedv1.TunedSpec{ - Profile: profiles, - Recommend: recommends, - }, - } -} - -// NewNodePerformance returns tuned profile for performance sensitive workflows -func NewNodePerformance(profile *performancev2.PerformanceProfile) (*tunedv1.Tuned, error) { - templateArgs := make(map[string]string) - - if profile.Spec.CPU.Isolated != nil { - templateArgs[templateIsolatedCpus] = string(*profile.Spec.CPU.Isolated) - if 
profile.Spec.CPU.BalanceIsolated != nil && *profile.Spec.CPU.BalanceIsolated == false { - templateArgs[templateStaticIsolation] = strconv.FormatBool(true) - } - } - - if profile.Spec.HugePages != nil { - var defaultHugepageSize performancev2.HugePageSize - if profile.Spec.HugePages.DefaultHugePagesSize != nil { - defaultHugepageSize = *profile.Spec.HugePages.DefaultHugePagesSize - templateArgs[templateDefaultHugepagesSize] = string(defaultHugepageSize) - } - - var is2MHugepagesRequested *bool - var hugepages []string - for _, page := range profile.Spec.HugePages.Pages { - // we can not allocate huge pages on the specific NUMA node via kernel boot arguments - if page.Node != nil { - // a user requested to allocate 2M huge pages on the specific NUMA node, - // append dummy kernel arguments - if page.Size == components.HugepagesSize2M && is2MHugepagesRequested == nil { - is2MHugepagesRequested = pointer.BoolPtr(true) - } - continue - } - - // a user requested to allocated 2M huge pages without specifying the node - // we need to append 2M hugepages kernel arguments anyway, no need to add dummy - // kernel arguments - if page.Size == components.HugepagesSize2M { - is2MHugepagesRequested = pointer.BoolPtr(false) - } - - hugepages = append(hugepages, fmt.Sprintf("hugepagesz=%s", string(page.Size))) - hugepages = append(hugepages, fmt.Sprintf("hugepages=%d", page.Count)) - } - - // append dummy 2M huge pages kernel arguments to guarantee that the kernel will create 2M related files - // and directories under the filesystem - if is2MHugepagesRequested != nil && *is2MHugepagesRequested { - if defaultHugepageSize == components.HugepagesSize1G { - hugepages = append(hugepages, fmt.Sprintf("hugepagesz=%s", components.HugepagesSize2M)) - hugepages = append(hugepages, fmt.Sprintf("hugepages=%d", 0)) - } - } - - hugepagesArgs := strings.Join(hugepages, cmdlineDelimiter) - templateArgs[templateHugepages] = hugepagesArgs - } - - if profile.Spec.AdditionalKernelArgs != nil { - templateArgs[templateAdditionalArgs] = strings.Join(profile.Spec.AdditionalKernelArgs, cmdlineDelimiter) - } - - if profile.Spec.GloballyDisableIrqLoadBalancing != nil && - *profile.Spec.GloballyDisableIrqLoadBalancing == true { - templateArgs[templateGloballyDisableIrqLoadBalancing] = strconv.FormatBool(true) - } - - //set default [net] field first, override if needed. 
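Read on its own, the hugepages branch of the deleted NewNodePerformance code above reduces to a small amount of logic. The following is a simplified, standalone restatement of it (Page is a stand-in for performancev2.HugePage, not the real type):

    package main

    import "fmt"

    // Page is a simplified stand-in for performancev2.HugePage.
    type Page struct {
        Size  string // e.g. "1G" or "2M"
        Count int32
        Node  *int32 // nil means not pinned to a NUMA node
    }

    // hugepagesKernelArgs rebuilds the hugepagesz=/hugepages= kernel arguments the
    // way the deleted code does: NUMA-pinned pages are skipped (they are allocated
    // by the hugepages-allocation systemd unit instead), and when 2M pages are only
    // requested per NUMA node while the default size is 1G, a dummy
    // "hugepagesz=2M hugepages=0" pair is appended so the kernel still creates the
    // 2M hugetlbfs entries.
    func hugepagesKernelArgs(defaultSize string, pages []Page) []string {
        var args []string
        pinned2M, global2M := false, false
        for _, p := range pages {
            if p.Node != nil {
                if p.Size == "2M" {
                    pinned2M = true
                }
                continue
            }
            if p.Size == "2M" {
                global2M = true
            }
            args = append(args, fmt.Sprintf("hugepagesz=%s", p.Size), fmt.Sprintf("hugepages=%d", p.Count))
        }
        if pinned2M && !global2M && defaultSize == "1G" {
            args = append(args, "hugepagesz=2M", "hugepages=0")
        }
        return args
    }

    func main() {
        node0 := int32(0)
        // Prints: [hugepagesz=1G hugepages=4 hugepagesz=2M hugepages=0]
        fmt.Println(hugepagesKernelArgs("1G", []Page{
            {Size: "1G", Count: 4},
            {Size: "2M", Count: 128, Node: &node0},
        }))
    }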
- templateArgs[templateNetDevices] = fmt.Sprintf("[net]\n%s", nfConntrackHashsize) - if profile.Spec.Net != nil && *profile.Spec.Net.UserLevelNetworking && profile.Spec.CPU.Reserved != nil { - - reservedSet, err := cpuset.Parse(string(*profile.Spec.CPU.Reserved)) - if err != nil { - return nil, err - } - reserveCPUcount := reservedSet.Size() - - var devices []string - var tunedNetDevicesOutput []string - netPluginSequence := 0 - netPluginString := "" - - for _, device := range profile.Spec.Net.Devices { - devices = make([]string, 0) - if device.DeviceID != nil { - devices = append(devices, "^ID_MODEL_ID="+*device.DeviceID) - } - if device.VendorID != nil { - devices = append(devices, "^ID_VENDOR_ID="+*device.VendorID) - } - if device.InterfaceName != nil { - deviceNameAmendedRegex := strings.Replace(*device.InterfaceName, "*", ".*", -1) - if strings.HasPrefix(*device.InterfaceName, "!") { - devices = append(devices, "^INTERFACE="+"(?!"+deviceNameAmendedRegex+")") - } else { - devices = append(devices, "^INTERFACE="+deviceNameAmendedRegex) - } - } - // Final regex format can be one of the following formats: - // devicesUdevRegex = ^INTERFACE=InterfaceName' (InterfaceName can also hold .* representing * wildcard) - // devicesUdevRegex = ^INTERFACE(?!InterfaceName)' (InterfaceName can starting with ?! represents ! wildcard) - // devicesUdevRegex = ^ID_VENDOR_ID=VendorID' - // devicesUdevRegex = ^ID_MODEL_ID=DeviceID[\s\S]*^ID_VENDOR_ID=VendorID' - // devicesUdevRegex = ^ID_MODEL_ID=DeviceID[\s\S]*^ID_VENDOR_ID=VendorID[\s\S]*^INTERFACE=InterfaceName' - // devicesUdevRegex = ^ID_MODEL_ID=DeviceID[\s\S]*^ID_VENDOR_ID=VendorID[\s\S]*^INTERFACE=(?!InterfaceName)' - // Important note: The order of the key must be preserved - INTERFACE, ID_MODEL_ID, ID_VENDOR_ID (in that order) - devicesUdevRegex := strings.Join(devices, `[\s\S]*`) - if netPluginSequence > 0 { - netPluginString = "_" + strconv.Itoa(netPluginSequence) - } - tunedNetDevicesOutput = append(tunedNetDevicesOutput, fmt.Sprintf("\n[net%s]\ntype=net\ndevices_udev_regex=%s\nchannels=combined %d\n%s", netPluginString, devicesUdevRegex, reserveCPUcount, nfConntrackHashsize)) - netPluginSequence++ - } - //nfConntrackHashsize - if len(tunedNetDevicesOutput) == 0 { - templateArgs[templateNetDevices] = fmt.Sprintf("[net]\nchannels=combined %d\n%s", reserveCPUcount, nfConntrackHashsize) - } else { - templateArgs[templateNetDevices] = strings.Join(tunedNetDevicesOutput, "") - } - } - - profileData, err := getProfileData(filepath.Join("tuned", components.ProfileNamePerformance), templateArgs) - if err != nil { - return nil, err - } - - name := components.GetComponentName(profile.Name, components.ProfileNamePerformance) - profiles := []tunedv1.TunedProfile{ - { - Name: &name, - Data: &profileData, - }, - } - - priority := uint64(20) - recommends := []tunedv1.TunedRecommend{ - { - Profile: &name, - Priority: &priority, - MachineConfigLabels: profilecomponent.GetMachineConfigLabel(profile), - }, - } - return new(name, profiles, recommends), nil -} - -func getProfileData(tunedTemplate string, data interface{}) (string, error) { - profileTemplate, err := template.ParseFS(assets.Tuned, tunedTemplate) - if err != nil { - return "", err - } - - profile := &bytes.Buffer{} - if err := profileTemplate.Execute(profile, data); err != nil { - return "", err - } - return profile.String(), nil -} diff --git a/pkg/pao/controller/performanceprofile/components/tuned/tuned_suite_test.go b/pkg/pao/controller/performanceprofile/components/tuned/tuned_suite_test.go 
deleted file mode 100644 index 57672cce1..000000000 --- a/pkg/pao/controller/performanceprofile/components/tuned/tuned_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package tuned - -import ( - "testing" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" -) - -func TestTuned(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Tuned Suite") -} diff --git a/pkg/pao/controller/performanceprofile/components/tuned/tuned_test.go b/pkg/pao/controller/performanceprofile/components/tuned/tuned_test.go deleted file mode 100644 index cf202a78d..000000000 --- a/pkg/pao/controller/performanceprofile/components/tuned/tuned_test.go +++ /dev/null @@ -1,305 +0,0 @@ -package tuned - -import ( - "fmt" - "regexp" - "strconv" - "strings" - - "github.com/ghodss/yaml" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2" - "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components" - testutils "github.com/openshift/cluster-node-tuning-operator/pkg/pao/utils/testing" - - cpuset "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" - "k8s.io/utils/pointer" -) - -const expectedMatchSelector = ` - - machineConfigLabels: - mcKey: mcValue -` - -var ( - cmdlineCPUsPartitioning = regexp.MustCompile(`\s*cmdline_cpu_part=\+\s*nohz=on\s+rcu_nocbs=\${isolated_cores}\s+tuned.non_isolcpus=\${not_isolated_cpumask}\s+intel_pstate=disable\s+nosoftlockup\s*`) - cmdlineRealtimeWithCPUBalancing = regexp.MustCompile(`\s*cmdline_realtime=\+\s*tsc=nowatchdog\s+intel_iommu=on\s+iommu=pt\s+isolcpus=managed_irq,\${isolated_cores}\s+systemd.cpu_affinity=\${not_isolated_cores_expanded}\s*`) - cmdlineRealtimeWithoutCPUBalancing = regexp.MustCompile(`\s*cmdline_realtime=\+\s*tsc=nowatchdog\s+intel_iommu=on\s+iommu=pt\s+isolcpus=domain,managed_irq,\${isolated_cores}\s+systemd.cpu_affinity=\${not_isolated_cores_expanded}\s*`) - cmdlineHugepages = regexp.MustCompile(`\s*cmdline_hugepages=\+\s*default_hugepagesz=1G\s+hugepagesz=1G\s+hugepages=4\s*`) - cmdlineAdditionalArg = regexp.MustCompile(`\s*cmdline_additionalArg=\+\s*test1=val1\s+test2=val2\s*`) - cmdlineDummy2MHugePages = regexp.MustCompile(`\s*cmdline_hugepages=\+\s*default_hugepagesz=1G\s+hugepagesz=1G\s+hugepages=4\s+hugepagesz=2M\s+hugepages=0\s*`) - cmdlineMultipleHugePages = regexp.MustCompile(`\s*cmdline_hugepages=\+\s*default_hugepagesz=1G\s+hugepagesz=1G\s+hugepages=4\s+hugepagesz=2M\s+hugepages=128\s*`) -) - -var additionalArgs = []string{"test1=val1", "test2=val2"} - -var _ = Describe("Tuned", func() { - var profile *performancev2.PerformanceProfile - - BeforeEach(func() { - profile = testutils.NewPerformanceProfile("test") - }) - - getTunedManifest := func(profile *performancev2.PerformanceProfile) string { - tuned, err := NewNodePerformance(profile) - Expect(err).ToNot(HaveOccurred()) - y, err := yaml.Marshal(tuned) - Expect(err).ToNot(HaveOccurred()) - return string(y) - } - - Context("with worker performance profile", func() { - It("should generate yaml with expected parameters", func() { - manifest := getTunedManifest(profile) - - Expect(manifest).To(ContainSubstring(expectedMatchSelector)) - Expect(manifest).To(ContainSubstring(fmt.Sprintf("isolated_cores=4-7"))) - By("Populating CPU partitioning cmdline") - Expect(cmdlineCPUsPartitioning.MatchString(manifest)).To(BeTrue()) - By("Populating realtime cmdline") - Expect(cmdlineRealtimeWithCPUBalancing.MatchString(manifest)).To(BeTrue()) - By("Populating hugepages cmdline") - 
Expect(cmdlineHugepages.MatchString(manifest)).To(BeTrue()) - By("Populating empty additional kernel arguments cmdline") - Expect(manifest).To(ContainSubstring("cmdline_additionalArg=")) - - }) - - It("should generate yaml with expected parameters for Isolated balancing disabled", func() { - profile.Spec.CPU.BalanceIsolated = pointer.BoolPtr(false) - manifest := getTunedManifest(profile) - - Expect(cmdlineRealtimeWithoutCPUBalancing.MatchString(manifest)).To(BeTrue()) - }) - - It("should generate yaml with expected parameters for additional kernel arguments", func() { - profile.Spec.AdditionalKernelArgs = additionalArgs - manifest := getTunedManifest(profile) - - Expect(cmdlineAdditionalArg.MatchString(manifest)).To(BeTrue()) - }) - - It("should not allocate hugepages on the specific NUMA node via kernel arguments", func() { - manifest := getTunedManifest(profile) - Expect(strings.Count(manifest, "hugepagesz=")).Should(BeNumerically("==", 2)) - Expect(strings.Count(manifest, "hugepages=")).Should(BeNumerically("==", 3)) - - profile.Spec.HugePages.Pages[0].Node = pointer.Int32Ptr(1) - manifest = getTunedManifest(profile) - Expect(strings.Count(manifest, "hugepagesz=")).Should(BeNumerically("==", 1)) - Expect(strings.Count(manifest, "hugepages=")).Should(BeNumerically("==", 2)) - }) - - Context("with 1G default huge pages", func() { - Context("with requested 2M huge pages allocation on the specified node", func() { - It("should append the dummy 2M huge pages kernel arguments", func() { - profile.Spec.HugePages.Pages = append(profile.Spec.HugePages.Pages, performancev2.HugePage{ - Size: components.HugepagesSize2M, - Count: 128, - Node: pointer.Int32Ptr(0), - }) - - manifest := getTunedManifest(profile) - Expect(cmdlineDummy2MHugePages.MatchString(manifest)).To(BeTrue()) - }) - }) - - Context("with requested 2M huge pages allocation via kernel arguments", func() { - It("should not append the dummy 2M kernel arguments", func() { - profile.Spec.HugePages.Pages = append(profile.Spec.HugePages.Pages, performancev2.HugePage{ - Size: components.HugepagesSize2M, - Count: 128, - }) - - manifest := getTunedManifest(profile) - Expect(cmdlineDummy2MHugePages.MatchString(manifest)).To(BeFalse()) - Expect(cmdlineMultipleHugePages.MatchString(manifest)).To(BeTrue()) - }) - }) - - Context("without requested 2M hugepages", func() { - It("should not append dummy 2M huge pages kernel arguments", func() { - manifest := getTunedManifest(profile) - Expect(cmdlineDummy2MHugePages.MatchString(manifest)).To(BeFalse()) - }) - }) - - Context("with requested 2M huge pages allocation on the specified node and via kernel arguments", func() { - It("should not append the dummy 2M kernel arguments", func() { - profile.Spec.HugePages.Pages = append(profile.Spec.HugePages.Pages, performancev2.HugePage{ - Size: components.HugepagesSize2M, - Count: 128, - Node: pointer.Int32Ptr(0), - }) - profile.Spec.HugePages.Pages = append(profile.Spec.HugePages.Pages, performancev2.HugePage{ - Size: components.HugepagesSize2M, - Count: 128, - }) - - manifest := getTunedManifest(profile) - Expect(cmdlineDummy2MHugePages.MatchString(manifest)).To(BeFalse()) - Expect(cmdlineMultipleHugePages.MatchString(manifest)).To(BeTrue()) - }) - }) - }) - - Context("with 2M default huge pages", func() { - Context("with requested 2M huge pages allocation on the specified node", func() { - It("should not append the dummy 2M huge pages kernel arguments", func() { - defaultSize := performancev2.HugePageSize(components.HugepagesSize2M) - 
profile.Spec.HugePages.DefaultHugePagesSize = &defaultSize - profile.Spec.HugePages.Pages = append(profile.Spec.HugePages.Pages, performancev2.HugePage{ - Size: components.HugepagesSize2M, - Count: 128, - Node: pointer.Int32Ptr(0), - }) - - manifest := getTunedManifest(profile) - Expect(cmdlineDummy2MHugePages.MatchString(manifest)).To(BeFalse()) - Expect(cmdlineMultipleHugePages.MatchString(manifest)).To(BeFalse()) - }) - }) - }) - - Context("with user level networking enabled", func() { - Context("with default net device queues (all devices set)", func() { - It("should set the default netqueues count to reserved CPUs count", func() { - profile.Spec.Net = &performancev2.Net{ - UserLevelNetworking: pointer.BoolPtr(true), - } - manifest := getTunedManifest(profile) - reservedSet, err := cpuset.Parse(string(*profile.Spec.CPU.Reserved)) - Expect(err).ToNot(HaveOccurred()) - reserveCPUcount := reservedSet.Size() - channelsRegex := regexp.MustCompile(`\s*channels=combined\s*` + strconv.Itoa(reserveCPUcount) + `\s*`) - Expect(channelsRegex.MatchString(manifest)).To(BeTrue()) - }) - It("should set by interface name with reserved CPUs count", func() { - netDeviceName := "eth*" - //regex field should be: devices_udev_regex=^INTERFACE=eth.* - devicesUdevRegex := "\\^INTERFACE=" + strings.Replace(netDeviceName, "*", "\\.\\*", -1) - - profile.Spec.Net = &performancev2.Net{ - UserLevelNetworking: pointer.BoolPtr(true), - Devices: []performancev2.Device{ - { - InterfaceName: &netDeviceName, - }, - }} - manifest := getTunedManifest(profile) - reservedSet, err := cpuset.Parse(string(*profile.Spec.CPU.Reserved)) - Expect(err).ToNot(HaveOccurred()) - reserveCPUcount := reservedSet.Size() - channelsRegex := regexp.MustCompile(`\s*\[net\]\\ntype=net\\ndevices_udev_regex=` + devicesUdevRegex + `\\nchannels=combined\s*` + strconv.Itoa(reserveCPUcount) + `\s*`) - Expect(channelsRegex.MatchString(manifest)).To(BeTrue()) - }) - It("should set by negative interface name with reserved CPUs count", func() { - netDeviceName := "!ens5" - //regex field should be: devices_udev_regex=^INTERFACE=(?!ens5) - devicesUdevRegex := "\\^INTERFACE=\\(\\?!" 
+ strings.Replace(netDeviceName, "*", "\\.\\*", -1) + "\\)" - - profile.Spec.Net = &performancev2.Net{ - UserLevelNetworking: pointer.BoolPtr(true), - Devices: []performancev2.Device{ - { - InterfaceName: &netDeviceName, - }, - }} - manifest := getTunedManifest(profile) - reservedSet, err := cpuset.Parse(string(*profile.Spec.CPU.Reserved)) - Expect(err).ToNot(HaveOccurred()) - reserveCPUcount := reservedSet.Size() - channelsRegex := regexp.MustCompile(`\s*\[net\]\\ntype=net\\ndevices_udev_regex=` + devicesUdevRegex + `\\nchannels=combined\s*` + strconv.Itoa(reserveCPUcount) + `\s*`) - Expect(channelsRegex.MatchString(manifest)).To(BeTrue()) - }) - It("should set by specific vendor with reserved CPUs count", func() { - netDeviceVendorID := "0x1af4" - //regex field should be: devices_udev_regex=^ID_VENDOR_ID=0x1af4 - devicesUdevRegex := "\\^ID_VENDOR_ID=" + netDeviceVendorID - - profile.Spec.Net = &performancev2.Net{ - UserLevelNetworking: pointer.BoolPtr(true), - Devices: []performancev2.Device{ - { - VendorID: &netDeviceVendorID, - }, - }} - manifest := getTunedManifest(profile) - reservedSet, err := cpuset.Parse(string(*profile.Spec.CPU.Reserved)) - Expect(err).ToNot(HaveOccurred()) - reserveCPUcount := reservedSet.Size() - channelsRegex := regexp.MustCompile(`\s*\[net\]\\ntype=net\\ndevices_udev_regex=` + devicesUdevRegex + `\\nchannels=combined\s*` + strconv.Itoa(reserveCPUcount) + `\s*`) - Expect(channelsRegex.MatchString(manifest)).To(BeTrue()) - }) - It("should set by specific vendor and model with reserved CPUs count", func() { - netDeviceVendorID := "0x1af4" - netDeviceModelID := "0x1000" - //regex field should be: devices_udev_regex=^ID_MODEL_ID=0x1000[\s\S]*^ID_VENDOR_ID=0x1af4 - devicesUdevRegex := `\^ID_MODEL_ID=` + netDeviceModelID + `\[\\\\s\\\\S]\*\^ID_VENDOR_ID=` + netDeviceVendorID - - profile.Spec.Net = &performancev2.Net{ - UserLevelNetworking: pointer.BoolPtr(true), - Devices: []performancev2.Device{ - { - DeviceID: &netDeviceModelID, - VendorID: &netDeviceVendorID, - }, - }} - manifest := getTunedManifest(profile) - reservedSet, err := cpuset.Parse(string(*profile.Spec.CPU.Reserved)) - Expect(err).ToNot(HaveOccurred()) - reserveCPUcount := reservedSet.Size() - channelsRegex := regexp.MustCompile(`\s*\[net\]\\ntype=net\\ndevices_udev_regex=` + devicesUdevRegex + `\\nchannels=combined\s*` + strconv.Itoa(reserveCPUcount) + `\s*`) - Expect(channelsRegex.MatchString(manifest)).To(BeTrue()) - }) - It("should set by specific vendor,model and interface name with reserved CPUs count", func() { - netDeviceName := "ens5" - netDeviceVendorID := "0x1af4" - netDeviceModelID := "0x1000" - //regex field should be: devices_udev_regex=^ID_MODEL_ID=0x1000[\s\S]*^ID_VENDOR_ID=0x1af4[\s\S]*^INTERFACE=ens5 - devicesUdevRegex := `\^ID_MODEL_ID=` + netDeviceModelID + `\[\\\\s\\\\S]\*\^ID_VENDOR_ID=` + netDeviceVendorID + `\[\\\\s\\\\S]\*\^INTERFACE=` + netDeviceName - - profile.Spec.Net = &performancev2.Net{ - UserLevelNetworking: pointer.BoolPtr(true), - Devices: []performancev2.Device{ - { - InterfaceName: &netDeviceName, - DeviceID: &netDeviceModelID, - VendorID: &netDeviceVendorID, - }, - }} - manifest := getTunedManifest(profile) - reservedSet, err := cpuset.Parse(string(*profile.Spec.CPU.Reserved)) - Expect(err).ToNot(HaveOccurred()) - reserveCPUcount := reservedSet.Size() - channelsRegex := regexp.MustCompile(`\s*\[net\]\\ntype=net\\ndevices_udev_regex=` + devicesUdevRegex + `\\nchannels=combined\s*` + strconv.Itoa(reserveCPUcount) + `\s*`) - 
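The devices_udev_regex expectations asserted by the tests above mirror how the deleted NewNodePerformance code assembles the regex from the optional device ID, vendor ID and interface name. A standalone sketch of that assembly, keeping the same field order and the same "*" and "!" handling:

    package main

    import (
        "fmt"
        "strings"
    )

    // deviceUdevRegex joins the optional matchers with `[\s\S]*`, in the same
    // order as the deleted code: ID_MODEL_ID, then ID_VENDOR_ID, then INTERFACE.
    // "*" in the interface name becomes ".*"; a name starting with "!" is wrapped
    // in a "(?!...)" lookahead (the leading "!" is not stripped, matching the
    // deleted code and its tests).
    func deviceUdevRegex(deviceID, vendorID, ifaceName string) string {
        var parts []string
        if deviceID != "" {
            parts = append(parts, "^ID_MODEL_ID="+deviceID)
        }
        if vendorID != "" {
            parts = append(parts, "^ID_VENDOR_ID="+vendorID)
        }
        if ifaceName != "" {
            amended := strings.Replace(ifaceName, "*", ".*", -1)
            if strings.HasPrefix(ifaceName, "!") {
                parts = append(parts, "^INTERFACE=(?!"+amended+")")
            } else {
                parts = append(parts, "^INTERFACE="+amended)
            }
        }
        return strings.Join(parts, `[\s\S]*`)
    }

    func main() {
        // ^ID_MODEL_ID=0x1000[\s\S]*^ID_VENDOR_ID=0x1af4[\s\S]*^INTERFACE=ens5
        fmt.Println(deviceUdevRegex("0x1000", "0x1af4", "ens5"))
    }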
Expect(channelsRegex.MatchString(manifest)).To(BeTrue()) - }) - It("should set by specific vendor,model and negative interface name with reserved CPUs count", func() { - netDeviceName := "!ens5" - netDeviceVendorID := "0x1af4" - netDeviceModelID := "0x1000" - //regex field should be: devices_udev_regex=^ID_MODEL_ID=0x1000[\\s\\S]*^ID_VENDOR_ID=0x1af4[\\s\\S]*^INTERFACE=(?!ens5) - devicesUdevRegex := `\^ID_MODEL_ID=` + netDeviceModelID + `\[\\\\s\\\\S]\*\^ID_VENDOR_ID=` + netDeviceVendorID + `\[\\\\s\\\\S]\*\^INTERFACE=\(\?!` + netDeviceName + `\)` - - profile.Spec.Net = &performancev2.Net{ - UserLevelNetworking: pointer.BoolPtr(true), - Devices: []performancev2.Device{ - { - InterfaceName: &netDeviceName, - DeviceID: &netDeviceModelID, - VendorID: &netDeviceVendorID, - }, - }} - manifest := getTunedManifest(profile) - reservedSet, err := cpuset.Parse(string(*profile.Spec.CPU.Reserved)) - Expect(err).ToNot(HaveOccurred()) - reserveCPUcount := reservedSet.Size() - channelsRegex := regexp.MustCompile(`\s*\[net\]\\ntype=net\\ndevices_udev_regex=` + devicesUdevRegex + `\\nchannels=combined\s*` + strconv.Itoa(reserveCPUcount) + `\s*`) - Expect(channelsRegex.MatchString(manifest)).To(BeTrue()) - }) - }) - }) - }) -}) diff --git a/pkg/pao/controller/performanceprofile/components/utils.go b/pkg/pao/controller/performanceprofile/components/utils.go deleted file mode 100644 index 81869b04b..000000000 --- a/pkg/pao/controller/performanceprofile/components/utils.go +++ /dev/null @@ -1,141 +0,0 @@ -package components - -import ( - "bytes" - "fmt" - "math/big" - "strconv" - "strings" - - "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" -) - -const bitsInWord = 32 - -// GetComponentName returns the component name for the specific performance profile -func GetComponentName(profileName string, prefix string) string { - return fmt.Sprintf("%s-%s", prefix, profileName) -} - -// GetFirstKeyAndValue return the first key / value pair of a map -func GetFirstKeyAndValue(m map[string]string) (string, string) { - for k, v := range m { - return k, v - } - return "", "" -} - -// SplitLabelKey returns the given label key splitted up in domain and role -func SplitLabelKey(s string) (domain, role string, err error) { - parts := strings.Split(s, "/") - if len(parts) != 2 { - return "", "", fmt.Errorf("Can't split %s", s) - } - return parts[0], parts[1], nil -} - -// CPUListToHexMask converts a list of cpus into a cpu mask represented in hexdecimal -func CPUListToHexMask(cpulist string) (hexMask string, err error) { - cpus, err := cpuset.Parse(cpulist) - if err != nil { - return "", err - } - - reservedCPUs := cpus.ToSlice() - currMask := big.NewInt(0) - for _, cpu := range reservedCPUs { - x := new(big.Int).Lsh(big.NewInt(1), uint(cpu)) - currMask.Or(currMask, x) - } - return fmt.Sprintf("%064x", currMask), nil -} - -// CPUListToMaskList converts a list of cpus into a cpu mask represented -// in a list of hexadecimal mask devided by a delimiter "," -func CPUListToMaskList(cpulist string) (hexMask string, err error) { - maskStr, err := CPUListToHexMask(cpulist) - if err != nil { - return "", nil - } - index := 0 - for index < (len(maskStr) - 8) { - if maskStr[index:index+8] != "00000000" { - break - } - index = index + 8 - } - var b bytes.Buffer - for index <= (len(maskStr) - 16) { - b.WriteString(maskStr[index : index+8]) - b.WriteString(",") - index = index + 8 - } - b.WriteString(maskStr[index : index+8]) - trimmedCPUMaskList := b.String() - return trimmedCPUMaskList, nil -} - -// CPULists allows easy checks between reserved 
and isolated cpu set definitons -type CPULists struct { - reserved cpuset.CPUSet - isolated cpuset.CPUSet -} - -// Intersect returns cpu ids found in both the provided cpuLists, if any -func (c *CPULists) Intersect() []int { - commonSet := c.reserved.Intersection(c.isolated) - return commonSet.ToSlice() -} - -// CountIsolated returns how many isolated cpus where specified -func (c *CPULists) CountIsolated() int { - return c.isolated.Size() -} - -// NewCPULists parse text representations of reserved and isolated cpusets definiton and returns a CPULists object -func NewCPULists(reservedList, isolatedList string) (*CPULists, error) { - var err error - reserved, err := cpuset.Parse(reservedList) - if err != nil { - return nil, err - } - isolated, err := cpuset.Parse(isolatedList) - if err != nil { - return nil, err - } - return &CPULists{ - reserved: reserved, - isolated: isolated, - }, nil -} - -// CPUMaskToCPUSet parses a CPUSet received in a Mask Format, see: -// https://man7.org/linux/man-pages/man7/cpuset.7.html#FORMATS -func CPUMaskToCPUSet(cpuMask string) (cpuset.CPUSet, error) { - chunks := strings.Split(cpuMask, ",") - - // reverse the chunks order - n := len(chunks) - for i := 0; i < n/2; i++ { - chunks[i], chunks[n-i-1] = chunks[n-i-1], chunks[i] - } - - builder := cpuset.NewBuilder() - for i, chunk := range chunks { - if chunk == "" { - return cpuset.NewCPUSet(), fmt.Errorf("malformed CPU mask %q chunk %q", cpuMask, chunk) - } - mask, err := strconv.ParseUint(chunk, 16, bitsInWord) - if err != nil { - return cpuset.NewCPUSet(), fmt.Errorf("failed to parse the CPU mask %q: %v", cpuMask, err) - } - for j := 0; j < bitsInWord; j++ { - if mask&1 == 1 { - builder.Add(i*bitsInWord + j) - } - mask >>= 1 - } - } - - return builder.Result(), nil -} diff --git a/pkg/pao/controller/performanceprofile/components/utils_suite_test.go b/pkg/pao/controller/performanceprofile/components/utils_suite_test.go deleted file mode 100644 index 5a874af5f..000000000 --- a/pkg/pao/controller/performanceprofile/components/utils_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package components - -import ( - "testing" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" -) - -func TestComponetsUtils(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Tuned Suite") -} diff --git a/pkg/pao/controller/performanceprofile/components/utils_test.go b/pkg/pao/controller/performanceprofile/components/utils_test.go deleted file mode 100644 index a8eceea6c..000000000 --- a/pkg/pao/controller/performanceprofile/components/utils_test.go +++ /dev/null @@ -1,101 +0,0 @@ -package components - -import ( - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - - "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" -) - -type listToMask struct { - cpuList string - cpuMask string -} - -var cpuListToMask = []listToMask{ - {"0", "00000001"}, - {"2-3", "0000000c"}, - {"3,4,53-55,61-63", "e0e00000,00000018"}, - {"0-127", "ffffffff,ffffffff,ffffffff,ffffffff"}, - {"0-255", "ffffffff,ffffffff,ffffffff,ffffffff,ffffffff,ffffffff,ffffffff,ffffffff"}, -} - -func intersectHelper(cpuListA, cpuListB string) ([]int, error) { - cpuLists, err := NewCPULists(cpuListA, cpuListB) - if err != nil { - return nil, err - } - return cpuLists.Intersect(), nil -} - -var _ = Describe("Components utils", func() { - Context("Convert CPU list to CPU mask", func() { - It("should generate a valid CPU mask from CPU list ", func() { - for _, cpuEntry := range cpuListToMask { - cpuMask, err := CPUListToMaskList(cpuEntry.cpuList) - Expect(err).ToNot(HaveOccurred()) - Expect(cpuMask).Should(Equal(cpuEntry.cpuMask)) - } - }) - }) - - Context("Convert CPU mask to CPU list", func() { - It("should generate a valid CPU list from CPU mask ", func() { - for _, cpuEntry := range cpuListToMask { - cpuSetFromList, err := cpuset.Parse(cpuEntry.cpuList) - Expect(err).ToNot(HaveOccurred()) - cpuSetFromMask, err := CPUMaskToCPUSet(cpuEntry.cpuMask) - Expect(err).ToNot(HaveOccurred()) - - Expect(cpuSetFromList).Should(Equal(cpuSetFromMask)) - } - }) - }) - - Context("Check intersections between CPU sets", func() { - It("should detect invalid cpulists", func() { - var cpuListInvalid = []string{ - "0-", "-", "-3", ",,", ",2", "-,", "0-1,", "0,1,3,,4", - } - - for _, entry := range cpuListInvalid { - _, err := intersectHelper(entry, entry) - Expect(err).To(HaveOccurred()) - - _, err = intersectHelper(entry, "0-3") - Expect(err).To(HaveOccurred()) - - _, err = intersectHelper("0-3", entry) - Expect(err).To(HaveOccurred()) - } - }) - - It("should detect cpulist intersections", func() { - type cpuListIntersect struct { - cpuListA string - cpuListB string - result []int - } - - var cpuListIntersectTestcases = []cpuListIntersect{ - {"", "0-3", []int{}}, - {"0-3", "", []int{}}, - {"0-3", "4-15", []int{}}, - {"0-3", "8-15", []int{}}, - {"0-3", "0-15", []int{0, 1, 2, 3}}, - {"0-3", "3-15", []int{3}}, - {"3-7", "6-15", []int{6, 7}}, - } - - for _, entry := range cpuListIntersectTestcases { - res, err := intersectHelper(entry.cpuListA, entry.cpuListB) - Expect(err).ToNot(HaveOccurred()) - - Expect(len(res)).To(Equal(len(entry.result))) - for idx, cpuid := range res { - Expect(cpuid).To(Equal(entry.result[idx])) - } - } - }) - }) -}) diff --git a/pkg/pao/controller/performanceprofile_controller.go b/pkg/pao/controller/performanceprofile_controller.go deleted file mode 100644 index e1122fdb7..000000000 --- a/pkg/pao/controller/performanceprofile_controller.go +++ /dev/null @@ -1,696 +0,0 @@ -/* - - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package controller - -import ( - "context" - "fmt" - "reflect" - "time" - - performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2" - tunedv1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/tuned/v1" - "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components" - "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components/machineconfig" - "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components/manifestset" - profileutil "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components/profile" - mcov1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" - - olmv1 "github.com/operator-framework/api/pkg/operators/v1" - olmv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" - - corev1 "k8s.io/api/core/v1" - nodev1beta1 "k8s.io/api/node/v1beta1" - apiequality "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/api/errors" - k8serros "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/tools/record" - "k8s.io/klog" - - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/builder" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" -) - -const finalizer = "foreground-deletion" - -// PerformanceProfileReconciler reconciles a PerformanceProfile object -type PerformanceProfileReconciler struct { - client.Client - Scheme *runtime.Scheme - Recorder record.EventRecorder - olmRemoved bool -} - -// SetupWithManager creates a new PerformanceProfile Controller and adds it to the Manager. -// The Manager will set fields on the Controller and Start it when the Manager is Started. 
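The update predicates defined in the deleted SetupWithManager below are the part worth keeping in mind when porting this controller: reconciles are triggered only when the spec generation or the labels change, not on every status write. Reduced to its essentials, as a library-style sketch using the same controller-runtime packages the deleted code imports:

    package predicates

    import (
        apiequality "k8s.io/apimachinery/pkg/api/equality"
        "sigs.k8s.io/controller-runtime/pkg/event"
        "sigs.k8s.io/controller-runtime/pkg/predicate"
    )

    // SpecOrLabelsChanged mirrors the predicate applied to the owned MachineConfig,
    // Tuned and RuntimeClass objects: ignore updates unless the generation (spec)
    // or the labels changed.
    var SpecOrLabelsChanged = predicate.Funcs{
        UpdateFunc: func(e event.UpdateEvent) bool {
            if e.ObjectOld == nil || e.ObjectNew == nil {
                return false
            }
            return e.ObjectNew.GetGeneration() != e.ObjectOld.GetGeneration() ||
                !apiequality.Semantic.DeepEqual(e.ObjectNew.GetLabels(), e.ObjectOld.GetLabels())
        },
    }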
-func (r *PerformanceProfileReconciler) SetupWithManager(mgr ctrl.Manager) error { - - // we want to initate reconcile loop only on change under labels or spec of the object - p := predicate.Funcs{ - UpdateFunc: func(e event.UpdateEvent) bool { - if !validateUpdateEvent(&e) { - return false - } - - return e.ObjectNew.GetGeneration() != e.ObjectOld.GetGeneration() || - !apiequality.Semantic.DeepEqual(e.ObjectNew.GetLabels(), e.ObjectOld.GetLabels()) - }, - } - - kubeletPredicates := predicate.Funcs{ - UpdateFunc: func(e event.UpdateEvent) bool { - if !validateUpdateEvent(&e) { - return false - } - - kubeletOld := e.ObjectOld.(*mcov1.KubeletConfig) - kubeletNew := e.ObjectNew.(*mcov1.KubeletConfig) - - return kubeletOld.GetGeneration() != kubeletNew.GetGeneration() || - !reflect.DeepEqual(kubeletOld.Status.Conditions, kubeletNew.Status.Conditions) - }, - } - - mcpPredicates := predicate.Funcs{ - UpdateFunc: func(e event.UpdateEvent) bool { - if !validateUpdateEvent(&e) { - return false - } - - mcpOld := e.ObjectOld.(*mcov1.MachineConfigPool) - mcpNew := e.ObjectNew.(*mcov1.MachineConfigPool) - - return !reflect.DeepEqual(mcpOld.Status.Conditions, mcpNew.Status.Conditions) - }, - } - - tunedProfilePredicates := predicate.Funcs{ - UpdateFunc: func(e event.UpdateEvent) bool { - if !validateUpdateEvent(&e) { - return false - } - - tunedProfileOld := e.ObjectOld.(*tunedv1.Profile) - tunedProfileNew := e.ObjectNew.(*tunedv1.Profile) - - return !reflect.DeepEqual(tunedProfileOld.Status.Conditions, tunedProfileNew.Status.Conditions) - }, - } - - return ctrl.NewControllerManagedBy(mgr). - For(&performancev2.PerformanceProfile{}). - Owns(&mcov1.MachineConfig{}, builder.WithPredicates(p)). - Owns(&mcov1.KubeletConfig{}, builder.WithPredicates(kubeletPredicates)). - Owns(&tunedv1.Tuned{}, builder.WithPredicates(p)). - Owns(&nodev1beta1.RuntimeClass{}, builder.WithPredicates(p)). - Watches( - &source.Kind{Type: &mcov1.MachineConfigPool{}}, - handler.EnqueueRequestsFromMapFunc(r.mcpToPerformanceProfile), - builder.WithPredicates(mcpPredicates)). - Watches( - &source.Kind{Type: &tunedv1.Profile{}}, - handler.EnqueueRequestsFromMapFunc(r.tunedProfileToPerformanceProfile), - builder.WithPredicates(tunedProfilePredicates), - ). - Complete(r) -} - -// uninstall PAO OLM operator and all of its artifacts -// this should apply only from version 4.11 -func (r *PerformanceProfileReconciler) removeOLMOperator() error { - paoCSV := "performance-addon-operator.v4.10.0" - subscription := &olmv1alpha1.Subscription{} - key := types.NamespacedName{ - Name: "performance-addon-operator", - Namespace: "openshift-performance-addon-operator", - } - - if err := r.Get(context.TODO(), key, subscription); err != nil { - if !errors.IsNotFound(err) { - return err - } - } else { - klog.Infof("Removing performance-addon-operator subscription %s", subscription.Name) - if subscription.Status.CurrentCSV != paoCSV { - return fmt.Errorf("Subscription to be removed contains a current CSV version %s which is different from %s", subscription.Status.CurrentCSV, paoCSV) - } - if err := r.Delete(context.TODO(), subscription); err != nil { - return err - } - } - - csv, err := r.getCSV(paoCSV, "openshift-performance-addon-operator") - if err != nil { - if !errors.IsNotFound(err) { - klog.Infof("Performance addon operator csv %s not found. 
no need for OLM content removal.", paoCSV) - } - } else { - klog.Infof("Removing performance-addon-operator CSV %s", paoCSV) - if err := r.Delete(context.TODO(), csv); err != nil { - return err - } - } - - operatorGroup := &olmv1.OperatorGroup{} - key = types.NamespacedName{ - Name: "performance-addon-operator", - Namespace: "openshift-performance-addon-operator", - } - - if err := r.Get(context.TODO(), key, operatorGroup); err != nil { - if !errors.IsNotFound(err) { - return err - } - } else { - klog.Infof("Removing performance-addon-operator operator group %s", operatorGroup.Name) - if err := r.Delete(context.TODO(), operatorGroup); err != nil { - return err - } - } - - return nil -} - -func (r *PerformanceProfileReconciler) getCSV(name, namespace string) (*olmv1alpha1.ClusterServiceVersion, error) { - csv := &olmv1alpha1.ClusterServiceVersion{} - key := types.NamespacedName{ - Name: name, - Namespace: namespace, - } - err := r.Get(context.TODO(), key, csv) - return csv, err -} - -func (r *PerformanceProfileReconciler) mcpToPerformanceProfile(mcpObj client.Object) []reconcile.Request { - mcp := &mcov1.MachineConfigPool{} - - key := types.NamespacedName{ - Namespace: mcpObj.GetNamespace(), - Name: mcpObj.GetName(), - } - if err := r.Get(context.TODO(), key, mcp); err != nil { - klog.Errorf("failed to get the machine config pool %+v: %v", key, err) - return nil - } - - profiles := &performancev2.PerformanceProfileList{} - if err := r.List(context.TODO(), profiles); err != nil { - klog.Errorf("failed to get performance profiles: %v", err) - return nil - } - - var requests []reconcile.Request - for i, profile := range profiles.Items { - profileNodeSelector := labels.Set(profile.Spec.NodeSelector) - mcpNodeSelector, err := metav1.LabelSelectorAsSelector(mcp.Spec.NodeSelector) - if err != nil { - klog.Errorf("failed to parse the selector %v: %v", mcp.Spec.NodeSelector, err) - return nil - } - - if mcpNodeSelector.Matches(profileNodeSelector) { - requests = append(requests, reconcile.Request{NamespacedName: namespacedName(&profiles.Items[i])}) - } - } - - return requests -} - -func (r *PerformanceProfileReconciler) tunedProfileToPerformanceProfile(tunedProfileObj client.Object) []reconcile.Request { - node := &corev1.Node{} - key := types.NamespacedName{ - // the tuned profile name is the same as node - Name: tunedProfileObj.GetName(), - } - - if err := r.Get(context.TODO(), key, node); err != nil { - klog.Errorf("failed to get the tuned profile %+v: %v", key, err) - return nil - } - - profiles := &performancev2.PerformanceProfileList{} - if err := r.List(context.TODO(), profiles); err != nil { - klog.Errorf("failed to get performance profiles: %v", err) - return nil - } - - var requests []reconcile.Request - for i, profile := range profiles.Items { - profileNodeSelector := labels.Set(profile.Spec.NodeSelector) - nodeLabels := labels.Set(node.Labels) - if profileNodeSelector.AsSelector().Matches(nodeLabels) { - requests = append(requests, reconcile.Request{NamespacedName: namespacedName(&profiles.Items[i])}) - } - } - - return requests -} - -func validateUpdateEvent(e *event.UpdateEvent) bool { - if e.ObjectOld == nil { - klog.Error("Update event has no old runtime object to update") - return false - } - if e.ObjectNew == nil { - klog.Error("Update event has no new runtime object for update") - return false - } - - return true -} - -// +kubebuilder:rbac:groups="",resources=events,verbs=* -// +kubebuilder:rbac:groups="",resources=nodes,verbs=get;list;watch -// 
+kubebuilder:rbac:groups=performance.openshift.io,resources=performanceprofiles;performanceprofiles/status;performanceprofiles/finalizers,verbs=* -// +kubebuilder:rbac:groups=machineconfiguration.openshift.io,resources=machineconfigs;machineconfigpools;kubeletconfigs,verbs=* -// +kubebuilder:rbac:groups=tuned.openshift.io,resources=tuneds;profiles,verbs=* -// +kubebuilder:rbac:groups=node.k8s.io,resources=runtimeclasses,verbs=* -// +kubebuilder:rbac:groups=config.openshift.io,resources=infrastructures,verbs=get;list;watch -// +kubebuilder:rbac:namespace="openshift-cluster-node-tuning-operator",groups=core,resources=pods;services;services/finalizers;configmaps,verbs=* -// +kubebuilder:rbac:namespace="openshift-cluster-node-tuning-operator",groups=coordination.k8s.io,resources=leases,verbs=create;get;list;update -// +kubebuilder:rbac:namespace="openshift-cluster-node-tuning-operator",groups=apps,resourceNames=performance-operator,resources=deployments/finalizers,verbs=update -// +kubebuilder:rbac:namespace="openshift-cluster-node-tuning-operator",groups=monitoring.coreos.com,resources=servicemonitors,verbs=* - -// Reconcile reads that state of the cluster for a PerformanceProfile object and makes changes based on the state read -// and what is in the PerformanceProfile.Spec -// Note: -// The Controller will requeue the Request to be processed again if the returned error is non-nil or -// Result.Requeue is true, otherwise upon completion it will remove the work from the queue. -func (r *PerformanceProfileReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - klog.Info("Reconciling PerformanceProfile") - - // This should be deprecated in openshift 4.12 - if !r.olmRemoved { - if err := r.removeOLMOperator(); err != nil { - return reconcile.Result{}, err - } else { - r.olmRemoved = true - } - } - - // Fetch the PerformanceProfile instance - instance := &performancev2.PerformanceProfile{} - err := r.Get(ctx, req.NamespacedName, instance) - if err != nil { - if k8serros.IsNotFound(err) { - // Request object not found, could have been deleted after reconcile request. - // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers. - // Return and don't requeue - return reconcile.Result{}, nil - } - // Error reading the object - requeue the request. 
- return reconcile.Result{}, err - } - - if instance.DeletionTimestamp != nil { - // delete components - if err := r.deleteComponents(instance); err != nil { - klog.Errorf("failed to delete components: %v", err) - r.Recorder.Eventf(instance, corev1.EventTypeWarning, "Deletion failed", "Failed to delete components: %v", err) - return reconcile.Result{}, err - } - r.Recorder.Eventf(instance, corev1.EventTypeNormal, "Deletion succeeded", "Succeeded to delete all components") - - if r.isComponentsExist(instance) { - return reconcile.Result{RequeueAfter: 10 * time.Second}, nil - } - - // remove finalizer - if hasFinalizer(instance, finalizer) { - removeFinalizer(instance, finalizer) - if err := r.Update(ctx, instance); err != nil { - return reconcile.Result{}, err - } - - return reconcile.Result{}, nil - } - } - - // add finalizer - if !hasFinalizer(instance, finalizer) { - instance.Finalizers = append(instance.Finalizers, finalizer) - instance.Status.Conditions = r.getProgressingConditions("DeploymentStarting", "Deployment is starting") - if err := r.Update(ctx, instance); err != nil { - return reconcile.Result{}, err - } - - // we exit reconcile loop because we will have additional update reconcile - return reconcile.Result{}, nil - } - - profileMCP, err := r.getMachineConfigPoolByProfile(instance) - if err != nil { - conditions := r.getDegradedConditions(conditionFailedToFindMachineConfigPool, err.Error()) - if err := r.updateStatus(instance, conditions); err != nil { - klog.Errorf("failed to update performance profile %q status: %v", instance.Name, err) - return reconcile.Result{}, err - } - - return reconcile.Result{}, nil - } - - if err := validateProfileMachineConfigPool(instance, profileMCP); err != nil { - conditions := r.getDegradedConditions(conditionBadMachineConfigLabels, err.Error()) - if err := r.updateStatus(instance, conditions); err != nil { - klog.Errorf("failed to update performance profile %q status: %v", instance.Name, err) - return reconcile.Result{}, err - } - - return reconcile.Result{}, nil - } - - // remove components with the old name after the upgrade - if err := r.deleteDeprecatedComponents(instance); err != nil { - return ctrl.Result{}, err - } - - // apply components - result, err := r.applyComponents(instance, profileMCP) - if err != nil { - klog.Errorf("failed to deploy performance profile %q components: %v", instance.Name, err) - r.Recorder.Eventf(instance, corev1.EventTypeWarning, "Creation failed", "Failed to create all components: %v", err) - conditions := r.getDegradedConditions(conditionReasonComponentsCreationFailed, err.Error()) - if err := r.updateStatus(instance, conditions); err != nil { - klog.Errorf("failed to update performance profile %q status: %v", instance.Name, err) - return reconcile.Result{}, err - } - return reconcile.Result{}, err - } - - // get kubelet false condition - conditions, err := r.getKubeletConditionsByProfile(instance) - if err != nil { - return r.updateDegradedCondition(instance, conditionFailedGettingKubeletStatus, err) - } - - // get MCP degraded conditions - if conditions == nil { - conditions, err = r.getMCPDegradedCondition(profileMCP) - if err != nil { - return r.updateDegradedCondition(instance, conditionFailedGettingMCPStatus, err) - } - } - - // get tuned profile degraded conditions - if conditions == nil { - conditions, err = r.getTunedConditionsByProfile(instance) - if err != nil { - return r.updateDegradedCondition(instance, conditionFailedGettingTunedProfileStatus, err) - } - } - - // if conditions were not added 
due to machine config pool status change then set as available - if conditions == nil { - conditions = r.getAvailableConditions() - } - - if err := r.updateStatus(instance, conditions); err != nil { - klog.Errorf("failed to update performance profile %q status: %v", instance.Name, err) - // we still want to requeue after some, also in case of error, to avoid chance of multiple reboots - if result != nil { - return *result, nil - } - - return reconcile.Result{}, err - } - - if result != nil { - return *result, nil - } - - return ctrl.Result{}, nil -} - -func (r *PerformanceProfileReconciler) deleteDeprecatedComponents(instance *performancev2.PerformanceProfile) error { - // remove the machine config with the deprecated name - name := components.GetComponentName(instance.Name, components.ComponentNamePrefix) - return r.deleteMachineConfig(name) -} - -func (r *PerformanceProfileReconciler) updateDegradedCondition(instance *performancev2.PerformanceProfile, conditionState string, conditionError error) (ctrl.Result, error) { - conditions := r.getDegradedConditions(conditionState, conditionError.Error()) - if err := r.updateStatus(instance, conditions); err != nil { - klog.Errorf("failed to update performance profile %q status: %v", instance.Name, err) - return reconcile.Result{}, err - } - return reconcile.Result{}, conditionError -} - -func (r *PerformanceProfileReconciler) applyComponents(profile *performancev2.PerformanceProfile, profileMCP *mcov1.MachineConfigPool) (*reconcile.Result, error) { - if profileutil.IsPaused(profile) { - klog.Infof("Ignoring reconcile loop for pause performance profile %s", profile.Name) - return nil, nil - } - - components, err := manifestset.GetNewComponents(profile, profileMCP) - if err != nil { - return nil, err - } - for _, componentObj := range components.ToObjects() { - if err := controllerutil.SetControllerReference(profile, componentObj, r.Scheme); err != nil { - return nil, err - } - } - - // get mutated machine config - mcMutated, err := r.getMutatedMachineConfig(components.MachineConfig) - if err != nil { - return nil, err - } - - // get mutated kubelet config - kcMutated, err := r.getMutatedKubeletConfig(components.KubeletConfig) - if err != nil { - return nil, err - } - - // get mutated performance tuned - performanceTunedMutated, err := r.getMutatedTuned(components.Tuned) - if err != nil { - return nil, err - } - - // get mutated RuntimeClass - runtimeClassMutated, err := r.getMutatedRuntimeClass(components.RuntimeClass) - if err != nil { - return nil, err - } - - updated := mcMutated != nil || - kcMutated != nil || - performanceTunedMutated != nil || - runtimeClassMutated != nil - - // does not update any resources, if it no changes to relevant objects and just continue to the status update - if !updated { - return nil, nil - } - - if mcMutated != nil { - if err := r.createOrUpdateMachineConfig(mcMutated); err != nil { - return nil, err - } - } - - if performanceTunedMutated != nil { - if err := r.createOrUpdateTuned(performanceTunedMutated, profile.Name); err != nil { - return nil, err - } - } - - if kcMutated != nil { - if err := r.createOrUpdateKubeletConfig(kcMutated); err != nil { - return nil, err - } - } - - if runtimeClassMutated != nil { - if err := r.createOrUpdateRuntimeClass(runtimeClassMutated); err != nil { - return nil, err - } - } - - r.Recorder.Eventf(profile, corev1.EventTypeNormal, "Creation succeeded", "Succeeded to create all components") - return &reconcile.Result{}, nil -} - -func (r *PerformanceProfileReconciler) 
deleteComponents(profile *performancev2.PerformanceProfile) error { - tunedName := components.GetComponentName(profile.Name, components.ProfileNamePerformance) - if err := r.deleteTuned(tunedName, components.NamespaceNodeTuningOperator); err != nil { - return err - } - - name := components.GetComponentName(profile.Name, components.ComponentNamePrefix) - if err := r.deleteKubeletConfig(name); err != nil { - return err - } - - if err := r.deleteRuntimeClass(name); err != nil { - return err - } - - if err := r.deleteMachineConfig(machineconfig.GetMachineConfigName(profile)); err != nil { - return err - } - - return nil - -} - -func (r *PerformanceProfileReconciler) isComponentsExist(profile *performancev2.PerformanceProfile) bool { - tunedName := components.GetComponentName(profile.Name, components.ProfileNamePerformance) - if _, err := r.getTuned(tunedName, components.NamespaceNodeTuningOperator); !k8serros.IsNotFound(err) { - klog.Infof("Tuned %q custom resource is still exists under the namespace %q", tunedName, components.NamespaceNodeTuningOperator) - return true - } - - name := components.GetComponentName(profile.Name, components.ComponentNamePrefix) - if _, err := r.getKubeletConfig(name); !k8serros.IsNotFound(err) { - klog.Infof("Kubelet Config %q exists under the cluster", name) - return true - } - - if _, err := r.getRuntimeClass(name); !k8serros.IsNotFound(err) { - klog.Infof("Runtime class %q exists under the cluster", name) - return true - } - - if _, err := r.getMachineConfig(machineconfig.GetMachineConfigName(profile)); !k8serros.IsNotFound(err) { - klog.Infof("Machine Config %q exists under the cluster", name) - return true - } - - return false -} - -func hasFinalizer(profile *performancev2.PerformanceProfile, finalizer string) bool { - for _, f := range profile.Finalizers { - if f == finalizer { - return true - } - } - return false -} - -func removeFinalizer(profile *performancev2.PerformanceProfile, finalizer string) { - var finalizers []string - for _, f := range profile.Finalizers { - if f == finalizer { - continue - } - finalizers = append(finalizers, f) - } - profile.Finalizers = finalizers -} - -func namespacedName(obj metav1.Object) types.NamespacedName { - return types.NamespacedName{ - Namespace: obj.GetNamespace(), - Name: obj.GetName(), - } -} - -func (r *PerformanceProfileReconciler) getMachineConfigPoolByProfile(profile *performancev2.PerformanceProfile) (*mcov1.MachineConfigPool, error) { - nodeSelector := labels.Set(profile.Spec.NodeSelector) - - mcpList := &mcov1.MachineConfigPoolList{} - if err := r.Client.List(context.TODO(), mcpList); err != nil { - return nil, err - } - - filteredMCPList := filterMCPDuplications(mcpList.Items) - - var profileMCPs []*mcov1.MachineConfigPool - for i := range filteredMCPList { - mcp := &mcpList.Items[i] - - if mcp.Spec.NodeSelector == nil { - continue - } - - mcpNodeSelector, err := metav1.LabelSelectorAsSelector(mcp.Spec.NodeSelector) - if err != nil { - return nil, err - } - - if mcpNodeSelector.Matches(nodeSelector) { - profileMCPs = append(profileMCPs, mcp) - } - } - - if len(profileMCPs) == 0 { - return nil, fmt.Errorf("failed to find MCP with the node selector that matches labels %q", nodeSelector.String()) - } - - if len(profileMCPs) > 1 { - return nil, fmt.Errorf("more than one MCP found that matches performance profile node selector %q", nodeSelector.String()) - } - - return profileMCPs[0], nil -} - -func filterMCPDuplications(mcps []mcov1.MachineConfigPool) []mcov1.MachineConfigPool { - var filtered 
[]mcov1.MachineConfigPool - items := map[string]mcov1.MachineConfigPool{} - for _, mcp := range mcps { - if _, exists := items[mcp.Name]; !exists { - items[mcp.Name] = mcp - filtered = append(filtered, mcp) - } - } - - return filtered -} - -func validateProfileMachineConfigPool(profile *performancev2.PerformanceProfile, profileMCP *mcov1.MachineConfigPool) error { - if profileMCP.Spec.MachineConfigSelector.Size() == 0 { - return fmt.Errorf("the MachineConfigPool %q machineConfigSelector is nil", profileMCP.Name) - } - - if len(profileMCP.Labels) == 0 { - return fmt.Errorf("the MachineConfigPool %q does not have any labels that can be used to bind it together with KubeletConfing", profileMCP.Name) - } - - // we can not guarantee that our generated label for the machine config selector will be the right one - // but at least we can validate that the MCP will consume our machine config - machineConfigLabels := profileutil.GetMachineConfigLabel(profile) - mcpMachineConfigSelector, err := metav1.LabelSelectorAsSelector(profileMCP.Spec.MachineConfigSelector) - if err != nil { - return err - } - - if !mcpMachineConfigSelector.Matches(labels.Set(machineConfigLabels)) { - if len(profile.Spec.MachineConfigLabel) > 0 { - return fmt.Errorf("the machine config labels %v provided via profile.spec.machineConfigLabel do not match the MachineConfigPool %q machineConfigSelector %q", machineConfigLabels, profileMCP.Name, mcpMachineConfigSelector.String()) - } - - return fmt.Errorf("the machine config labels %v generated from the profile.spec.nodeSelector %v do not match the MachineConfigPool %q machineConfigSelector %q", machineConfigLabels, profile.Spec.NodeSelector, profileMCP.Name, mcpMachineConfigSelector.String()) - } - - return nil -} diff --git a/pkg/pao/controller/performanceprofile_controller_suite_test.go b/pkg/pao/controller/performanceprofile_controller_suite_test.go deleted file mode 100644 index d8293d661..000000000 --- a/pkg/pao/controller/performanceprofile_controller_suite_test.go +++ /dev/null @@ -1,30 +0,0 @@ -package controller - -import ( - "testing" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - configv1 "github.com/openshift/api/config/v1" - performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2" - tunedv1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/tuned/v1" - mcov1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" - olmv1 "github.com/operator-framework/api/pkg/operators/v1" - olmv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" - - "k8s.io/client-go/kubernetes/scheme" -) - -func TestPerformanceProfile(t *testing.T) { - RegisterFailHandler(Fail) - - // add resources API to default scheme - performancev2.AddToScheme(scheme.Scheme) - configv1.AddToScheme(scheme.Scheme) - mcov1.AddToScheme(scheme.Scheme) - tunedv1.AddToScheme(scheme.Scheme) - olmv1.AddToScheme(scheme.Scheme) - olmv1alpha1.AddToScheme(scheme.Scheme) - - RunSpecs(t, "Performance Profile Suite") -} diff --git a/pkg/pao/controller/performanceprofile_controller_test.go b/pkg/pao/controller/performanceprofile_controller_test.go deleted file mode 100644 index 95e0a9d4c..000000000 --- a/pkg/pao/controller/performanceprofile_controller_test.go +++ /dev/null @@ -1,884 +0,0 @@ -package controller - -import ( - "context" - "encoding/json" - "fmt" - "regexp" - "time" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - . 
"github.com/onsi/gomega/gstruct" - - igntypes "github.com/coreos/ignition/config/v2_2/types" - performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2" - tunedv1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/tuned/v1" - "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components" - "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components/kubeletconfig" - "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components/machineconfig" - "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components/runtimeclass" - "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components/tuned" - testutils "github.com/openshift/cluster-node-tuning-operator/pkg/pao/utils/testing" - conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" - mcov1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" - - corev1 "k8s.io/api/core/v1" - nodev1beta1 "k8s.io/api/node/v1beta1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/tools/record" - "k8s.io/utils/pointer" - - "sigs.k8s.io/controller-runtime/pkg/client/fake" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -var _ = Describe("Controller", func() { - var request reconcile.Request - var profile *performancev2.PerformanceProfile - var profileMCP *mcov1.MachineConfigPool - - BeforeEach(func() { - profileMCP = testutils.NewProfileMCP() - profile = testutils.NewPerformanceProfile("test") - request = reconcile.Request{ - NamespacedName: types.NamespacedName{ - Namespace: metav1.NamespaceNone, - Name: profile.Name, - }, - } - }) - - It("should add finalizer to the performance profile", func() { - r := newFakeReconciler(profile, profileMCP) - - Expect(reconcileTimes(r, request, 1)).To(Equal(reconcile.Result{})) - - updatedProfile := &performancev2.PerformanceProfile{} - key := types.NamespacedName{ - Name: profile.Name, - Namespace: metav1.NamespaceNone, - } - Expect(r.Get(context.TODO(), key, updatedProfile)).ToNot(HaveOccurred()) - Expect(hasFinalizer(updatedProfile, finalizer)).To(Equal(true)) - }) - - Context("with profile with finalizer", func() { - BeforeEach(func() { - profile.Finalizers = append(profile.Finalizers, finalizer) - }) - - It("should create all resources on first reconcile loop", func() { - r := newFakeReconciler(profile, profileMCP) - - Expect(reconcileTimes(r, request, 1)).To(Equal(reconcile.Result{})) - - key := types.NamespacedName{ - Name: machineconfig.GetMachineConfigName(profile), - Namespace: metav1.NamespaceNone, - } - - // verify MachineConfig creation - mc := &mcov1.MachineConfig{} - err := r.Get(context.TODO(), key, mc) - Expect(err).ToNot(HaveOccurred()) - - key = types.NamespacedName{ - Name: components.GetComponentName(profile.Name, components.ComponentNamePrefix), - Namespace: metav1.NamespaceNone, - } - - // verify KubeletConfig creation - kc := &mcov1.KubeletConfig{} - err = r.Get(context.TODO(), key, kc) - Expect(err).ToNot(HaveOccurred()) - - // verify RuntimeClass creation - runtimeClass := &nodev1beta1.RuntimeClass{} - err = r.Get(context.TODO(), key, runtimeClass) - Expect(err).ToNot(HaveOccurred()) - - // verify tuned performance creation - tunedPerformance := 
&tunedv1.Tuned{} - key.Name = components.GetComponentName(profile.Name, components.ProfileNamePerformance) - key.Namespace = components.NamespaceNodeTuningOperator - err = r.Get(context.TODO(), key, tunedPerformance) - Expect(err).ToNot(HaveOccurred()) - }) - - It("should create event on the second reconcile loop", func() { - r := newFakeReconciler(profile, profileMCP) - - Expect(reconcileTimes(r, request, 2)).To(Equal(reconcile.Result{})) - - // verify creation event - fakeRecorder, ok := r.Recorder.(*record.FakeRecorder) - Expect(ok).To(BeTrue()) - event := <-fakeRecorder.Events - Expect(event).To(ContainSubstring("Creation succeeded")) - }) - - It("should update the profile status", func() { - r := newFakeReconciler(profile, profileMCP) - - Expect(reconcileTimes(r, request, 1)).To(Equal(reconcile.Result{})) - - updatedProfile := &performancev2.PerformanceProfile{} - key := types.NamespacedName{ - Name: profile.Name, - Namespace: metav1.NamespaceNone, - } - Expect(r.Get(context.TODO(), key, updatedProfile)).ToNot(HaveOccurred()) - - // verify performance profile status - Expect(len(updatedProfile.Status.Conditions)).To(Equal(4)) - - // verify profile conditions - progressingCondition := conditionsv1.FindStatusCondition(updatedProfile.Status.Conditions, conditionsv1.ConditionProgressing) - Expect(progressingCondition).ToNot(BeNil()) - Expect(progressingCondition.Status).To(Equal(corev1.ConditionFalse)) - availableCondition := conditionsv1.FindStatusCondition(updatedProfile.Status.Conditions, conditionsv1.ConditionAvailable) - Expect(availableCondition).ToNot(BeNil()) - Expect(availableCondition.Status).To(Equal(corev1.ConditionTrue)) - }) - - It("should promote kubelet config failure condition", func() { - r := newFakeReconciler(profile, profileMCP) - Expect(reconcileTimes(r, request, 1)).To(Equal(reconcile.Result{})) - - name := components.GetComponentName(profile.Name, components.ComponentNamePrefix) - key := types.NamespacedName{ - Name: name, - Namespace: metav1.NamespaceNone, - } - - kc := &mcov1.KubeletConfig{} - err := r.Get(context.TODO(), key, kc) - Expect(err).ToNot(HaveOccurred()) - - now := time.Now() - kc.Status.Conditions = []mcov1.KubeletConfigCondition{ - { - Type: mcov1.KubeletConfigFailure, - Status: corev1.ConditionTrue, - LastTransitionTime: metav1.Time{Time: now.Add(time.Minute)}, - Reason: "Test failure condition", - Message: "Test failure condition", - }, - { - Type: mcov1.KubeletConfigSuccess, - Status: corev1.ConditionTrue, - LastTransitionTime: metav1.Time{Time: now}, - Reason: "Test succeed condition", - Message: "Test succeed condition", - }, - } - Expect(r.Update(context.TODO(), kc)).ToNot(HaveOccurred()) - - Expect(reconcileTimes(r, request, 1)).To(Equal(reconcile.Result{})) - - updatedProfile := &performancev2.PerformanceProfile{} - key = types.NamespacedName{ - Name: profile.Name, - Namespace: metav1.NamespaceNone, - } - Expect(r.Get(context.TODO(), key, updatedProfile)).ToNot(HaveOccurred()) - - degradedCondition := conditionsv1.FindStatusCondition(updatedProfile.Status.Conditions, conditionsv1.ConditionDegraded) - Expect(degradedCondition.Status).To(Equal(corev1.ConditionTrue)) - Expect(degradedCondition.Message).To(Equal("Test failure condition")) - Expect(degradedCondition.Reason).To(Equal(conditionKubeletFailed)) - }) - - It("should not promote old failure condition", func() { - r := newFakeReconciler(profile, profileMCP) - Expect(reconcileTimes(r, request, 1)).To(Equal(reconcile.Result{})) - - name := components.GetComponentName(profile.Name, 
components.ComponentNamePrefix) - key := types.NamespacedName{ - Name: name, - Namespace: metav1.NamespaceNone, - } - - kc := &mcov1.KubeletConfig{} - err := r.Get(context.TODO(), key, kc) - Expect(err).ToNot(HaveOccurred()) - - now := time.Now() - kc.Status.Conditions = []mcov1.KubeletConfigCondition{ - { - Type: mcov1.KubeletConfigFailure, - Status: corev1.ConditionTrue, - LastTransitionTime: metav1.Time{Time: now}, - Reason: "Test failure condition", - Message: "Test failure condition", - }, - { - Type: mcov1.KubeletConfigSuccess, - Status: corev1.ConditionTrue, - LastTransitionTime: metav1.Time{Time: now.Add(time.Minute)}, - Reason: "Test succeed condition", - Message: "Test succeed condition", - }, - } - Expect(r.Update(context.TODO(), kc)).ToNot(HaveOccurred()) - - Expect(reconcileTimes(r, request, 1)).To(Equal(reconcile.Result{})) - - updatedProfile := &performancev2.PerformanceProfile{} - key = types.NamespacedName{ - Name: profile.Name, - Namespace: metav1.NamespaceNone, - } - Expect(r.Get(context.TODO(), key, updatedProfile)).ToNot(HaveOccurred()) - - degradedCondition := conditionsv1.FindStatusCondition(updatedProfile.Status.Conditions, conditionsv1.ConditionDegraded) - Expect(degradedCondition.Status).To(Equal(corev1.ConditionFalse)) - }) - - It("should remove outdated tuned objects", func() { - tunedOutdatedA, err := tuned.NewNodePerformance(profile) - Expect(err).ToNot(HaveOccurred()) - tunedOutdatedA.Name = "outdated-a" - tunedOutdatedA.OwnerReferences = []metav1.OwnerReference{ - {Name: profile.Name}, - } - tunedOutdatedB, err := tuned.NewNodePerformance(profile) - Expect(err).ToNot(HaveOccurred()) - tunedOutdatedB.Name = "outdated-b" - tunedOutdatedB.OwnerReferences = []metav1.OwnerReference{ - {Name: profile.Name}, - } - r := newFakeReconciler(profile, tunedOutdatedA, tunedOutdatedB, profileMCP) - - keyA := types.NamespacedName{ - Name: tunedOutdatedA.Name, - Namespace: tunedOutdatedA.Namespace, - } - ta := &tunedv1.Tuned{} - err = r.Get(context.TODO(), keyA, ta) - Expect(err).ToNot(HaveOccurred()) - - keyB := types.NamespacedName{ - Name: tunedOutdatedA.Name, - Namespace: tunedOutdatedA.Namespace, - } - tb := &tunedv1.Tuned{} - err = r.Get(context.TODO(), keyB, tb) - Expect(err).ToNot(HaveOccurred()) - - result, err := r.Reconcile(context.TODO(), request) - Expect(err).ToNot(HaveOccurred()) - Expect(result).To(Equal(reconcile.Result{})) - - tunedList := &tunedv1.TunedList{} - err = r.List(context.TODO(), tunedList) - Expect(err).ToNot(HaveOccurred()) - Expect(len(tunedList.Items)).To(Equal(1)) - tunedName := components.GetComponentName(profile.Name, components.ProfileNamePerformance) - Expect(tunedList.Items[0].Name).To(Equal(tunedName)) - }) - - It("should create nothing when pause annotation is set", func() { - profile.Annotations = map[string]string{performancev2.PerformanceProfilePauseAnnotation: "true"} - r := newFakeReconciler(profile, profileMCP) - - Expect(reconcileTimes(r, request, 1)).To(Equal(reconcile.Result{})) - - name := components.GetComponentName(profile.Name, components.ComponentNamePrefix) - key := types.NamespacedName{ - Name: name, - Namespace: metav1.NamespaceNone, - } - - // verify MachineConfig wasn't created - mc := &mcov1.MachineConfig{} - err := r.Get(context.TODO(), key, mc) - Expect(errors.IsNotFound(err)).To(BeTrue()) - - // verify that KubeletConfig wasn't created - kc := &mcov1.KubeletConfig{} - err = r.Get(context.TODO(), key, kc) - Expect(errors.IsNotFound(err)).To(BeTrue()) - - // verify no machine config pool was created - mcp := 
&mcov1.MachineConfigPool{} - err = r.Get(context.TODO(), key, mcp) - Expect(errors.IsNotFound(err)).To(BeTrue()) - - // verify tuned Performance wasn't created - tunedPerformance := &tunedv1.Tuned{} - key.Name = components.ProfileNamePerformance - key.Namespace = components.NamespaceNodeTuningOperator - err = r.Get(context.TODO(), key, tunedPerformance) - Expect(errors.IsNotFound(err)).To(BeTrue()) - - // verify that no RuntimeClass was created - runtimeClass := &nodev1beta1.RuntimeClass{} - err = r.Get(context.TODO(), key, runtimeClass) - Expect(errors.IsNotFound(err)).To(BeTrue()) - }) - - Context("when all components exist", func() { - var mc *mcov1.MachineConfig - var kc *mcov1.KubeletConfig - var tunedPerformance *tunedv1.Tuned - var runtimeClass *nodev1beta1.RuntimeClass - - BeforeEach(func() { - var err error - - mc, err = machineconfig.New(profile) - Expect(err).ToNot(HaveOccurred()) - - mcpSelectorKey, mcpSelectorValue := components.GetFirstKeyAndValue(profile.Spec.MachineConfigPoolSelector) - kc, err = kubeletconfig.New(profile, map[string]string{mcpSelectorKey: mcpSelectorValue}) - Expect(err).ToNot(HaveOccurred()) - - tunedPerformance, err = tuned.NewNodePerformance(profile) - Expect(err).ToNot(HaveOccurred()) - - runtimeClass = runtimeclass.New(profile, machineconfig.HighPerformanceRuntime) - }) - - It("should not record new create event", func() { - r := newFakeReconciler(profile, mc, kc, tunedPerformance, runtimeClass, profileMCP) - - Expect(reconcileTimes(r, request, 1)).To(Equal(reconcile.Result{})) - - // verify that no creation event created - fakeRecorder, ok := r.Recorder.(*record.FakeRecorder) - Expect(ok).To(BeTrue()) - - select { - case _ = <-fakeRecorder.Events: - Fail("the recorder should not have new events") - default: - } - }) - - It("should update MC when RT kernel gets disabled", func() { - profile.Spec.RealTimeKernel.Enabled = pointer.BoolPtr(false) - r := newFakeReconciler(profile, mc, kc, tunedPerformance, profileMCP) - - Expect(reconcileTimes(r, request, 1)).To(Equal(reconcile.Result{})) - - key := types.NamespacedName{ - Name: machineconfig.GetMachineConfigName(profile), - Namespace: metav1.NamespaceNone, - } - - // verify MachineConfig update - mc := &mcov1.MachineConfig{} - err := r.Get(context.TODO(), key, mc) - Expect(err).ToNot(HaveOccurred()) - - Expect(mc.Spec.KernelType).To(Equal(machineconfig.MCKernelDefault)) - }) - - It("should update MC, KC and Tuned when CPU params change", func() { - reserved := performancev2.CPUSet("0-1") - isolated := performancev2.CPUSet("2-3") - profile.Spec.CPU = &performancev2.CPU{ - Reserved: &reserved, - Isolated: &isolated, - } - - r := newFakeReconciler(profile, mc, kc, tunedPerformance, profileMCP) - - Expect(reconcileTimes(r, request, 1)).To(Equal(reconcile.Result{})) - - key := types.NamespacedName{ - Name: components.GetComponentName(profile.Name, components.ComponentNamePrefix), - Namespace: metav1.NamespaceNone, - } - - By("Verifying KC update for reserved") - kc := &mcov1.KubeletConfig{} - err := r.Get(context.TODO(), key, kc) - Expect(err).ToNot(HaveOccurred()) - Expect(string(kc.Spec.KubeletConfig.Raw)).To(ContainSubstring(fmt.Sprintf(`"reservedSystemCPUs":"%s"`, string(*profile.Spec.CPU.Reserved)))) - - By("Verifying Tuned update for isolated") - key = types.NamespacedName{ - Name: components.GetComponentName(profile.Name, components.ProfileNamePerformance), - Namespace: components.NamespaceNodeTuningOperator, - } - t := &tunedv1.Tuned{} - err = r.Get(context.TODO(), key, t) - 
Expect(err).ToNot(HaveOccurred()) - Expect(*t.Spec.Profile[0].Data).To(ContainSubstring("isolated_cores=" + string(*profile.Spec.CPU.Isolated))) - }) - - It("should add isolcpus with managed_irq flag to tuned profile when balanced set to true", func() { - reserved := performancev2.CPUSet("0-1") - isolated := performancev2.CPUSet("2-3") - profile.Spec.CPU = &performancev2.CPU{ - Reserved: &reserved, - Isolated: &isolated, - BalanceIsolated: pointer.BoolPtr(true), - } - - r := newFakeReconciler(profile, mc, kc, tunedPerformance, profileMCP) - - Expect(reconcileTimes(r, request, 1)).To(Equal(reconcile.Result{})) - - key := types.NamespacedName{ - Name: components.GetComponentName(profile.Name, components.ProfileNamePerformance), - Namespace: components.NamespaceNodeTuningOperator, - } - t := &tunedv1.Tuned{} - err := r.Get(context.TODO(), key, t) - Expect(err).ToNot(HaveOccurred()) - cmdlineRealtimeWithoutCPUBalancing := regexp.MustCompile(`\s*cmdline_realtime=\+\s*tsc=nowatchdog\s+intel_iommu=on\s+iommu=pt\s+isolcpus=managed_irq\s*`) - Expect(cmdlineRealtimeWithoutCPUBalancing.MatchString(*t.Spec.Profile[0].Data)).To(BeTrue()) - }) - - It("should add isolcpus with domain,managed_irq flags to tuned profile when balanced set to false", func() { - reserved := performancev2.CPUSet("0-1") - isolated := performancev2.CPUSet("2-3") - profile.Spec.CPU = &performancev2.CPU{ - Reserved: &reserved, - Isolated: &isolated, - BalanceIsolated: pointer.BoolPtr(false), - } - - r := newFakeReconciler(profile, mc, kc, tunedPerformance, profileMCP) - - Expect(reconcileTimes(r, request, 1)).To(Equal(reconcile.Result{})) - - key := types.NamespacedName{ - Name: components.GetComponentName(profile.Name, components.ProfileNamePerformance), - Namespace: components.NamespaceNodeTuningOperator, - } - t := &tunedv1.Tuned{} - err := r.Get(context.TODO(), key, t) - Expect(err).ToNot(HaveOccurred()) - cmdlineRealtimeWithoutCPUBalancing := regexp.MustCompile(`\s*cmdline_realtime=\+\s*tsc=nowatchdog\s+intel_iommu=on\s+iommu=pt\s+isolcpus=domain,managed_irq,\s*`) - Expect(cmdlineRealtimeWithoutCPUBalancing.MatchString(*t.Spec.Profile[0].Data)).To(BeTrue()) - }) - - It("should update MC when Hugepages params change without node added", func() { - size := performancev2.HugePageSize("2M") - profile.Spec.HugePages = &performancev2.HugePages{ - DefaultHugePagesSize: &size, - Pages: []performancev2.HugePage{ - { - Count: 8, - Size: size, - }, - }, - } - - r := newFakeReconciler(profile, mc, kc, tunedPerformance, profileMCP) - - Expect(reconcileTimes(r, request, 1)).To(Equal(reconcile.Result{})) - - By("Verifying Tuned profile update") - key := types.NamespacedName{ - Name: components.GetComponentName(profile.Name, components.ProfileNamePerformance), - Namespace: components.NamespaceNodeTuningOperator, - } - t := &tunedv1.Tuned{} - err := r.Get(context.TODO(), key, t) - Expect(err).ToNot(HaveOccurred()) - cmdlineHugepages := regexp.MustCompile(`\s*cmdline_hugepages=\+\s*default_hugepagesz=2M\s+hugepagesz=2M\s+hugepages=8\s*`) - Expect(cmdlineHugepages.MatchString(*t.Spec.Profile[0].Data)).To(BeTrue()) - }) - - It("should update Tuned when Hugepages params change with node added", func() { - size := performancev2.HugePageSize("2M") - profile.Spec.HugePages = &performancev2.HugePages{ - DefaultHugePagesSize: &size, - Pages: []performancev2.HugePage{ - { - Count: 8, - Size: size, - Node: pointer.Int32Ptr(0), - }, - }, - } - - r := newFakeReconciler(profile, mc, kc, tunedPerformance, profileMCP) - - Expect(reconcileTimes(r, request, 
1)).To(Equal(reconcile.Result{})) - - By("Verifying Tuned update") - key := types.NamespacedName{ - Name: components.GetComponentName(profile.Name, components.ProfileNamePerformance), - Namespace: components.NamespaceNodeTuningOperator, - } - t := &tunedv1.Tuned{} - err := r.Get(context.TODO(), key, t) - Expect(err).ToNot(HaveOccurred()) - cmdlineHugepages := regexp.MustCompile(`\s*cmdline_hugepages=\+\s*`) - Expect(cmdlineHugepages.MatchString(*t.Spec.Profile[0].Data)).To(BeTrue()) - - By("Verifying MC update") - key = types.NamespacedName{ - Name: machineconfig.GetMachineConfigName(profile), - Namespace: metav1.NamespaceNone, - } - mc := &mcov1.MachineConfig{} - err = r.Get(context.TODO(), key, mc) - Expect(err).ToNot(HaveOccurred()) - - config := &igntypes.Config{} - err = json.Unmarshal(mc.Spec.Config.Raw, config) - Expect(err).ToNot(HaveOccurred()) - - Expect(config.Systemd.Units).To(ContainElement(MatchFields(IgnoreMissing|IgnoreExtras, Fields{ - "Contents": And( - ContainSubstring("Description=Hugepages"), - ContainSubstring("Environment=HUGEPAGES_COUNT=8"), - ContainSubstring("Environment=HUGEPAGES_SIZE=2048"), - ContainSubstring("Environment=NUMA_NODE=0"), - ), - }))) - - }) - - It("should update status with generated tuned", func() { - r := newFakeReconciler(profile, mc, kc, tunedPerformance, profileMCP) - Expect(reconcileTimes(r, request, 1)).To(Equal(reconcile.Result{})) - key := types.NamespacedName{ - Name: components.GetComponentName(profile.Name, components.ProfileNamePerformance), - Namespace: components.NamespaceNodeTuningOperator, - } - t := &tunedv1.Tuned{} - err := r.Get(context.TODO(), key, t) - Expect(err).ToNot(HaveOccurred()) - tunedNamespacedName := namespacedName(t).String() - updatedProfile := &performancev2.PerformanceProfile{} - key = types.NamespacedName{ - Name: profile.Name, - Namespace: metav1.NamespaceNone, - } - Expect(r.Get(context.TODO(), key, updatedProfile)).ToNot(HaveOccurred()) - Expect(updatedProfile.Status.Tuned).NotTo(BeNil()) - Expect(*updatedProfile.Status.Tuned).To(Equal(tunedNamespacedName)) - }) - - It("should update status with generated runtime class", func() { - r := newFakeReconciler(profile, mc, kc, tunedPerformance, runtimeClass, profileMCP) - Expect(reconcileTimes(r, request, 1)).To(Equal(reconcile.Result{})) - - key := types.NamespacedName{ - Name: components.GetComponentName(profile.Name, components.ComponentNamePrefix), - Namespace: metav1.NamespaceAll, - } - runtimeClass := &nodev1beta1.RuntimeClass{} - err := r.Get(context.TODO(), key, runtimeClass) - Expect(err).ToNot(HaveOccurred()) - - updatedProfile := &performancev2.PerformanceProfile{} - key = types.NamespacedName{ - Name: profile.Name, - Namespace: metav1.NamespaceAll, - } - Expect(r.Get(context.TODO(), key, updatedProfile)).ToNot(HaveOccurred()) - Expect(updatedProfile.Status.RuntimeClass).NotTo(BeNil()) - Expect(*updatedProfile.Status.RuntimeClass).To(Equal(runtimeClass.Name)) - }) - - It("should update status when MCP is degraded", func() { - mcpReason := "mcpReason" - mcpMessage := "MCP message" - - mcp := &mcov1.MachineConfigPool{ - TypeMeta: metav1.TypeMeta{ - APIVersion: mcov1.GroupVersion.String(), - Kind: "MachineConfigPool", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "mcp-test", - Labels: map[string]string{ - testutils.MachineConfigPoolLabelKey: testutils.MachineConfigPoolLabelValue, - }, - }, - Spec: mcov1.MachineConfigPoolSpec{ - NodeSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"nodekey": "nodeValue"}, - }, - MachineConfigSelector: 
&metav1.LabelSelector{ - MatchExpressions: []metav1.LabelSelectorRequirement{ - { - Key: testutils.MachineConfigLabelKey, - Operator: metav1.LabelSelectorOpIn, - Values: []string{testutils.MachineConfigLabelValue}, - }, - }, - }, - }, - Status: mcov1.MachineConfigPoolStatus{ - Conditions: []mcov1.MachineConfigPoolCondition{ - { - Type: mcov1.MachineConfigPoolNodeDegraded, - Status: corev1.ConditionTrue, - Reason: mcpReason, - Message: mcpMessage, - }, - }, - }, - } - - r := newFakeReconciler(profile, mc, kc, tunedPerformance, mcp) - - Expect(reconcileTimes(r, request, 1)).To(Equal(reconcile.Result{})) - - updatedProfile := &performancev2.PerformanceProfile{} - key := types.NamespacedName{ - Name: profile.Name, - Namespace: metav1.NamespaceNone, - } - Expect(r.Get(context.TODO(), key, updatedProfile)).ToNot(HaveOccurred()) - - // verify performance profile status - Expect(len(updatedProfile.Status.Conditions)).To(Equal(4)) - - // verify profile conditions - degradedCondition := conditionsv1.FindStatusCondition(updatedProfile.Status.Conditions, conditionsv1.ConditionDegraded) - Expect(degradedCondition).ToNot(BeNil()) - Expect(degradedCondition.Status).To(Equal(corev1.ConditionTrue)) - Expect(degradedCondition.Reason).To(Equal(conditionReasonMCPDegraded)) - Expect(degradedCondition.Message).To(ContainSubstring(mcpMessage)) - }) - - It("should update status when TunedProfile is degraded", func() { - tunedReason := "tunedReason" - tunedMessage := "Tuned message" - - tuned := &tunedv1.Profile{ - ObjectMeta: metav1.ObjectMeta{ - Name: "tuned-profile-test", - }, - Status: tunedv1.ProfileStatus{ - Conditions: []tunedv1.ProfileStatusCondition{ - { - Type: tunedv1.TunedDegraded, - Status: corev1.ConditionTrue, - Reason: tunedReason, - Message: tunedMessage, - }, - { - Type: tunedv1.TunedProfileApplied, - Status: corev1.ConditionFalse, - Reason: tunedReason, - Message: tunedMessage, - }, - }, - }, - } - - nodes := &corev1.NodeList{ - Items: []corev1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "tuned-profile-test", - Labels: map[string]string{ - "nodekey": "nodeValue", - }, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "tuned-profile-test2", - }, - }, - }, - } - - r := newFakeReconciler(profile, mc, kc, tunedPerformance, tuned, nodes, profileMCP) - - Expect(reconcileTimes(r, request, 1)).To(Equal(reconcile.Result{})) - - updatedProfile := &performancev2.PerformanceProfile{} - key := types.NamespacedName{ - Name: profile.Name, - Namespace: metav1.NamespaceNone, - } - Expect(r.Get(context.TODO(), key, updatedProfile)).ToNot(HaveOccurred()) - - // verify performance profile status - Expect(len(updatedProfile.Status.Conditions)).To(Equal(4)) - - // verify profile conditions - degradedCondition := conditionsv1.FindStatusCondition(updatedProfile.Status.Conditions, conditionsv1.ConditionDegraded) - Expect(degradedCondition).ToNot(BeNil()) - Expect(degradedCondition.Status).To(Equal(corev1.ConditionTrue)) - Expect(degradedCondition.Reason).To(Equal(conditionReasonTunedDegraded)) - Expect(degradedCondition.Message).To(ContainSubstring(tunedMessage)) - }) - }) - - When("the provided machine config labels are different from one specified under the machine config pool", func() { - It("should move the performance profile to the degraded state", func() { - profileMCP.Spec.MachineConfigSelector = &metav1.LabelSelector{ - MatchLabels: map[string]string{"wrongKey": "bad"}, - } - r := newFakeReconciler(profile, profileMCP) - Expect(reconcileTimes(r, request, 1)).To(Equal(reconcile.Result{})) - - 
updatedProfile := &performancev2.PerformanceProfile{} - key := types.NamespacedName{ - Name: profile.Name, - Namespace: metav1.NamespaceNone, - } - Expect(r.Get(context.TODO(), key, updatedProfile)).ToNot(HaveOccurred()) - - // verify performance profile status - Expect(len(updatedProfile.Status.Conditions)).To(Equal(4)) - - // verify profile conditions - degradedCondition := conditionsv1.FindStatusCondition(updatedProfile.Status.Conditions, conditionsv1.ConditionDegraded) - Expect(degradedCondition).ToNot(BeNil()) - Expect(degradedCondition.Status).To(Equal(corev1.ConditionTrue)) - Expect(degradedCondition.Reason).To(Equal(conditionBadMachineConfigLabels)) - Expect(degradedCondition.Message).To(ContainSubstring("provided via profile.spec.machineConfigLabel do not match the MachineConfigPool")) - }) - }) - - When("the generated machine config labels are different from one specified under the machine config pool", func() { - It("should move the performance profile to the degraded state", func() { - profileMCP.Spec.MachineConfigSelector = &metav1.LabelSelector{ - MatchLabels: map[string]string{"wrongKey": "bad"}, - } - profile.Spec.MachineConfigLabel = nil - r := newFakeReconciler(profile, profileMCP) - Expect(reconcileTimes(r, request, 1)).To(Equal(reconcile.Result{})) - - updatedProfile := &performancev2.PerformanceProfile{} - key := types.NamespacedName{ - Name: profile.Name, - Namespace: metav1.NamespaceNone, - } - Expect(r.Get(context.TODO(), key, updatedProfile)).ToNot(HaveOccurred()) - - // verify performance profile status - Expect(len(updatedProfile.Status.Conditions)).To(Equal(4)) - - // verify profile conditions - degradedCondition := conditionsv1.FindStatusCondition(updatedProfile.Status.Conditions, conditionsv1.ConditionDegraded) - Expect(degradedCondition).ToNot(BeNil()) - Expect(degradedCondition.Status).To(Equal(corev1.ConditionTrue)) - Expect(degradedCondition.Reason).To(Equal(conditionBadMachineConfigLabels)) - Expect(degradedCondition.Message).To(ContainSubstring("generated from the profile.spec.nodeSelector")) - }) - }) - }) - - Context("with profile with deletion timestamp", func() { - BeforeEach(func() { - profile.DeletionTimestamp = &metav1.Time{ - Time: time.Now(), - } - profile.Finalizers = append(profile.Finalizers, finalizer) - }) - - It("should remove all components and remove the finalizer on first reconcile loop", func() { - mc, err := machineconfig.New(profile) - Expect(err).ToNot(HaveOccurred()) - - mcpSelectorKey, mcpSelectorValue := components.GetFirstKeyAndValue(profile.Spec.MachineConfigPoolSelector) - kc, err := kubeletconfig.New(profile, map[string]string{mcpSelectorKey: mcpSelectorValue}) - Expect(err).ToNot(HaveOccurred()) - - tunedPerformance, err := tuned.NewNodePerformance(profile) - Expect(err).ToNot(HaveOccurred()) - - runtimeClass := runtimeclass.New(profile, machineconfig.HighPerformanceRuntime) - - r := newFakeReconciler(profile, mc, kc, tunedPerformance, runtimeClass, profileMCP) - result, err := r.Reconcile(context.TODO(), request) - Expect(err).ToNot(HaveOccurred()) - Expect(result).To(Equal(reconcile.Result{})) - - // verify that controller deleted all components - name := components.GetComponentName(profile.Name, components.ComponentNamePrefix) - key := types.NamespacedName{ - Name: name, - Namespace: metav1.NamespaceNone, - } - - // verify MachineConfig deletion - err = r.Get(context.TODO(), key, mc) - Expect(errors.IsNotFound(err)).To(Equal(true)) - - // verify KubeletConfig deletion - err = r.Get(context.TODO(), key, kc) - 
Expect(errors.IsNotFound(err)).To(Equal(true)) - - // verify RuntimeClass deletion - err = r.Get(context.TODO(), key, runtimeClass) - Expect(errors.IsNotFound(err)).To(Equal(true)) - - // verify tuned real-time kernel deletion - key.Name = components.GetComponentName(profile.Name, components.ProfileNamePerformance) - key.Namespace = components.NamespaceNodeTuningOperator - err = r.Get(context.TODO(), key, tunedPerformance) - Expect(errors.IsNotFound(err)).To(Equal(true)) - - // verify profile deletion - key.Name = profile.Name - key.Namespace = metav1.NamespaceNone - updatedProfile := &performancev2.PerformanceProfile{} - err = r.Get(context.TODO(), key, updatedProfile) - Expect(errors.IsNotFound(err)).To(Equal(true)) - }) - }) - - It("should map machine config pool to the performance profile", func() { - mcp := &mcov1.MachineConfigPool{ - TypeMeta: metav1.TypeMeta{ - APIVersion: mcov1.GroupVersion.String(), - Kind: "MachineConfigPool", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "mcp-test", - }, - Spec: mcov1.MachineConfigPoolSpec{ - NodeSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"nodekey": "nodeValue"}, - }, - MachineConfigSelector: &metav1.LabelSelector{ - MatchExpressions: []metav1.LabelSelectorRequirement{ - { - Key: testutils.MachineConfigLabelKey, - Operator: metav1.LabelSelectorOpIn, - Values: []string{testutils.MachineConfigLabelValue}, - }, - }, - }, - }, - } - r := newFakeReconciler(profile, mcp) - requests := r.mcpToPerformanceProfile(mcp) - Expect(requests).NotTo(BeEmpty()) - Expect(requests[0].Name).To(Equal(profile.Name)) - }) -}) - -func reconcileTimes(reconciler *PerformanceProfileReconciler, request reconcile.Request, times int) reconcile.Result { - var result reconcile.Result - var err error - for i := 0; i < times; i++ { - result, err = reconciler.Reconcile(context.TODO(), request) - Expect(err).ToNot(HaveOccurred()) - } - return result -} - -// newFakeReconciler returns a new reconcile.Reconciler with a fake client -func newFakeReconciler(initObjects ...runtime.Object) *PerformanceProfileReconciler { - fakeClient := fake.NewClientBuilder().WithScheme(scheme.Scheme).WithRuntimeObjects(initObjects...).Build() - fakeRecorder := record.NewFakeRecorder(10) - return &PerformanceProfileReconciler{ - Client: fakeClient, - Scheme: scheme.Scheme, - Recorder: fakeRecorder, - } -} diff --git a/pkg/pao/controller/resources.go b/pkg/pao/controller/resources.go deleted file mode 100644 index 4830836a5..000000000 --- a/pkg/pao/controller/resources.go +++ /dev/null @@ -1,331 +0,0 @@ -package controller - -import ( - "context" - "encoding/json" - "reflect" - - tunedv1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/tuned/v1" - mcov1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" - - nodev1beta1 "k8s.io/api/node/v1beta1" - apiequality "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/klog" - kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1" -) - -func mergeMaps(src map[string]string, dst map[string]string) { - for k, v := range src { - // NOTE: it will override destination values - dst[k] = v - } -} - -// TODO: we should merge all create, get and delete methods - -func (r *PerformanceProfileReconciler) getMachineConfig(name string) (*mcov1.MachineConfig, error) { - mc := &mcov1.MachineConfig{} - key := types.NamespacedName{ - Name: name, - Namespace: metav1.NamespaceNone, - } - if err 
:= r.Get(context.TODO(), key, mc); err != nil { - return nil, err - } - return mc, nil -} - -func (r *PerformanceProfileReconciler) getMutatedMachineConfig(mc *mcov1.MachineConfig) (*mcov1.MachineConfig, error) { - existing, err := r.getMachineConfig(mc.Name) - if errors.IsNotFound(err) { - return mc, nil - } - - if err != nil { - return nil, err - } - - mutated := existing.DeepCopy() - mergeMaps(mc.Annotations, mutated.Annotations) - mergeMaps(mc.Labels, mutated.Labels) - mutated.Spec = mc.Spec - - // we do not need to update if it no change between mutated and existing object - if reflect.DeepEqual(existing.Spec, mutated.Spec) && - apiequality.Semantic.DeepEqual(existing.Labels, mutated.Labels) && - apiequality.Semantic.DeepEqual(existing.Annotations, mutated.Annotations) { - return nil, nil - } - - return mutated, nil -} - -func (r *PerformanceProfileReconciler) createOrUpdateMachineConfig(mc *mcov1.MachineConfig) error { - _, err := r.getMachineConfig(mc.Name) - if errors.IsNotFound(err) { - klog.Infof("Create machine-config %q", mc.Name) - if err := r.Create(context.TODO(), mc); err != nil { - return err - } - return nil - } - - if err != nil { - return err - } - - klog.Infof("Update machine-config %q", mc.Name) - return r.Update(context.TODO(), mc) -} - -func (r *PerformanceProfileReconciler) deleteMachineConfig(name string) error { - mc, err := r.getMachineConfig(name) - if errors.IsNotFound(err) { - return nil - } - if err != nil { - return err - } - return r.Delete(context.TODO(), mc) -} - -func (r *PerformanceProfileReconciler) getKubeletConfig(name string) (*mcov1.KubeletConfig, error) { - kc := &mcov1.KubeletConfig{} - key := types.NamespacedName{ - Name: name, - Namespace: metav1.NamespaceNone, - } - if err := r.Get(context.TODO(), key, kc); err != nil { - return nil, err - } - return kc, nil -} - -func (r *PerformanceProfileReconciler) getMutatedKubeletConfig(kc *mcov1.KubeletConfig) (*mcov1.KubeletConfig, error) { - existing, err := r.getKubeletConfig(kc.Name) - if errors.IsNotFound(err) { - return kc, nil - } - - if err != nil { - return nil, err - } - - mutated := existing.DeepCopy() - mergeMaps(kc.Annotations, mutated.Annotations) - mergeMaps(kc.Labels, mutated.Labels) - mutated.Spec = kc.Spec - - existingKubeletConfig := &kubeletconfigv1beta1.KubeletConfiguration{} - err = json.Unmarshal(existing.Spec.KubeletConfig.Raw, existingKubeletConfig) - if err != nil { - return nil, err - } - - mutatedKubeletConfig := &kubeletconfigv1beta1.KubeletConfiguration{} - err = json.Unmarshal(mutated.Spec.KubeletConfig.Raw, mutatedKubeletConfig) - if err != nil { - return nil, err - } - - // we do not need to update if it no change between mutated and existing object - if apiequality.Semantic.DeepEqual(existingKubeletConfig, mutatedKubeletConfig) && - apiequality.Semantic.DeepEqual(existing.Spec.MachineConfigPoolSelector, mutated.Spec.MachineConfigPoolSelector) && - apiequality.Semantic.DeepEqual(existing.Labels, mutated.Labels) && - apiequality.Semantic.DeepEqual(existing.Annotations, mutated.Annotations) { - return nil, nil - } - - return mutated, nil -} - -func (r *PerformanceProfileReconciler) createOrUpdateKubeletConfig(kc *mcov1.KubeletConfig) error { - _, err := r.getKubeletConfig(kc.Name) - if errors.IsNotFound(err) { - klog.Infof("Create kubelet-config %q", kc.Name) - if err := r.Create(context.TODO(), kc); err != nil { - return err - } - return nil - } - - if err != nil { - return err - } - - klog.Infof("Update kubelet-config %q", kc.Name) - return r.Update(context.TODO(), kc) 
-} - -func (r *PerformanceProfileReconciler) deleteKubeletConfig(name string) error { - kc, err := r.getKubeletConfig(name) - if errors.IsNotFound(err) { - return nil - } - if err != nil { - return err - } - return r.Delete(context.TODO(), kc) -} - -func (r *PerformanceProfileReconciler) getTuned(name string, namespace string) (*tunedv1.Tuned, error) { - tuned := &tunedv1.Tuned{} - key := types.NamespacedName{ - Name: name, - Namespace: namespace, - } - if err := r.Get(context.TODO(), key, tuned); err != nil { - return nil, err - } - return tuned, nil -} - -func (r *PerformanceProfileReconciler) getMutatedTuned(tuned *tunedv1.Tuned) (*tunedv1.Tuned, error) { - existing, err := r.getTuned(tuned.Name, tuned.Namespace) - if errors.IsNotFound(err) { - return tuned, nil - } - - if err != nil { - return nil, err - } - - mutated := existing.DeepCopy() - mergeMaps(tuned.Annotations, mutated.Annotations) - mergeMaps(tuned.Labels, mutated.Labels) - mutated.Spec = tuned.Spec - - // we do not need to update if it no change between mutated and existing object - if apiequality.Semantic.DeepEqual(existing.Spec, mutated.Spec) && - apiequality.Semantic.DeepEqual(existing.Labels, mutated.Labels) && - apiequality.Semantic.DeepEqual(existing.Annotations, mutated.Annotations) { - return nil, nil - } - - return mutated, nil -} - -func (r *PerformanceProfileReconciler) createOrUpdateTuned(tuned *tunedv1.Tuned, profileName string) error { - - if err := r.removeOutdatedTuned(tuned, profileName); err != nil { - return err - } - - _, err := r.getTuned(tuned.Name, tuned.Namespace) - if errors.IsNotFound(err) { - klog.Infof("Create tuned %q under the namespace %q", tuned.Name, tuned.Namespace) - if err := r.Create(context.TODO(), tuned); err != nil { - return err - } - return nil - } - - if err != nil { - return err - } - - klog.Infof("Update tuned %q under the namespace %q", tuned.Name, tuned.Namespace) - return r.Update(context.TODO(), tuned) -} - -func (r *PerformanceProfileReconciler) removeOutdatedTuned(tuned *tunedv1.Tuned, profileName string) error { - tunedList := &tunedv1.TunedList{} - if err := r.List(context.TODO(), tunedList); err != nil { - klog.Errorf("Unable to list tuned objects for outdated removal procedure: %v", err) - return err - } - - for t := range tunedList.Items { - tunedItem := tunedList.Items[t] - ownerReferences := tunedItem.ObjectMeta.OwnerReferences - for o := range ownerReferences { - if ownerReferences[o].Name == profileName && tunedItem.Name != tuned.Name { - if err := r.deleteTuned(tunedItem.Name, tunedItem.Namespace); err != nil { - return err - } - } - } - } - return nil -} - -func (r *PerformanceProfileReconciler) deleteTuned(name string, namespace string) error { - tuned, err := r.getTuned(name, namespace) - if errors.IsNotFound(err) { - return nil - } - if err != nil { - return err - } - return r.Delete(context.TODO(), tuned) -} - -func (r *PerformanceProfileReconciler) getRuntimeClass(name string) (*nodev1beta1.RuntimeClass, error) { - runtimeClass := &nodev1beta1.RuntimeClass{} - key := types.NamespacedName{ - Name: name, - } - if err := r.Get(context.TODO(), key, runtimeClass); err != nil { - return nil, err - } - return runtimeClass, nil -} - -func (r *PerformanceProfileReconciler) getMutatedRuntimeClass(runtimeClass *nodev1beta1.RuntimeClass) (*nodev1beta1.RuntimeClass, error) { - existing, err := r.getRuntimeClass(runtimeClass.Name) - if errors.IsNotFound(err) { - return runtimeClass, nil - } - - if err != nil { - return nil, err - } - - mutated := existing.DeepCopy() - 
mergeMaps(runtimeClass.Annotations, mutated.Annotations) - mergeMaps(runtimeClass.Labels, mutated.Labels) - mutated.Handler = runtimeClass.Handler - mutated.Scheduling = runtimeClass.Scheduling - - // we do not need to update if it no change between mutated and existing object - if apiequality.Semantic.DeepEqual(existing.Handler, mutated.Handler) && - apiequality.Semantic.DeepEqual(existing.Scheduling, mutated.Scheduling) && - apiequality.Semantic.DeepEqual(existing.Labels, mutated.Labels) && - apiequality.Semantic.DeepEqual(existing.Annotations, mutated.Annotations) { - return nil, nil - } - - return mutated, nil -} - -func (r *PerformanceProfileReconciler) createOrUpdateRuntimeClass(runtimeClass *nodev1beta1.RuntimeClass) error { - _, err := r.getRuntimeClass(runtimeClass.Name) - if errors.IsNotFound(err) { - klog.Infof("Create runtime class %q", runtimeClass.Name) - if err := r.Create(context.TODO(), runtimeClass); err != nil { - return err - } - return nil - } - - if err != nil { - return err - } - - klog.Infof("Update runtime class %q", runtimeClass.Name) - return r.Update(context.TODO(), runtimeClass) -} - -func (r *PerformanceProfileReconciler) deleteRuntimeClass(name string) error { - runtimeClass, err := r.getRuntimeClass(name) - if errors.IsNotFound(err) { - return nil - } - if err != nil { - return err - } - return r.Delete(context.TODO(), runtimeClass) -} diff --git a/pkg/pao/controller/status.go b/pkg/pao/controller/status.go deleted file mode 100644 index 09b8858f1..000000000 --- a/pkg/pao/controller/status.go +++ /dev/null @@ -1,296 +0,0 @@ -package controller - -import ( - "bytes" - "context" - "time" - - performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2" - tunedv1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/tuned/v1" - "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components" - conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" - mcov1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/types" - "k8s.io/klog" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -const ( - conditionFailedToFindMachineConfigPool = "GettingMachineConfigPoolFailed" - conditionBadMachineConfigLabels = "BadMachineConfigLabels" - conditionReasonComponentsCreationFailed = "ComponentCreationFailed" - conditionReasonMCPDegraded = "MCPDegraded" - conditionFailedGettingMCPStatus = "GettingMCPStatusFailed" - conditionKubeletFailed = "KubeletConfig failure" - conditionFailedGettingKubeletStatus = "GettingKubeletStatusFailed" - conditionReasonTunedDegraded = "TunedProfileDegraded" - conditionFailedGettingTunedProfileStatus = "GettingTunedStatusFailed" -) - -func (r *PerformanceProfileReconciler) updateStatus(profile *performancev2.PerformanceProfile, conditions []conditionsv1.Condition) error { - profileCopy := profile.DeepCopy() - - if conditions != nil { - profileCopy.Status.Conditions = conditions - } - - // check if we need to update the status - modified := false - - // since we always set the same four conditions, we don't need to check if we need to remove old conditions - for _, newCondition := range profileCopy.Status.Conditions { - oldCondition := conditionsv1.FindStatusCondition(profile.Status.Conditions, newCondition.Type) - if oldCondition == nil { - modified = true - 
break - } - - // ignore timestamps to avoid infinite reconcile loops - if oldCondition.Status != newCondition.Status || - oldCondition.Reason != newCondition.Reason || - oldCondition.Message != newCondition.Message { - - modified = true - break - } - } - - if profileCopy.Status.Tuned == nil { - tunedNamespacedname := types.NamespacedName{ - Name: components.GetComponentName(profile.Name, components.ProfileNamePerformance), - Namespace: components.NamespaceNodeTuningOperator, - } - tunedStatus := tunedNamespacedname.String() - profileCopy.Status.Tuned = &tunedStatus - modified = true - } - - if profileCopy.Status.RuntimeClass == nil { - runtimeClassName := components.GetComponentName(profile.Name, components.ComponentNamePrefix) - profileCopy.Status.RuntimeClass = &runtimeClassName - modified = true - } - - if !modified { - return nil - } - - klog.Infof("Updating the performance profile %q status", profile.Name) - return r.Status().Update(context.TODO(), profileCopy) -} - -func (r *PerformanceProfileReconciler) getAvailableConditions() []conditionsv1.Condition { - now := time.Now() - return []conditionsv1.Condition{ - { - Type: conditionsv1.ConditionAvailable, - Status: corev1.ConditionTrue, - LastTransitionTime: metav1.Time{Time: now}, - LastHeartbeatTime: metav1.Time{Time: now}, - }, - { - Type: conditionsv1.ConditionUpgradeable, - Status: corev1.ConditionTrue, - LastTransitionTime: metav1.Time{Time: now}, - LastHeartbeatTime: metav1.Time{Time: now}, - }, - { - Type: conditionsv1.ConditionProgressing, - Status: corev1.ConditionFalse, - LastTransitionTime: metav1.Time{Time: now}, - LastHeartbeatTime: metav1.Time{Time: now}, - }, - { - Type: conditionsv1.ConditionDegraded, - Status: corev1.ConditionFalse, - LastTransitionTime: metav1.Time{Time: now}, - LastHeartbeatTime: metav1.Time{Time: now}, - }, - } -} - -func (r *PerformanceProfileReconciler) getDegradedConditions(reason string, message string) []conditionsv1.Condition { - now := time.Now() - return []conditionsv1.Condition{ - { - Type: conditionsv1.ConditionAvailable, - Status: corev1.ConditionFalse, - LastTransitionTime: metav1.Time{Time: now}, - LastHeartbeatTime: metav1.Time{Time: now}, - }, - { - Type: conditionsv1.ConditionUpgradeable, - Status: corev1.ConditionFalse, - LastTransitionTime: metav1.Time{Time: now}, - LastHeartbeatTime: metav1.Time{Time: now}, - }, - { - Type: conditionsv1.ConditionProgressing, - Status: corev1.ConditionFalse, - LastTransitionTime: metav1.Time{Time: now}, - LastHeartbeatTime: metav1.Time{Time: now}, - }, - { - Type: conditionsv1.ConditionDegraded, - Status: corev1.ConditionTrue, - LastTransitionTime: metav1.Time{Time: now}, - LastHeartbeatTime: metav1.Time{Time: now}, - Reason: reason, - Message: message, - }, - } -} - -func (r *PerformanceProfileReconciler) getProgressingConditions(reason string, message string) []conditionsv1.Condition { - now := time.Now() - - return []conditionsv1.Condition{ - { - Type: conditionsv1.ConditionAvailable, - Status: corev1.ConditionFalse, - LastTransitionTime: metav1.Time{Time: now}, - }, - { - Type: conditionsv1.ConditionUpgradeable, - Status: corev1.ConditionFalse, - LastTransitionTime: metav1.Time{Time: now}, - }, - { - Type: conditionsv1.ConditionProgressing, - Status: corev1.ConditionTrue, - LastTransitionTime: metav1.Time{Time: now}, - Reason: reason, - Message: message, - }, - { - Type: conditionsv1.ConditionDegraded, - Status: corev1.ConditionFalse, - LastTransitionTime: metav1.Time{Time: now}, - }, - } -} - -func (r *PerformanceProfileReconciler) 
getMCPDegradedCondition(profileMCP *mcov1.MachineConfigPool) ([]conditionsv1.Condition, error) { - message := bytes.Buffer{} - for _, condition := range profileMCP.Status.Conditions { - if (condition.Type == mcov1.MachineConfigPoolNodeDegraded || condition.Type == mcov1.MachineConfigPoolRenderDegraded) && condition.Status == corev1.ConditionTrue { - if len(condition.Reason) > 0 { - message.WriteString("Machine config pool " + profileMCP.Name + " Degraded Reason: " + condition.Reason + ".\n") - } - if len(condition.Message) > 0 { - message.WriteString("Machine config pool " + profileMCP.Name + " Degraded Message: " + condition.Message + ".\n") - } - } - } - - messageString := message.String() - if len(messageString) == 0 { - return nil, nil - } - - return r.getDegradedConditions(conditionReasonMCPDegraded, messageString), nil -} - -func (r *PerformanceProfileReconciler) getKubeletConditionsByProfile(profile *performancev2.PerformanceProfile) ([]conditionsv1.Condition, error) { - name := components.GetComponentName(profile.Name, components.ComponentNamePrefix) - kc, err := r.getKubeletConfig(name) - - // do not drop an error when kubelet config does not exist - if errors.IsNotFound(err) { - return nil, nil - } - - if err != nil { - return nil, err - } - - latestCondition := getLatestKubeletConfigCondition(kc.Status.Conditions) - if latestCondition == nil { - return nil, nil - } - - if latestCondition.Type != mcov1.KubeletConfigFailure { - return nil, nil - } - - return r.getDegradedConditions(conditionKubeletFailed, latestCondition.Message), nil -} - -func (r *PerformanceProfileReconciler) getTunedConditionsByProfile(profile *performancev2.PerformanceProfile) ([]conditionsv1.Condition, error) { - tunedProfileList := &tunedv1.ProfileList{} - if err := r.List(context.TODO(), tunedProfileList); err != nil { - klog.Errorf("Cannot list Tuned Profiles to match with profile %q : %v", profile.Name, err) - return nil, err - } - - selector := labels.SelectorFromSet(profile.Spec.NodeSelector) - nodes := &corev1.NodeList{} - if err := r.List(context.TODO(), nodes, &client.ListOptions{LabelSelector: selector}); err != nil { - return nil, err - } - - // remove Tuned profiles that are not associate with this perfomance profile - // Tuned profile's name and node's name should be equal - filtered := removeUnMatchedTunedProfiles(nodes.Items, tunedProfileList.Items) - message := bytes.Buffer{} - for _, tunedProfile := range filtered { - isDegraded := false - isApplied := true - var tunedDegradedCondition *tunedv1.ProfileStatusCondition - - for _, condition := range tunedProfile.Status.Conditions { - if (condition.Type == tunedv1.TunedDegraded) && condition.Status == corev1.ConditionTrue { - isDegraded = true - tunedDegradedCondition = &condition - } - - if (condition.Type == tunedv1.TunedProfileApplied) && condition.Status == corev1.ConditionFalse { - isApplied = false - } - } - // We need both condition to exists, - // since there is a scenario where both Degraded & Applied condition are true - if isDegraded == true && isApplied == false { - if len(tunedDegradedCondition.Reason) > 0 { - message.WriteString("Tuned " + tunedProfile.GetName() + " Degraded Reason: " + tunedDegradedCondition.Reason + ".\n") - } - if len(tunedDegradedCondition.Message) > 0 { - message.WriteString("Tuned " + tunedProfile.GetName() + " Degraded Message: " + tunedDegradedCondition.Message + ".\n") - } - } - } - - messageString := message.String() - if len(messageString) == 0 { - return nil, nil - } - - return 
r.getDegradedConditions(conditionReasonTunedDegraded, messageString), nil -} - -func getLatestKubeletConfigCondition(conditions []mcov1.KubeletConfigCondition) *mcov1.KubeletConfigCondition { - var latestCondition *mcov1.KubeletConfigCondition - for i := 0; i < len(conditions); i++ { - if latestCondition == nil || latestCondition.LastTransitionTime.Before(&conditions[i].LastTransitionTime) { - latestCondition = &conditions[i] - } - } - return latestCondition -} - -func removeUnMatchedTunedProfiles(nodes []corev1.Node, profiles []tunedv1.Profile) []tunedv1.Profile { - filteredProfiles := make([]tunedv1.Profile, 0) - for _, profile := range profiles { - for _, node := range nodes { - if profile.Name == node.Name { - filteredProfiles = append(filteredProfiles, profile) - break - } - } - } - return filteredProfiles -} diff --git a/pkg/pao/profilecreator/helper.go b/pkg/pao/profilecreator/helper.go deleted file mode 100644 index a3ee64c63..000000000 --- a/pkg/pao/profilecreator/helper.go +++ /dev/null @@ -1,18 +0,0 @@ -package profilecreator - -import ( - v1 "k8s.io/api/core/v1" -) - -func newTestNode(nodeName string) *v1.Node { - n := v1.Node{} - n.Name = nodeName - return &n -} -func newTestNodeList(nodes ...*v1.Node) []*v1.Node { - nodeList := make([]*v1.Node, 0) - for _, node := range nodes { - nodeList = append(nodeList, node) - } - return nodeList -} diff --git a/pkg/pao/profilecreator/mcp.go b/pkg/pao/profilecreator/mcp.go deleted file mode 100644 index 2ef37269e..000000000 --- a/pkg/pao/profilecreator/mcp.go +++ /dev/null @@ -1,197 +0,0 @@ -package profilecreator - -import ( - "fmt" - "strings" - - log "github.com/sirupsen/logrus" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - - mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" -) - -// GetMCPSelector returns a label that is unique to the target pool, error otherwise -func GetMCPSelector(pool *mcfgv1.MachineConfigPool, clusterPools []*mcfgv1.MachineConfigPool) (map[string]string, error) { - mcpSelector := make(map[string]string) - - // go over all the labels to find the unique ones - for key, value := range pool.Labels { - unique := true - for _, mcp := range clusterPools { - if mcp.Name == pool.Name { - continue - } - if mcpValue, found := mcp.Labels[key]; found { - if value == mcpValue { - unique = false - break - } - } - } - if unique { - mcpSelector[key] = value - } - } - - if len(mcpSelector) == 0 { - return nil, fmt.Errorf("can't find a unique label for '%s' MCP", pool.Name) - } - - // find a label that includes the MCP name - if len(mcpSelector) > 1 { - for key, value := range mcpSelector { - if strings.HasSuffix(key, pool.Name) { - mcpSelector = make(map[string]string) - mcpSelector[key] = value - break - } - } - } - - // pick a single unique label - if len(mcpSelector) > 1 { - for key, value := range mcpSelector { - mcpSelector = make(map[string]string) - mcpSelector[key] = value - break - } - } - - return mcpSelector, nil -} - -// GetNodesForPool returns the nodes belonging to the input mcp -// Adapted (including dependencies) from: -// https://github.com/openshift/machine-config-operator/blob/e4aa3bc5a405c67fb112b24e24b2c372457b3358/pkg/controller/node/node_controller.go#L745 -func GetNodesForPool(pool *mcfgv1.MachineConfigPool, clusterPools []*mcfgv1.MachineConfigPool, clusterNodes []*corev1.Node) ([]*corev1.Node, error) { - var nodes []*corev1.Node - - poolNodeSelector, err := 
metav1.LabelSelectorAsSelector(pool.Spec.NodeSelector) - if err != nil { - return nil, fmt.Errorf("invalid label selector: %v", err) - } - - for _, n := range clusterNodes { - p, err := getPrimaryPoolForNode(n, clusterPools) - if err != nil { - log.Warningf("can't get pool for node %q: %v", n.Name, err) - continue - } - if p == nil { - continue - } - if p.Name != pool.Name { - continue - } - var unschedulable bool - for _, taint := range n.Spec.Taints { - if taint.Effect == corev1.TaintEffectNoSchedule && poolNodeSelector.Matches(labels.Set{taint.Key: taint.Value}) { - unschedulable = true - break - } - } - if unschedulable { - continue - } - nodes = append(nodes, n) - } - return nodes, nil -} - -// getPrimaryPoolForNode uses getPoolsForNode and returns the first one which is the one the node targets -func getPrimaryPoolForNode(node *corev1.Node, clusterPools []*mcfgv1.MachineConfigPool) (*mcfgv1.MachineConfigPool, error) { - pools, err := getPoolsForNode(node, clusterPools) - if err != nil { - return nil, err - } - if pools == nil { - return nil, nil - } - return pools[0], nil -} - -// getPoolsForNode chooses the MachineConfigPools that should be used for a given node. -// It disambiguates in the case where e.g. a node has both master/worker roles applied, -// and where a custom role may be used. It returns a slice of all the pools the node belongs to. -// It also ignores the Windows nodes. -func getPoolsForNode(node *corev1.Node, clusterPools []*mcfgv1.MachineConfigPool) ([]*mcfgv1.MachineConfigPool, error) { - if isWindows(node) { - // This is not an error, is this a Windows Node and it won't be managed by MCO. We're explicitly logging - // here at a high level to disambiguate this from other pools = nil scenario - log.Infof("Node %v is a windows node so won't be managed by MCO", node.Name) - return nil, nil - } - - var pools []*mcfgv1.MachineConfigPool - for _, p := range clusterPools { - selector, err := metav1.LabelSelectorAsSelector(p.Spec.NodeSelector) - if err != nil { - return nil, fmt.Errorf("invalid label selector: %v", err) - } - - // If a pool with a nil or empty selector creeps in, it should match nothing, not everything. - if selector.Empty() || !selector.Matches(labels.Set(node.Labels)) { - continue - } - - pools = append(pools, p) - } - - if len(pools) == 0 { - // This is not an error, as there might be nodes in cluster that are not managed by machineconfigpool. - return nil, nil - } - - var master, worker *mcfgv1.MachineConfigPool - var custom []*mcfgv1.MachineConfigPool - for _, pool := range pools { - if pool.Name == "master" { - master = pool - } else if pool.Name == "worker" { - worker = pool - } else { - custom = append(custom, pool) - } - } - - if len(custom) > 1 { - return nil, fmt.Errorf("node %s belongs to %d custom roles, cannot proceed with this Node", node.Name, len(custom)) - } else if len(custom) == 1 { - // We don't support making custom pools for masters - if master != nil { - return nil, fmt.Errorf("node %s has both master role and custom role %s", node.Name, custom[0].Name) - } - // One custom role, let's use its pool - pls := []*mcfgv1.MachineConfigPool{custom[0]} - if worker != nil { - pls = append(pls, worker) - } - return pls, nil - } else if master != nil { - // In the case where a node is both master/worker, have it live under - // the master pool. This occurs in CodeReadyContainers and general - // "single node" deployments, which one may want to do for testing bare - // metal, etc. 
- return []*mcfgv1.MachineConfigPool{master}, nil - } - - // Otherwise, it's a worker with no custom roles. - return []*mcfgv1.MachineConfigPool{worker}, nil -} - -// isWindows checks if given node is a Windows node or a Linux node -func isWindows(node *corev1.Node) bool { - windowsOsValue := "windows" - if value, ok := node.ObjectMeta.Labels["kubernetes.io/os"]; ok { - if value == windowsOsValue { - return true - } - return false - } - // All the nodes should have a OS label populated by kubelet, if not just to maintain - // backwards compatibility, we can returning true here. - return false -} diff --git a/pkg/pao/profilecreator/profilecreator.go b/pkg/pao/profilecreator/profilecreator.go deleted file mode 100644 index dd556d69e..000000000 --- a/pkg/pao/profilecreator/profilecreator.go +++ /dev/null @@ -1,556 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright 2021 Red Hat, Inc. - */ - -package profilecreator - -import ( - "fmt" - "io/ioutil" - "os" - "path" - "path/filepath" - "reflect" - "sort" - "strings" - - "github.com/jaypipes/ghw" - "github.com/jaypipes/ghw/pkg/cpu" - "github.com/jaypipes/ghw/pkg/option" - "github.com/jaypipes/ghw/pkg/topology" - log "github.com/sirupsen/logrus" - - k8syaml "k8s.io/apimachinery/pkg/util/yaml" - "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" - - machineconfigv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" - v1 "k8s.io/api/core/v1" -) - -const ( - // ClusterScopedResources defines the subpath, relative to the top-level must-gather directory. 
- // A top-level must-gather directory is of the following format: - // must-gather-dir/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256- - // Here we find the cluster-scoped definitions saved by must-gather - ClusterScopedResources = "cluster-scoped-resources" - // CoreNodes defines the subpath, relative to ClusterScopedResources, on which we find node-specific data - CoreNodes = "core/nodes" - // MCPools defines the subpath, relative to ClusterScopedResources, on which we find the machine config pool definitions - MCPools = "machineconfiguration.openshift.io/machineconfigpools" - // YAMLSuffix is the extension of the yaml files saved by must-gather - YAMLSuffix = ".yaml" - // Nodes defines the subpath, relative to top-level must-gather directory, on which we find node-specific data - Nodes = "nodes" - // SysInfoFileName defines the name of the file where ghw snapshot is stored - SysInfoFileName = "sysinfo.tgz" - // noSMTKernelArg is the kernel arg value to disable SMT in a system - noSMTKernelArg = "nosmt" - // allCores correspond to the value when all the processorCores need to be added to the generated CPUset - allCores = -1 -) - -var ( - // ValidPowerConsumptionModes are a set of valid power consumption modes - // default => no args - // low-latency => "nmi_watchdog=0", "audit=0", "mce=off" - // ultra-low-latency: low-latency values + "processor.max_cstate=1", "intel_idle.max_cstate=0", "idle=poll" - // For more information on CPU "C-states" please refer to https://gist.github.com/wmealing/2dd2b543c4d3cff6cab7 - ValidPowerConsumptionModes = []string{"default", "low-latency", "ultra-low-latency"} - lowLatencyKernelArgs = map[string]bool{"nmi_watchdog=0": true, "audit=0": true, "mce=off": true} - ultraLowLatencyKernelArgs = map[string]bool{"processor.max_cstate=1": true, "intel_idle.max_cstate=0": true, "idle=poll": true} -) - -func getMustGatherFullPathsWithFilter(mustGatherPath string, suffix string, filter string) (string, error) { - var paths []string - - // don't assume directory names, only look for the suffix, filter out files having "filter" in their names - err := filepath.Walk(mustGatherPath, func(path string, info os.FileInfo, err error) error { - if strings.HasSuffix(path, suffix) { - if len(filter) == 0 || !strings.Contains(path, filter) { - paths = append(paths, path) - } - } - return nil - }) - if err != nil { - return "", fmt.Errorf("failed to get the path mustGatherPath:%s, suffix:%s %v", mustGatherPath, suffix, err) - } - - if len(paths) == 0 { - return "", fmt.Errorf("no match for the specified must gather directory path: %s and suffix: %s", mustGatherPath, suffix) - - } - if len(paths) > 1 { - log.Infof("Multiple matches for the specified must gather directory path: %s and suffix: %s", mustGatherPath, suffix) - return "", fmt.Errorf("Multiple matches for the specified must gather directory path: %s and suffix: %s.\n Expected only one performance-addon-operator-must-gather* directory, please check the must-gather tarball", mustGatherPath, suffix) - } - // returning one possible path - return paths[0], err -} - -func getMustGatherFullPaths(mustGatherPath string, suffix string) (string, error) { - return getMustGatherFullPathsWithFilter(mustGatherPath, suffix, "") -} - -func getNode(mustGatherDirPath, nodeName string) (*v1.Node, error) { - var node v1.Node - nodePathSuffix := path.Join(ClusterScopedResources, CoreNodes, nodeName) - path, err := getMustGatherFullPaths(mustGatherDirPath, nodePathSuffix) - if err != nil { - return nil, fmt.Errorf("failed 
to get MachineConfigPool for %s: %v", nodeName, err) - } - - src, err := os.Open(path) - if err != nil { - return nil, fmt.Errorf("failed to open %q: %v", path, err) - } - defer src.Close() - - dec := k8syaml.NewYAMLOrJSONDecoder(src, 1024) - if err := dec.Decode(&node); err != nil { - return nil, fmt.Errorf("failed to decode %q: %v", path, err) - } - return &node, nil -} - -// GetNodeList returns the list of nodes using the Node YAMLs stored in Must Gather -func GetNodeList(mustGatherDirPath string) ([]*v1.Node, error) { - machines := make([]*v1.Node, 0) - - nodePathSuffix := path.Join(ClusterScopedResources, CoreNodes) - nodePath, err := getMustGatherFullPaths(mustGatherDirPath, nodePathSuffix) - if err != nil { - return nil, fmt.Errorf("failed to get Nodes from must gather directory: %v", err) - } - if nodePath == "" { - return nil, fmt.Errorf("failed to get Nodes from must gather directory: %v", err) - } - - nodes, err := ioutil.ReadDir(nodePath) - if err != nil { - return nil, fmt.Errorf("failed to list mustGatherPath directories: %v", err) - } - for _, node := range nodes { - nodeName := node.Name() - node, err := getNode(mustGatherDirPath, nodeName) - if err != nil { - return nil, fmt.Errorf("failed to get Nodes %s: %v", nodeName, err) - } - machines = append(machines, node) - } - return machines, nil -} - -// GetMCPList returns the list of MCPs using the mcp YAMLs stored in Must Gather -func GetMCPList(mustGatherDirPath string) ([]*machineconfigv1.MachineConfigPool, error) { - pools := make([]*machineconfigv1.MachineConfigPool, 0) - - mcpPathSuffix := path.Join(ClusterScopedResources, MCPools) - mcpPath, err := getMustGatherFullPaths(mustGatherDirPath, mcpPathSuffix) - if err != nil { - return nil, fmt.Errorf("failed to get MCPs: %v", err) - } - if mcpPath == "" { - return nil, fmt.Errorf("failed to get MCPs path: %v", err) - } - - mcpFiles, err := ioutil.ReadDir(mcpPath) - if err != nil { - return nil, fmt.Errorf("failed to list mustGatherPath directories: %v", err) - } - for _, mcp := range mcpFiles { - mcpName := strings.TrimSuffix(mcp.Name(), filepath.Ext(mcp.Name())) - - mcp, err := GetMCP(mustGatherDirPath, mcpName) - // master pool relevant only when pods can be scheduled on masters, e.g. 
SNO - if mcpName != "master" && err != nil { - return nil, fmt.Errorf("can't obtain MCP %s: %v", mcpName, err) - } - pools = append(pools, mcp) - } - return pools, nil -} - -// GetMCP returns an MCP object corresponding to a specified MCP Name -func GetMCP(mustGatherDirPath, mcpName string) (*machineconfigv1.MachineConfigPool, error) { - var mcp machineconfigv1.MachineConfigPool - - mcpPathSuffix := path.Join(ClusterScopedResources, MCPools, mcpName+YAMLSuffix) - mcpPath, err := getMustGatherFullPaths(mustGatherDirPath, mcpPathSuffix) - if err != nil { - return nil, fmt.Errorf("failed to obtain MachineConfigPool %s: %v", mcpName, err) - } - if mcpPath == "" { - return nil, fmt.Errorf("failed to obtain MachineConfigPool, mcp:%s does not exist: %v", mcpName, err) - } - - src, err := os.Open(mcpPath) - if err != nil { - return nil, fmt.Errorf("failed to open %q: %v", mcpPath, err) - } - defer src.Close() - dec := k8syaml.NewYAMLOrJSONDecoder(src, 1024) - if err := dec.Decode(&mcp); err != nil { - return nil, fmt.Errorf("failed to decode %q: %v", mcpPath, err) - } - return &mcp, nil -} - -// NewGHWHandler is a handler to use ghw options corresponding to a node -func NewGHWHandler(mustGatherDirPath string, node *v1.Node) (*GHWHandler, error) { - nodeName := node.GetName() - nodePathSuffix := path.Join(Nodes) - nodepath, err := getMustGatherFullPathsWithFilter(mustGatherDirPath, nodePathSuffix, ClusterScopedResources) - if err != nil { - return nil, fmt.Errorf("can't obtain the node path %s: %v", nodeName, err) - } - _, err = os.Stat(path.Join(nodepath, nodeName, SysInfoFileName)) - if err != nil { - return nil, fmt.Errorf("can't obtain the path: %s for node %s: %v", nodeName, nodepath, err) - } - options := ghw.WithSnapshot(ghw.SnapshotOptions{ - Path: path.Join(nodepath, nodeName, SysInfoFileName), - }) - ghwHandler := &GHWHandler{snapShotOptions: options, Node: node} - return ghwHandler, nil -} - -// GHWHandler is a wrapper around ghw to get the API object -type GHWHandler struct { - snapShotOptions *option.Option - Node *v1.Node -} - -// CPU returns a CPUInfo struct that contains information about the CPUs on the host system -func (ghwHandler GHWHandler) CPU() (*cpu.Info, error) { - return ghw.CPU(ghwHandler.snapShotOptions) -} - -// SortedTopology returns a TopologyInfo struct that contains information about the Topology sorted by numa ids and cpu ids on the host system -func (ghwHandler GHWHandler) SortedTopology() (*topology.Info, error) { - topologyInfo, err := ghw.Topology(ghwHandler.snapShotOptions) - if err != nil { - return nil, fmt.Errorf("can't obtain topology info from GHW snapshot: %v", err) - } - sort.Slice(topologyInfo.Nodes, func(x, y int) bool { - return topologyInfo.Nodes[x].ID < topologyInfo.Nodes[y].ID - }) - for _, node := range topologyInfo.Nodes { - for _, core := range node.Cores { - sort.Slice(core.LogicalProcessors, func(x, y int) bool { - return core.LogicalProcessors[x] < core.LogicalProcessors[y] - }) - } - sort.Slice(node.Cores, func(i, j int) bool { - return node.Cores[i].LogicalProcessors[0] < node.Cores[j].LogicalProcessors[0] - }) - } - return topologyInfo, nil -} - -// topologyHTDisabled returns topologyinfo in case Hyperthreading needs to be disabled. -// It receives a pointer to Topology.Info and deletes logicalprocessors from individual cores. -// The behaviour of this function depends on ghw data representation. 
-func topologyHTDisabled(info *topology.Info) *topology.Info { - disabledHTTopology := &topology.Info{ - Architecture: info.Architecture, - } - newNodes := []*topology.Node{} - for _, node := range info.Nodes { - var newNode *topology.Node - cores := []*cpu.ProcessorCore{} - for _, processorCore := range node.Cores { - newCore := cpu.ProcessorCore{ID: processorCore.ID, - Index: processorCore.Index, - NumThreads: 1, - } - // LogicalProcessors is a slice of ints representing the logical processor IDs assigned to - // a processing unit for a core. GHW API gurantees that the logicalProcessors correspond - // to hyperthread pairs and in the code below we select only the first hyperthread (id=0) - // of the available logical processors. - for id, logicalProcessor := range processorCore.LogicalProcessors { - // Please refer to https://www.kernel.org/doc/Documentation/x86/topology.txt for more information on - // x86 hardware topology. This document clarifies the main aspects of x86 topology modelling and - // representation in the linux kernel and explains why we select id=0 for obtaining the first - // hyperthread (logical core). - if id == 0 { - newCore.LogicalProcessors = []int{logicalProcessor} - cores = append(cores, &newCore) - } - } - newNode = &topology.Node{Cores: cores, - ID: node.ID, - } - } - newNodes = append(newNodes, newNode) - disabledHTTopology.Nodes = newNodes - } - return disabledHTTopology -} - -// GetReservedAndIsolatedCPUs returns Reserved and Isolated CPUs -func (ghwHandler GHWHandler) GetReservedAndIsolatedCPUs(reservedCPUCount int, splitReservedCPUsAcrossNUMA bool, disableHTFlag bool) (cpuset.CPUSet, cpuset.CPUSet, error) { - cpuInfo, err := ghwHandler.CPU() - if err != nil { - return cpuset.CPUSet{}, cpuset.CPUSet{}, fmt.Errorf("can't obtain CPU info from GHW snapshot: %v", err) - } - - if reservedCPUCount <= 0 || reservedCPUCount >= int(cpuInfo.TotalThreads) { - return cpuset.CPUSet{}, cpuset.CPUSet{}, fmt.Errorf("please specify the reserved CPU count in the range [1,%d]", cpuInfo.TotalThreads-1) - } - topologyInfo, err := ghwHandler.SortedTopology() - if err != nil { - return cpuset.CPUSet{}, cpuset.CPUSet{}, fmt.Errorf("can't obtain Topology Info from GHW snapshot: %v", err) - } - htEnabled, err := ghwHandler.IsHyperthreadingEnabled() - if err != nil { - return cpuset.CPUSet{}, cpuset.CPUSet{}, fmt.Errorf("can't determine if Hyperthreading is enabled or not: %v", err) - } - //currently HT is enabled on the system and the user wants to disable HT - if htEnabled && disableHTFlag { - htEnabled = false - log.Infof("Currently hyperthreading is enabled and the performance profile will disable it") - topologyInfo = topologyHTDisabled(topologyInfo) - - } - log.Infof("NUMA cell(s): %d", len(topologyInfo.Nodes)) - totalCPUs := 0 - for id, node := range topologyInfo.Nodes { - coreList := []int{} - for _, core := range node.Cores { - coreList = append(coreList, core.LogicalProcessors...) 
- } - log.Infof("NUMA cell %d : %v", id, coreList) - totalCPUs += len(coreList) - } - - log.Infof("CPU(s): %d", totalCPUs) - - if splitReservedCPUsAcrossNUMA { - return ghwHandler.getCPUsSplitAcrossNUMA(reservedCPUCount, htEnabled, topologyInfo.Nodes) - } - return ghwHandler.getCPUsSequentially(reservedCPUCount, htEnabled, topologyInfo.Nodes) -} - -type cpuAccumulator struct { - builder *cpuset.Builder - count int -} - -func newCPUAccumulator() *cpuAccumulator { - return &cpuAccumulator{ - builder: cpuset.NewBuilder(), - } -} - -// AddCores adds logical cores from the slice of *cpu.ProcessorCore to a CPUset till the cpuset size is equal to the max value specified -// In case the max is specified as allCores, all the cores from the slice of *cpu.ProcessorCore are added to the CPUSet -func (ca *cpuAccumulator) AddCores(max int, cores []*cpu.ProcessorCore) { - for _, processorCore := range cores { - for _, core := range processorCore.LogicalProcessors { - if ca.count < max || max == allCores { - ca.builder.Add(core) - ca.count++ - } - } - } -} - -func (ca *cpuAccumulator) Result() cpuset.CPUSet { - return ca.builder.Result() -} - -// getCPUsSplitAcrossNUMA returns Reserved and Isolated CPUs split across NUMA nodes -// We identify the right number of CPUs that need to be allocated per NUMA node, meaning reservedPerNuma + (the additional number based on the remainder and the NUMA node) -// E.g. If the user requests 15 reserved cpus and we have 4 numa nodes, we find reservedPerNuma in this case is 3 and remainder = 3. -// For each numa node we find a max which keeps track of the cumulative resources that should be allocated for each NUMA node: -// max = (numaID+1)*reservedPerNuma + (numaNodeNum - remainder) -// For NUMA node 0 max = (0+1)*3 + 4-3 = 4 remainder is decremented => remainder is 2 -// For NUMA node 1 max = (1+1)*3 + 4-2 = 8 remainder is decremented => remainder is 1 -// For NUMA node 2 max = (2+1)*3 + 4-2 = 12 remainder is decremented => remainder is 0 -// For NUMA Node 3 remainder = 0 so max = 12 + 3 = 15. 
-func (ghwHandler GHWHandler) getCPUsSplitAcrossNUMA(reservedCPUCount int, htEnabled bool, topologyInfoNodes []*topology.Node) (cpuset.CPUSet, cpuset.CPUSet, error) { - reservedCPUs := newCPUAccumulator() - var isolatedCPUSet cpuset.CPUSet - numaNodeNum := len(topologyInfoNodes) - - max := 0 - reservedPerNuma := reservedCPUCount / numaNodeNum - remainder := reservedCPUCount % numaNodeNum - if remainder != 0 { - log.Warnf("The reserved CPUs cannot be split equally across NUMA Nodes") - } - for numaID, node := range topologyInfoNodes { - if remainder != 0 { - max = (numaID+1)*reservedPerNuma + (numaNodeNum - remainder) - remainder-- - } else { - max = max + reservedPerNuma - } - if max%2 != 0 && htEnabled { - return reservedCPUs.Result(), isolatedCPUSet, fmt.Errorf("can't allocate odd number of CPUs from a NUMA Node") - } - reservedCPUs.AddCores(max, node.Cores) - } - totalCPUSet := totalCPUSetFromTopology(topologyInfoNodes) - reservedCPUSet := reservedCPUs.Result() - isolatedCPUSet = totalCPUSet.Difference(reservedCPUSet) - return reservedCPUSet, isolatedCPUSet, nil -} - -// getCPUsSequentially returns Reserved and Isolated CPUs sequentially -func (ghwHandler GHWHandler) getCPUsSequentially(reservedCPUCount int, htEnabled bool, topologyInfoNodes []*topology.Node) (cpuset.CPUSet, cpuset.CPUSet, error) { - reservedCPUs := newCPUAccumulator() - var isolatedCPUSet cpuset.CPUSet - if reservedCPUCount%2 != 0 && htEnabled { - return reservedCPUs.Result(), isolatedCPUSet, fmt.Errorf("can't allocate odd number of CPUs from a NUMA Node") - } - for _, node := range topologyInfoNodes { - reservedCPUs.AddCores(reservedCPUCount, node.Cores) - } - totalCPUSet := totalCPUSetFromTopology(topologyInfoNodes) - reservedCPUSet := reservedCPUs.Result() - isolatedCPUSet = totalCPUSet.Difference(reservedCPUSet) - return reservedCPUSet, isolatedCPUSet, nil -} - -func totalCPUSetFromTopology(topologyInfoNodes []*topology.Node) cpuset.CPUSet { - totalCPUs := newCPUAccumulator() - for _, node := range topologyInfoNodes { - //all the cores from node.Cores need to be added, hence allCores is specified as the max value - totalCPUs.AddCores(allCores, node.Cores) - } - return totalCPUs.Result() -} - -// IsHyperthreadingEnabled checks if hyperthreading is enabled on the system or not -func (ghwHandler GHWHandler) IsHyperthreadingEnabled() (bool, error) { - cpuInfo, err := ghwHandler.CPU() - if err != nil { - return false, fmt.Errorf("can't obtain CPU Info from GHW snapshot: %v", err) - } - // Since there is no way to disable flags per-processor (not system wide) we check the flags of the first available processor. - // A following implementation will leverage the /sys/devices/system/cpu/smt/active file which is the "standard" way to query HT. 
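
As a rough illustration of the sysfs approach the comment above alludes to, here is a hypothetical sketch that is not part of the reverted code; it assumes /sys/devices/system/cpu/smt/active reports "1" when SMT is active and "0" otherwise.

package main

import (
	"fmt"
	"os"
	"strings"
)

// isSMTActive queries the kernel's SMT state directly from sysfs instead of
// inspecting per-processor capability flags.
func isSMTActive() (bool, error) {
	data, err := os.ReadFile("/sys/devices/system/cpu/smt/active")
	if err != nil {
		return false, fmt.Errorf("can't read SMT state from sysfs: %w", err)
	}
	return strings.TrimSpace(string(data)) == "1", nil
}

func main() {
	active, err := isSMTActive()
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("SMT active:", active)
}
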
- return contains(cpuInfo.Processors[0].Capabilities, "ht"), nil -} - -// contains checks if a string is present in a slice -func contains(s []string, str string) bool { - for _, v := range s { - if v == str { - return true - } - } - return false -} - -// EnsureNodesHaveTheSameHardware returns an error if all the input nodes do not have the same hardware configuration -func EnsureNodesHaveTheSameHardware(nodeHandlers []*GHWHandler) error { - if len(nodeHandlers) < 1 { - return fmt.Errorf("no suitable nodes to compare") - } - - firstHandle := nodeHandlers[0] - firstTopology, err := firstHandle.SortedTopology() - if err != nil { - return fmt.Errorf("can't obtain Topology info from GHW snapshot for %s: %v", firstHandle.Node.GetName(), err) - } - - for _, handle := range nodeHandlers[1:] { - if err != nil { - return fmt.Errorf("can't obtain GHW snapshot handle for %s: %v", handle.Node.GetName(), err) - } - - topology, err := handle.SortedTopology() - if err != nil { - return fmt.Errorf("can't obtain Topology info from GHW snapshot for %s: %v", handle.Node.GetName(), err) - } - err = ensureSameTopology(firstTopology, topology) - if err != nil { - return fmt.Errorf("nodes %s and %s have different topology: %v", firstHandle.Node.GetName(), handle.Node.GetName(), err) - } - } - - return nil -} - -func ensureSameTopology(topology1, topology2 *topology.Info) error { - if topology1.Architecture != topology2.Architecture { - return fmt.Errorf("the architecture is different: %v vs %v", topology1.Architecture, topology2.Architecture) - } - - if len(topology1.Nodes) != len(topology2.Nodes) { - return fmt.Errorf("the number of NUMA nodes differ: %v vs %v", len(topology1.Nodes), len(topology2.Nodes)) - } - - for i, node1 := range topology1.Nodes { - node2 := topology2.Nodes[i] - if node1.ID != node2.ID { - return fmt.Errorf("the NUMA node ids differ: %v vs %v", node1.ID, node2.ID) - } - - cores1 := node1.Cores - cores2 := node2.Cores - if len(cores1) != len(cores2) { - return fmt.Errorf("the number of CPU cores in NUMA node %d differ: %v vs %v", - node1.ID, len(topology1.Nodes), len(topology2.Nodes)) - } - - for j, core1 := range cores1 { - if !reflect.DeepEqual(core1, cores2[j]) { - return fmt.Errorf("the CPU corres differ: %v vs %v", core1, cores2[j]) - } - } - } - - return nil -} - -// GetAdditionalKernelArgs returns a set of kernel parameters based on the power mode -func GetAdditionalKernelArgs(powerMode string, disableHT bool) []string { - kernelArgsSet := make(map[string]bool) - kernelArgsSlice := make([]string, 0, 6) - switch powerMode { - //default - case ValidPowerConsumptionModes[0]: - kernelArgsSlice = []string{} - //low-latency - case ValidPowerConsumptionModes[1]: - for arg, exist := range lowLatencyKernelArgs { - kernelArgsSet[arg] = exist - } - //ultra-low-latency - case ValidPowerConsumptionModes[2]: - //computing the union for two sets (lowLatencyKernelArgs,ultraLowLatencyKernelArgs) - for arg, exist := range lowLatencyKernelArgs { - kernelArgsSet[arg] = exist - } - for arg, exist := range ultraLowLatencyKernelArgs { - kernelArgsSet[arg] = exist - } - } - - for arg, exist := range kernelArgsSet { - if exist { - kernelArgsSlice = append(kernelArgsSlice, arg) - } - } - if disableHT { - kernelArgsSlice = append(kernelArgsSlice, noSMTKernelArg) - } - sort.Strings(kernelArgsSlice) - log.Infof("Additional Kernel Args based on the power consumption mode (%s):%v", powerMode, kernelArgsSlice) - return kernelArgsSlice -} diff --git a/pkg/pao/profilecreator/profilecreator_suite_test.go 
b/pkg/pao/profilecreator/profilecreator_suite_test.go deleted file mode 100644 index 861f71249..000000000 --- a/pkg/pao/profilecreator/profilecreator_suite_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package profilecreator - -import ( - "testing" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" -) - -func TestProfileCreator(t *testing.T) { - RegisterFailHandler(Fail) - RunSpecs(t, "Profile Creator Suite") -} diff --git a/pkg/pao/profilecreator/profilecreator_test.go b/pkg/pao/profilecreator/profilecreator_test.go deleted file mode 100644 index 8fadc73cd..000000000 --- a/pkg/pao/profilecreator/profilecreator_test.go +++ /dev/null @@ -1,844 +0,0 @@ -package profilecreator - -import ( - "path/filepath" - "sort" - - "github.com/jaypipes/ghw/pkg/cpu" - "github.com/jaypipes/ghw/pkg/topology" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components" - - mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" - v1 "k8s.io/api/core/v1" -) - -const ( - mustGatherDirPath = "../../../test/e2e/pao/testdata/must-gather/must-gather.bare-metal" - mustGatherSNODirPath = "../../../test/e2e/pao/testdata/must-gather/must-gather.sno" -) - -var _ = Describe("PerformanceProfileCreator: MCP and Node Matching", func() { - var nodes []*v1.Node - var mcps []*mcfgv1.MachineConfigPool - - BeforeEach(func() { - var err error - - nodes, err = GetNodeList(mustGatherDirPath) - Expect(err).ToNot(HaveOccurred()) - mcps, err = GetMCPList(mustGatherDirPath) - Expect(err).ToNot(HaveOccurred()) - }) - - Context("Identifying Nodes targeted by MCP", func() { - It("should find one machine in cnf-worker MCP", func() { - mcp, err := GetMCP(mustGatherDirPath, "worker-cnf") - Expect(err).ToNot(HaveOccurred()) - - matchedNodes, err := GetNodesForPool(mcp, mcps, nodes) - Expect(err).ToNot(HaveOccurred()) - Expect(matchedNodes).ToNot(BeNil()) - Expect(len(matchedNodes)).To(Equal(1)) - Expect(matchedNodes[0].GetName()).To(Equal("worker1")) - }) - It("should find 1 machine in worker MCP", func() { - mcp, err := GetMCP(mustGatherDirPath, "worker") - Expect(err).ToNot(HaveOccurred()) - - matchedNodes, err := GetNodesForPool(mcp, mcps, nodes) - Expect(err).ToNot(HaveOccurred()) - Expect(matchedNodes).ToNot(BeNil()) - Expect(len(matchedNodes)).To(Equal(1)) - Expect(matchedNodes[0].GetName()).To(Equal("worker2")) - }) - }) - - Context("Ensure the correct MCP selector is used", func() { - It("should detect the cnf-worker MCP selector", func() { - mcp, err := GetMCP(mustGatherDirPath, "worker-cnf") - Expect(err).ToNot(HaveOccurred()) - - mcpSelector, err := GetMCPSelector(mcp, mcps) - Expect(err).ToNot(HaveOccurred()) - Expect(len(mcpSelector)).To(Equal(1)) - - for key, value := range mcpSelector { - Expect(key).To(Equal("machineconfiguration.openshift.io/role")) - Expect(value).To(Equal("worker-cnf")) - break - } - }) - - It("should detect the worker MCP selector", func() { - mcp, err := GetMCP(mustGatherDirPath, "worker") - Expect(err).ToNot(HaveOccurred()) - - mcpSelector, err := GetMCPSelector(mcp, mcps) - Expect(err).ToNot(HaveOccurred()) - Expect(len(mcpSelector)).To(Equal(1)) - - for key, value := range mcpSelector { - Expect(key).To(Equal("pools.operator.machineconfiguration.openshift.io/worker")) - Expect(value).To(Equal("")) - break - } - }) - }) -}) - -var _ = Describe("PerformanceProfileCreator: MCP and Node Matching in SNO", func() { - var nodes []*v1.Node - var mcps 
[]*mcfgv1.MachineConfigPool - - BeforeEach(func() { - var err error - - nodes, err = GetNodeList(mustGatherSNODirPath) - Expect(err).ToNot(HaveOccurred()) - mcps, err = GetMCPList(mustGatherSNODirPath) - Expect(err).ToNot(HaveOccurred()) - }) - - Context("Identifying Nodes targeted by MCP in SNO", func() { - It("should find no nodes in worker MCP", func() { - mcp, err := GetMCP(mustGatherSNODirPath, "worker") - Expect(err).ToNot(HaveOccurred()) - - matchedNodes, err := GetNodesForPool(mcp, mcps, nodes) - Expect(err).ToNot(HaveOccurred()) - Expect(len(matchedNodes)).To(Equal(0)) - }) - It("should find 1 machine in master MCP", func() { - mcp, err := GetMCP(mustGatherSNODirPath, "master") - Expect(err).ToNot(HaveOccurred()) - - matchedNodes, err := GetNodesForPool(mcp, mcps, nodes) - Expect(err).ToNot(HaveOccurred()) - Expect(matchedNodes).ToNot(BeNil()) - Expect(len(matchedNodes)).To(Equal(1)) - Expect(matchedNodes[0].GetName()).To(Equal("ocp47sno-master-0.demo.lab")) - }) - }) - - Context("Ensure the correct MCP selector is used in SNO", func() { - It("should detect the worker MCP selector", func() { - mcp, err := GetMCP(mustGatherSNODirPath, "worker") - Expect(err).ToNot(HaveOccurred()) - - mcpSelector, err := GetMCPSelector(mcp, mcps) - Expect(err).ToNot(HaveOccurred()) - Expect(len(mcpSelector)).To(Equal(1)) - - for key, value := range mcpSelector { - Expect(key).To(Equal("pools.operator.machineconfiguration.openshift.io/worker")) - Expect(value).To(Equal("")) - break - } - }) - It("should detect the master MCP selector", func() { - mcp, err := GetMCP(mustGatherSNODirPath, "master") - Expect(err).ToNot(HaveOccurred()) - - mcpSelector, err := GetMCPSelector(mcp, mcps) - Expect(err).ToNot(HaveOccurred()) - Expect(len(mcpSelector)).To(Equal(1)) - - for key, value := range mcpSelector { - Expect(key).To(Equal("pools.operator.machineconfiguration.openshift.io/master")) - Expect(value).To(Equal("")) - break - } - }) - }) -}) - -var _ = Describe("PerformanceProfileCreator: Getting MCP from Must Gather", func() { - var mcpName, mcpNodeSelectorKey, mustGatherDirAbsolutePath string - var err error - Context("Identifying Nodes targetted by MCP", func() { - It("gets the MCP successfully", func() { - mcpName = "worker-cnf" - mcpNodeSelectorKey = "node-role.kubernetes.io/worker-cnf" - mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath) - Expect(err).ToNot(HaveOccurred()) - mcp, err := GetMCP(mustGatherDirAbsolutePath, mcpName) - k, _ := components.GetFirstKeyAndValue(mcp.Spec.NodeSelector.MatchLabels) - Expect(err).ToNot(HaveOccurred()) - Expect(k).To(Equal(mcpNodeSelectorKey)) - }) - It("fails to get MCP as an MCP with that name doesn't exist", func() { - mcpName = "foo" - mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath) - mcp, err := GetMCP(mustGatherDirAbsolutePath, mcpName) - Expect(mcp).To(BeNil()) - Expect(err).To(HaveOccurred()) - }) - It("fails to get MCP due to misconfigured must-gather path", func() { - mcpName = "worker-cnf" - mustGatherDirAbsolutePath, err = filepath.Abs("foo-path") - Expect(err).ToNot(HaveOccurred()) - _, err := GetMCP(mustGatherDirAbsolutePath, mcpName) - Expect(err).To(HaveOccurred()) - }) - - }) -}) - -var _ = Describe("PerformanceProfileCreator: Getting Nodes from Must Gather", func() { - var mustGatherDirAbsolutePath string - var err error - - Context("Identifying Nodes in the cluster", func() { - It("gets the Nodes successfully", func() { - mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath) - 
Expect(err).ToNot(HaveOccurred()) - nodes, err := GetNodeList(mustGatherDirAbsolutePath) - Expect(err).ToNot(HaveOccurred()) - Expect(len(nodes)).To(Equal(5)) - }) - It("fails to get Nodes due to misconfigured must-gather path", func() { - mustGatherDirAbsolutePath, err = filepath.Abs("foo-path") - _, err := GetNodeList(mustGatherDirAbsolutePath) - Expect(err).To(HaveOccurred()) - }) - - }) -}) - -var _ = Describe("PerformanceProfileCreator: Consuming GHW Snapshot from Must Gather", func() { - var mustGatherDirAbsolutePath string - var node *v1.Node - var err error - - Context("Identifying Nodes Info of the nodes cluster", func() { - It("gets the Nodes Info successfully", func() { - node = newTestNode("worker1") - mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath) - Expect(err).ToNot(HaveOccurred()) - handle, err := NewGHWHandler(mustGatherDirAbsolutePath, node) - Expect(err).ToNot(HaveOccurred()) - cpuInfo, err := handle.CPU() - Expect(err).ToNot(HaveOccurred()) - Expect(len(cpuInfo.Processors)).To(Equal(2)) - Expect(int(cpuInfo.TotalCores)).To(Equal(40)) - Expect(int(cpuInfo.TotalThreads)).To(Equal(80)) - topologyInfo, err := handle.SortedTopology() - Expect(err).ToNot(HaveOccurred()) - Expect(len(topologyInfo.Nodes)).To(Equal(2)) - }) - It("fails to get Nodes Info due to misconfigured must-gather path", func() { - mustGatherDirAbsolutePath, err = filepath.Abs("foo-path") - _, err := NewGHWHandler(mustGatherDirAbsolutePath, node) - Expect(err).To(HaveOccurred()) - }) - It("fails to get Nodes Info for a node that does not exist", func() { - node = newTestNode("foo") - mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath) - _, err := NewGHWHandler(mustGatherDirAbsolutePath, node) - Expect(err).To(HaveOccurred()) - }) - - }) -}) - -var _ = Describe("PerformanceProfileCreator: Populating Reserved and Isolated CPUs in Performance Profile", func() { - var mustGatherDirAbsolutePath string - var node *v1.Node - var handle *GHWHandler - var splitReservedCPUsAcrossNUMA, disableHT bool - var reservedCPUCount int - var err error - - BeforeEach(func() { - node = newTestNode("worker1") - }) - Context("Check if reserved and isolated CPUs are properly populated in the performance profile", func() { - It("Ensure reserved CPUs populated are correctly when splitReservedCPUsAcrossNUMA is disabled and disableHT is disabled", func() { - reservedCPUCount = 20 // random number, no special meaning - splitReservedCPUsAcrossNUMA = false - disableHT = false - mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath) - Expect(err).ToNot(HaveOccurred()) - handle, err = NewGHWHandler(mustGatherDirAbsolutePath, node) - Expect(err).ToNot(HaveOccurred()) - reservedCPUSet, isolatedCPUSet, err := handle.GetReservedAndIsolatedCPUs(reservedCPUCount, splitReservedCPUsAcrossNUMA, disableHT) - Expect(err).ToNot(HaveOccurred()) - Expect(reservedCPUSet.String()).To(Equal("0,2,4,6,8,10,12,14,16,18,40,42,44,46,48,50,52,54,56,58")) - Expect(isolatedCPUSet.String()).To(Equal("1,3,5,7,9,11,13,15,17,19-39,41,43,45,47,49,51,53,55,57,59-79")) - }) - It("Ensure reserved CPUs populated are correctly when splitReservedCPUsAcrossNUMA is enabled and disableHT is disabled", func() { - reservedCPUCount = 20 // random number, no special meaning - splitReservedCPUsAcrossNUMA = true - disableHT = false - mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath) - Expect(err).ToNot(HaveOccurred()) - handle, err = NewGHWHandler(mustGatherDirAbsolutePath, node) - Expect(err).ToNot(HaveOccurred()) - 
reservedCPUSet, isolatedCPUSet, err := handle.GetReservedAndIsolatedCPUs(reservedCPUCount, splitReservedCPUsAcrossNUMA, disableHT) - Expect(err).ToNot(HaveOccurred()) - Expect(reservedCPUSet.String()).To(Equal("0-9,40-49")) - Expect(isolatedCPUSet.String()).To(Equal("10-39,50-79")) - }) - It("Errors out in case negative reservedCPUCount is specified", func() { - reservedCPUCount = -2 // random negative number, no special meaning - splitReservedCPUsAcrossNUMA = true - disableHT = false - mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath) - Expect(err).ToNot(HaveOccurred()) - handle, err = NewGHWHandler(mustGatherDirAbsolutePath, node) - Expect(err).ToNot(HaveOccurred()) - _, _, err := handle.GetReservedAndIsolatedCPUs(reservedCPUCount, splitReservedCPUsAcrossNUMA, disableHT) - Expect(err).To(HaveOccurred()) - }) - It("Errors out in case specified reservedCPUCount is greater than the total CPUs present in the system and disableHT is disabled", func() { - reservedCPUCount = 100 // random positive number greater than that total number of CPUs - splitReservedCPUsAcrossNUMA = true - disableHT = false - mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath) - Expect(err).ToNot(HaveOccurred()) - handle, err = NewGHWHandler(mustGatherDirAbsolutePath, node) - Expect(err).ToNot(HaveOccurred()) - _, _, err := handle.GetReservedAndIsolatedCPUs(reservedCPUCount, splitReservedCPUsAcrossNUMA, disableHT) - Expect(err).To(HaveOccurred()) - }) - It("Errors out in case hyperthreading is enabled, splitReservedCPUsAcrossNUMA is enabled, disableHT is disabled and number of reserved CPUs per number of NUMA nodes are odd", func() { - reservedCPUCount = 21 // random number which results in a CPU split per NUMA node (11 + 10 in this case) such that odd number of reserved CPUs (11) have to be allocated from a NUMA node - splitReservedCPUsAcrossNUMA = true - disableHT = false - mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath) - Expect(err).ToNot(HaveOccurred()) - handle, err = NewGHWHandler(mustGatherDirAbsolutePath, node) - Expect(err).ToNot(HaveOccurred()) - _, _, err := handle.GetReservedAndIsolatedCPUs(reservedCPUCount, splitReservedCPUsAcrossNUMA, disableHT) - Expect(err).To(HaveOccurred()) - }) - It("Errors out in case hyperthreading is enabled, splitReservedCPUsAcrossNUMA is disabled,, disableHT is disabled and number of reserved CPUs are odd", func() { - reservedCPUCount = 21 // random number which results in odd number (21) of CPUs to be allocated from a NUMA node - splitReservedCPUsAcrossNUMA = false - disableHT = false - mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath) - Expect(err).ToNot(HaveOccurred()) - handle, err = NewGHWHandler(mustGatherDirAbsolutePath, node) - Expect(err).ToNot(HaveOccurred()) - _, _, err := handle.GetReservedAndIsolatedCPUs(reservedCPUCount, splitReservedCPUsAcrossNUMA, disableHT) - Expect(err).To(HaveOccurred()) - }) - It("Ensure reserved CPUs populated are correctly when splitReservedCPUsAcrossNUMA is disabled, disableHT is enabled", func() { - reservedCPUCount = 20 // random number, no special meaning - splitReservedCPUsAcrossNUMA = false - disableHT = true - mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath) - Expect(err).ToNot(HaveOccurred()) - handle, err = NewGHWHandler(mustGatherDirAbsolutePath, node) - Expect(err).ToNot(HaveOccurred()) - reservedCPUSet, isolatedCPUSet, err := handle.GetReservedAndIsolatedCPUs(reservedCPUCount, splitReservedCPUsAcrossNUMA, disableHT) - Expect(err).ToNot(HaveOccurred()) - 
Expect(reservedCPUSet.String()).To(Equal("0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30,32,34,36,38")) - Expect(isolatedCPUSet.String()).To(Equal("1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31,33,35,37,39")) - }) - It("Ensure reserved CPUs populated are correctly when splitReservedCPUsAcrossNUMA is enabled and disableHT is enabled", func() { - reservedCPUCount = 20 // random number, no special meaning - splitReservedCPUsAcrossNUMA = true - disableHT = true - mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath) - Expect(err).ToNot(HaveOccurred()) - handle, err = NewGHWHandler(mustGatherDirAbsolutePath, node) - Expect(err).ToNot(HaveOccurred()) - reservedCPUSet, isolatedCPUSet, err := handle.GetReservedAndIsolatedCPUs(reservedCPUCount, splitReservedCPUsAcrossNUMA, disableHT) - Expect(err).ToNot(HaveOccurred()) - Expect(reservedCPUSet.String()).To(Equal("0-19")) - Expect(isolatedCPUSet.String()).To(Equal("20-39")) - }) - It("Do not error out in case hyperthreading is currently enabled, splitReservedCPUsAcrossNUMA is disabled, disableHT is enabled and number of reserved CPUs allocated from a NUMA node are odd", func() { - reservedCPUCount = 11 // random number which results in odd number (11) of CPUs to be allocated from a NUMA node - splitReservedCPUsAcrossNUMA = false - disableHT = true - mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath) - Expect(err).ToNot(HaveOccurred()) - handle, err = NewGHWHandler(mustGatherDirAbsolutePath, node) - Expect(err).ToNot(HaveOccurred()) - _, _, err := handle.GetReservedAndIsolatedCPUs(reservedCPUCount, splitReservedCPUsAcrossNUMA, disableHT) - Expect(err).ToNot(HaveOccurred()) - }) - It("Do not error out in case hyperthreading is currently enabled, splitReservedCPUsAcrossNUMA is enabled, disableHT is enabled and number of reserved CPUs allocated from a NUMA node are odd", func() { - reservedCPUCount = 2 // random number which results in odd number (1) of CPUs to be allocated from a NUMA node - splitReservedCPUsAcrossNUMA = true - disableHT = true - mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath) - Expect(err).ToNot(HaveOccurred()) - handle, err = NewGHWHandler(mustGatherDirAbsolutePath, node) - Expect(err).ToNot(HaveOccurred()) - _, _, err := handle.GetReservedAndIsolatedCPUs(reservedCPUCount, splitReservedCPUsAcrossNUMA, disableHT) - Expect(err).ToNot(HaveOccurred()) - }) - It("Do not error out in case of a system where hyperthreading is not enabled initially, splitReservedCPUsAcrossNUMA is disabled, disableHT is enabled and number of reserved CPUs allocated are odd", func() { - node = newTestNode("ocp47sno-master-0.demo.lab") - reservedCPUCount = 3 // random number which results in odd number (3) of CPUs to be allocated - splitReservedCPUsAcrossNUMA = false - disableHT = true - mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherSNODirPath) - Expect(err).ToNot(HaveOccurred()) - handle, err = NewGHWHandler(mustGatherDirAbsolutePath, node) - Expect(err).ToNot(HaveOccurred()) - _, _, err := handle.GetReservedAndIsolatedCPUs(reservedCPUCount, splitReservedCPUsAcrossNUMA, disableHT) - Expect(err).ToNot(HaveOccurred()) - }) - - }) -}) - -var _ = Describe("PerformanceProfileCreator: Check if Hyperthreading enabled/disabled in a system to correctly populate reserved and isolated CPUs in the performance profile", func() { - var mustGatherDirAbsolutePath string - var node *v1.Node - var handle *GHWHandler - var err error - - Context("Check if hyperthreading is enabled on the system or not", func() { - It("Ensure we detect 
correctly that hyperthreading is enabled on a system", func() { - node = newTestNode("worker1") - mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath) - Expect(err).ToNot(HaveOccurred()) - handle, err = NewGHWHandler(mustGatherDirAbsolutePath, node) - Expect(err).ToNot(HaveOccurred()) - htEnabled, err := handle.IsHyperthreadingEnabled() - Expect(err).ToNot(HaveOccurred()) - Expect(htEnabled).To(Equal(true)) - }) - It("Ensure we detect correctly that hyperthreading is disabled on a system", func() { - node = newTestNode("worker2") - mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath) - Expect(err).ToNot(HaveOccurred()) - handle, err = NewGHWHandler(mustGatherDirAbsolutePath, node) - Expect(err).ToNot(HaveOccurred()) - htEnabled, err := handle.IsHyperthreadingEnabled() - Expect(err).ToNot(HaveOccurred()) - Expect(htEnabled).To(Equal(false)) - }) - }) -}) - -var _ = Describe("PerformanceProfileCreator: Test Helper Functions getCPUsSplitAcrossNUMA and getCPUsSequentially", func() { - var mustGatherDirAbsolutePath string - var node *v1.Node - var handle *GHWHandler - var reservedCPUCount int - var topologyInfoNodes, htDisabledTopologyInfoNodes []*topology.Node - var htEnabled bool - var err error - - BeforeEach(func() { - node = newTestNode("worker1") - topologyInfoNodes = []*topology.Node{ - { - ID: 0, - Cores: []*cpu.ProcessorCore{ - {ID: 0, Index: 0, NumThreads: 2, LogicalProcessors: []int{0, 40}}, - {ID: 4, Index: 6, NumThreads: 2, LogicalProcessors: []int{2, 42}}, - {ID: 1, Index: 17, NumThreads: 2, LogicalProcessors: []int{4, 44}}, - {ID: 3, Index: 18, NumThreads: 2, LogicalProcessors: []int{6, 46}}, - {ID: 2, Index: 19, NumThreads: 2, LogicalProcessors: []int{8, 48}}, - {ID: 12, Index: 1, NumThreads: 2, LogicalProcessors: []int{10, 50}}, - {ID: 8, Index: 2, NumThreads: 2, LogicalProcessors: []int{12, 52}}, - {ID: 11, Index: 3, NumThreads: 2, LogicalProcessors: []int{14, 54}}, - {ID: 9, Index: 4, NumThreads: 2, LogicalProcessors: []int{16, 56}}, - {ID: 10, Index: 5, NumThreads: 2, LogicalProcessors: []int{18, 58}}, - {ID: 16, Index: 7, NumThreads: 2, LogicalProcessors: []int{20, 60}}, - {ID: 20, Index: 8, NumThreads: 2, LogicalProcessors: []int{22, 62}}, - {ID: 17, Index: 9, NumThreads: 2, LogicalProcessors: []int{24, 64}}, - {ID: 19, Index: 10, NumThreads: 2, LogicalProcessors: []int{26, 66}}, - {ID: 18, Index: 11, NumThreads: 2, LogicalProcessors: []int{28, 68}}, - {ID: 28, Index: 12, NumThreads: 2, LogicalProcessors: []int{30, 70}}, - {ID: 24, Index: 13, NumThreads: 2, LogicalProcessors: []int{32, 72}}, - {ID: 27, Index: 14, NumThreads: 2, LogicalProcessors: []int{34, 74}}, - {ID: 25, Index: 15, NumThreads: 2, LogicalProcessors: []int{36, 76}}, - {ID: 26, Index: 16, NumThreads: 2, LogicalProcessors: []int{38, 78}}, - }, - }, - { - ID: 1, - Cores: []*cpu.ProcessorCore{ - {ID: 0, Index: 0, NumThreads: 2, LogicalProcessors: []int{1, 41}}, - {ID: 4, Index: 11, NumThreads: 2, LogicalProcessors: []int{3, 43}}, - {ID: 1, Index: 17, NumThreads: 2, LogicalProcessors: []int{5, 45}}, - {ID: 3, Index: 18, NumThreads: 2, LogicalProcessors: []int{7, 47}}, - {ID: 2, Index: 19, NumThreads: 2, LogicalProcessors: []int{9, 49}}, - {ID: 12, Index: 1, NumThreads: 2, LogicalProcessors: []int{11, 51}}, - {ID: 8, Index: 2, NumThreads: 2, LogicalProcessors: []int{13, 53}}, - {ID: 11, Index: 3, NumThreads: 2, LogicalProcessors: []int{15, 55}}, - {ID: 9, Index: 4, NumThreads: 2, LogicalProcessors: []int{17, 57}}, - {ID: 10, Index: 5, NumThreads: 2, LogicalProcessors: []int{19, 59}}, - 
{ID: 16, Index: 6, NumThreads: 2, LogicalProcessors: []int{21, 61}}, - {ID: 20, Index: 7, NumThreads: 2, LogicalProcessors: []int{23, 63}}, - {ID: 17, Index: 8, NumThreads: 2, LogicalProcessors: []int{25, 65}}, - {ID: 19, Index: 9, NumThreads: 2, LogicalProcessors: []int{27, 67}}, - {ID: 18, Index: 10, NumThreads: 2, LogicalProcessors: []int{29, 69}}, - {ID: 28, Index: 12, NumThreads: 2, LogicalProcessors: []int{31, 71}}, - {ID: 24, Index: 13, NumThreads: 2, LogicalProcessors: []int{33, 73}}, - {ID: 27, Index: 14, NumThreads: 2, LogicalProcessors: []int{35, 75}}, - {ID: 25, Index: 15, NumThreads: 2, LogicalProcessors: []int{37, 77}}, - {ID: 26, Index: 16, NumThreads: 2, LogicalProcessors: []int{39, 79}}, - }, - }, - } - - htDisabledTopologyInfoNodes = []*topology.Node{ - { - ID: 0, - Cores: []*cpu.ProcessorCore{ - {ID: 0, Index: 0, NumThreads: 1, LogicalProcessors: []int{0}}, - {ID: 4, Index: 6, NumThreads: 1, LogicalProcessors: []int{2}}, - {ID: 1, Index: 17, NumThreads: 1, LogicalProcessors: []int{4}}, - {ID: 3, Index: 18, NumThreads: 1, LogicalProcessors: []int{6}}, - {ID: 2, Index: 19, NumThreads: 1, LogicalProcessors: []int{8}}, - {ID: 12, Index: 1, NumThreads: 1, LogicalProcessors: []int{10}}, - {ID: 8, Index: 2, NumThreads: 1, LogicalProcessors: []int{12}}, - {ID: 11, Index: 3, NumThreads: 1, LogicalProcessors: []int{14}}, - {ID: 9, Index: 4, NumThreads: 1, LogicalProcessors: []int{16}}, - {ID: 10, Index: 5, NumThreads: 1, LogicalProcessors: []int{18}}, - {ID: 16, Index: 7, NumThreads: 1, LogicalProcessors: []int{20}}, - {ID: 20, Index: 8, NumThreads: 1, LogicalProcessors: []int{22}}, - {ID: 17, Index: 9, NumThreads: 1, LogicalProcessors: []int{24}}, - {ID: 19, Index: 10, NumThreads: 1, LogicalProcessors: []int{26}}, - {ID: 18, Index: 11, NumThreads: 1, LogicalProcessors: []int{28}}, - {ID: 28, Index: 12, NumThreads: 1, LogicalProcessors: []int{30}}, - {ID: 24, Index: 13, NumThreads: 1, LogicalProcessors: []int{32}}, - {ID: 27, Index: 14, NumThreads: 1, LogicalProcessors: []int{34}}, - {ID: 25, Index: 15, NumThreads: 1, LogicalProcessors: []int{36}}, - {ID: 26, Index: 16, NumThreads: 1, LogicalProcessors: []int{38}}, - }, - }, - { - ID: 1, - Cores: []*cpu.ProcessorCore{ - {ID: 0, Index: 0, NumThreads: 1, LogicalProcessors: []int{1}}, - {ID: 4, Index: 11, NumThreads: 1, LogicalProcessors: []int{3}}, - {ID: 1, Index: 17, NumThreads: 1, LogicalProcessors: []int{5}}, - {ID: 3, Index: 18, NumThreads: 1, LogicalProcessors: []int{7}}, - {ID: 2, Index: 19, NumThreads: 1, LogicalProcessors: []int{9}}, - {ID: 12, Index: 1, NumThreads: 1, LogicalProcessors: []int{11}}, - {ID: 8, Index: 2, NumThreads: 1, LogicalProcessors: []int{13}}, - {ID: 11, Index: 3, NumThreads: 1, LogicalProcessors: []int{15}}, - {ID: 9, Index: 4, NumThreads: 1, LogicalProcessors: []int{17}}, - {ID: 10, Index: 5, NumThreads: 1, LogicalProcessors: []int{19}}, - {ID: 16, Index: 6, NumThreads: 1, LogicalProcessors: []int{21}}, - {ID: 20, Index: 7, NumThreads: 1, LogicalProcessors: []int{23}}, - {ID: 17, Index: 8, NumThreads: 1, LogicalProcessors: []int{25}}, - {ID: 19, Index: 9, NumThreads: 1, LogicalProcessors: []int{27}}, - {ID: 18, Index: 10, NumThreads: 1, LogicalProcessors: []int{29}}, - {ID: 28, Index: 12, NumThreads: 1, LogicalProcessors: []int{31}}, - {ID: 24, Index: 13, NumThreads: 1, LogicalProcessors: []int{33}}, - {ID: 27, Index: 14, NumThreads: 1, LogicalProcessors: []int{35}}, - {ID: 25, Index: 15, NumThreads: 1, LogicalProcessors: []int{37}}, - {ID: 26, Index: 16, NumThreads: 1, LogicalProcessors: []int{39}}, 
- }, - }, - } - }) - Context("Check if getCPUsSplitAcrossNUMA and getCPUsSequentially are working correctly and reserved and isolated CPUs are properly populated in the performance profile", func() { - It("Ensure reserved and isolated CPUs populated are correctly by getCPUsSplitAcrossNUMA when splitReservedCPUsAcrossNUMA is enabled", func() { - reservedCPUCount = 20 // random number, no special meaning - htEnabled = true - mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath) - Expect(err).ToNot(HaveOccurred()) - handle, err = NewGHWHandler(mustGatherDirAbsolutePath, node) - Expect(err).ToNot(HaveOccurred()) - reservedCPUSet, isolatedCPUSet, err := handle.getCPUsSplitAcrossNUMA(reservedCPUCount, htEnabled, topologyInfoNodes) - Expect(err).ToNot(HaveOccurred()) - Expect(reservedCPUSet.String()).To(Equal("0-9,40-49")) - Expect(isolatedCPUSet.String()).To(Equal("10-39,50-79")) - }) - It("Ensure reserved and isolated CPUs populated are correctly by getCPUsSplitAcrossNUMA when splitReservedCPUsAcrossNUMA is enabled and htEnabled is disabled", func() { - reservedCPUCount = 20 // random number, no special meaning - htEnabled = false - mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath) - Expect(err).ToNot(HaveOccurred()) - handle, err = NewGHWHandler(mustGatherDirAbsolutePath, node) - Expect(err).ToNot(HaveOccurred()) - reservedCPUSet, isolatedCPUSet, err := handle.getCPUsSplitAcrossNUMA(reservedCPUCount, htEnabled, htDisabledTopologyInfoNodes) - Expect(err).ToNot(HaveOccurred()) - Expect(reservedCPUSet.String()).To(Equal("0-19")) - Expect(isolatedCPUSet.String()).To(Equal("20-39")) - }) - It("Errors out in case hyperthreading is enabled, splitReservedCPUsAcrossNUMA is enabled, htEnabled is enabled and the number of reserved CPUs per number of NUMA nodes are odd", func() { - reservedCPUCount = 21 // random number which results in a CPU split per NUMA node (11 + 10 in this case) such that odd number of reserved CPUs (11) have to be allocated from a NUMA node - htEnabled = true - mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath) - Expect(err).ToNot(HaveOccurred()) - handle, err = NewGHWHandler(mustGatherDirAbsolutePath, node) - Expect(err).ToNot(HaveOccurred()) - _, _, err := handle.getCPUsSplitAcrossNUMA(reservedCPUCount, htEnabled, topologyInfoNodes) - Expect(err).To(HaveOccurred()) - }) - It("Works without error in case hyperthreading is disabled, splitReservedCPUsAcrossNUMA is enabled, htEnabled is disabled and number of reserved CPUs per number of NUMA nodes are odd", func() { - reservedCPUCount = 11 // random number which results in a CPU split per NUMA node (5 + 6 in this case) such that odd number of reserved CPUs (5) have to be allocated from a NUMA node - htEnabled = false - mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath) - Expect(err).ToNot(HaveOccurred()) - handle, err = NewGHWHandler(mustGatherDirAbsolutePath, node) - Expect(err).ToNot(HaveOccurred()) - _, _, err := handle.getCPUsSplitAcrossNUMA(reservedCPUCount, htEnabled, topologyInfoNodes) - Expect(err).ToNot(HaveOccurred()) - }) - It("Ensure reserved and isolated CPUs populated are correctly by getCPUsSequentially when splitReservedCPUsAcrossNUMA is disabled", func() { - reservedCPUCount = 20 // random number, no special meaning - htEnabled = true - mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath) - Expect(err).ToNot(HaveOccurred()) - handle, err = NewGHWHandler(mustGatherDirAbsolutePath, node) - Expect(err).ToNot(HaveOccurred()) - reservedCPUSet, 
isolatedCPUSet, err := handle.getCPUsSequentially(reservedCPUCount, htEnabled, topologyInfoNodes) - Expect(err).ToNot(HaveOccurred()) - Expect(reservedCPUSet.String()).To(Equal("0,2,4,6,8,10,12,14,16,18,40,42,44,46,48,50,52,54,56,58")) - Expect(isolatedCPUSet.String()).To(Equal("1,3,5,7,9,11,13,15,17,19-39,41,43,45,47,49,51,53,55,57,59-79")) - }) - It("Errors out in case hyperthreading is enabled, splitReservedCPUsAcrossNUMA is disabled and number of reserved CPUs are odd", func() { - reservedCPUCount = 21 // random number which results in odd number (21) of CPUs to be allocated from a NUMA node - htEnabled = true - mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath) - Expect(err).ToNot(HaveOccurred()) - handle, err = NewGHWHandler(mustGatherDirAbsolutePath, node) - Expect(err).ToNot(HaveOccurred()) - _, _, err := handle.getCPUsSequentially(reservedCPUCount, htEnabled, topologyInfoNodes) - Expect(err).To(HaveOccurred()) - }) - It("Works without error in case hyperthreading is disabled, splitReservedCPUsAcrossNUMA is disabled and number of reserved CPUs are odd", func() { - reservedCPUCount = 11 // random number which results in odd number (11) of CPUs to be allocated from a NUMA node - htEnabled = false - mustGatherDirAbsolutePath, err = filepath.Abs(mustGatherDirPath) - Expect(err).ToNot(HaveOccurred()) - handle, err = NewGHWHandler(mustGatherDirAbsolutePath, node) - Expect(err).ToNot(HaveOccurred()) - _, _, err := handle.getCPUsSequentially(reservedCPUCount, htEnabled, topologyInfoNodes) - Expect(err).ToNot(HaveOccurred()) - }) - - }) -}) - -var _ = Describe("PerformanceProfileCreator: Ensuring Nodes hardware equality", func() { - Context("Testing matching nodes with the same hardware ", func() { - It("should pass hardware equality test", func() { - mustGatherDirAbsolutePath, err := filepath.Abs(mustGatherDirPath) - Expect(err).ToNot(HaveOccurred()) - - node1, err := getNode(mustGatherDirAbsolutePath, "worker1.yaml") - Expect(err).ToNot(HaveOccurred()) - node1Handle, err := NewGHWHandler(mustGatherDirAbsolutePath, node1) - Expect(err).ToNot(HaveOccurred()) - - node2, err := getNode(mustGatherDirAbsolutePath, "worker1.yaml") - Expect(err).ToNot(HaveOccurred()) - node2Handle, err := NewGHWHandler(mustGatherDirAbsolutePath, node2) - Expect(err).ToNot(HaveOccurred()) - - nodeHandles := []*GHWHandler{node1Handle, node2Handle} - err = EnsureNodesHaveTheSameHardware(nodeHandles) - Expect(err).ToNot(HaveOccurred()) - }) - }) - - Context("Testing matching nodes with different hardware ", func() { - It("should fail hardware equality test", func() { - mustGatherDirAbsolutePath, err := filepath.Abs(mustGatherDirPath) - Expect(err).ToNot(HaveOccurred()) - - node1, err := getNode(mustGatherDirAbsolutePath, "worker1.yaml") - Expect(err).ToNot(HaveOccurred()) - node1Handle, err := NewGHWHandler(mustGatherDirAbsolutePath, node1) - Expect(err).ToNot(HaveOccurred()) - - node2, err := getNode(mustGatherDirAbsolutePath, "worker2.yaml") - Expect(err).ToNot(HaveOccurred()) - node2Handle, err := NewGHWHandler(mustGatherDirAbsolutePath, node2) - Expect(err).ToNot(HaveOccurred()) - - nodeHandles := []*GHWHandler{node1Handle, node2Handle} - err = EnsureNodesHaveTheSameHardware(nodeHandles) - Expect(err).To(HaveOccurred()) - }) - }) -}) - -var _ = Describe("PerformanceProfileCreator: Test Helper Function ensureSameTopology", func() { - var nodes2 []*topology.Node - var topology2 topology.Info - - nodes1 := []*topology.Node{ - { - ID: 0, - Cores: []*cpu.ProcessorCore{ - {ID: 0, Index: 0, NumThreads: 2, 
LogicalProcessors: []int{0, 1}}, - {ID: 1, Index: 1, NumThreads: 2, LogicalProcessors: []int{2, 3}}, - }, - }, - { - ID: 1, - Cores: []*cpu.ProcessorCore{ - {ID: 2, Index: 2, NumThreads: 2, LogicalProcessors: []int{4, 5}}, - {ID: 3, Index: 3, NumThreads: 2, LogicalProcessors: []int{6, 7}}, - }, - }, - } - topology1 := topology.Info{ - Architecture: topology.ARCHITECTURE_NUMA, - Nodes: nodes1, - } - - BeforeEach(func() { - nodes2 = []*topology.Node{ - { - ID: 0, - Cores: []*cpu.ProcessorCore{ - {ID: 0, Index: 0, NumThreads: 2, LogicalProcessors: []int{0, 1}}, - {ID: 1, Index: 1, NumThreads: 2, LogicalProcessors: []int{2, 3}}, - }, - }, - { - ID: 1, - Cores: []*cpu.ProcessorCore{ - {ID: 2, Index: 2, NumThreads: 2, LogicalProcessors: []int{4, 5}}, - {ID: 3, Index: 3, NumThreads: 2, LogicalProcessors: []int{6, 7}}, - }, - }, - } - topology2 = topology.Info{ - Architecture: topology.ARCHITECTURE_NUMA, - Nodes: nodes2, - } - }) - - Context("Check if ensureSameTopology is working correctly", func() { - It("nodes with similar topology should not return error", func() { - err := ensureSameTopology(&topology1, &topology2) - Expect(err).ToNot(HaveOccurred()) - }) - It("nodes with different architecture should return error", func() { - topology2.Architecture = topology.ARCHITECTURE_SMP - err := ensureSameTopology(&topology1, &topology2) - Expect(err).To(HaveOccurred()) - }) - It("nodes with different number of NUMA nodes should return error", func() { - topology2.Nodes = topology2.Nodes[1:] - err := ensureSameTopology(&topology1, &topology2) - Expect(err).To(HaveOccurred()) - }) - It("nodes with different number threads per core should return error", func() { - topology2.Nodes[1].Cores[1].NumThreads = 1 - err := ensureSameTopology(&topology1, &topology2) - Expect(err).To(HaveOccurred()) - }) - It("nodes with different thread IDs should return error", func() { - topology2.Nodes[1].Cores[1].LogicalProcessors[1] = 15 - err := ensureSameTopology(&topology1, &topology2) - Expect(err).To(HaveOccurred()) - }) - }) -}) - -var _ = Describe("PerformanceProfileCreator: Test Helper Function GetAdditionalKernelArgs", func() { - var powerMode string - var disableHT bool - Context("Ensure kernel args are populated correctly", func() { - It("Ensure kernel args are populated correctly in case of low-latency ", func() { - powerMode = "default" - disableHT = false - kernelArgs := GetAdditionalKernelArgs(powerMode, disableHT) - Expect(kernelArgs).To(BeEquivalentTo([]string{})) - }) - - }) - Context("Ensure kernel args are populated correctly", func() { - It("Ensure kernel args are populated correctly in case of low-latency ", func() { - powerMode = "low-latency" - disableHT = false - args := []string{"audit=0", - "mce=off", - "nmi_watchdog=0", - } - kernelArgs := GetAdditionalKernelArgs(powerMode, disableHT) - sort.Strings(kernelArgs) // sort to avoid inequality due to difference in order - Expect(kernelArgs).To(BeEquivalentTo(args)) - }) - - }) - Context("Ensure kernel args are populated correctly", func() { - It("Ensure kernel args are populated correctly in case of ultra-low-latency ", func() { - powerMode = "ultra-low-latency" - disableHT = false - args := []string{"audit=0", - "idle=poll", - "intel_idle.max_cstate=0", - "mce=off", - "nmi_watchdog=0", - "processor.max_cstate=1", - } - kernelArgs := GetAdditionalKernelArgs(powerMode, disableHT) - sort.Strings(kernelArgs) // sort to avoid inequality due to difference in order - Expect(kernelArgs).To(BeEquivalentTo(args)) - }) - - }) - Context("Ensure kernel args are 
populated correctly", func() { - It("Ensure kernel args are populated correctly in case of disableHT=true ", func() { - powerMode = "ultra-low-latency" - disableHT = true - args := []string{"audit=0", - "idle=poll", - "intel_idle.max_cstate=0", - "mce=off", - "nmi_watchdog=0", - "nosmt", - "processor.max_cstate=1", - } - kernelArgs := GetAdditionalKernelArgs(powerMode, disableHT) - sort.Strings(kernelArgs) // sort to avoid inequality due to difference in order - Expect(kernelArgs).To(BeEquivalentTo(args)) - }) - - }) -}) - -var _ = Describe("PerformanceProfileCreator: Test Helper cpuAccumulator", func() { - nodes1 := []*topology.Node{ - { - ID: 0, - Cores: []*cpu.ProcessorCore{ - {ID: 0, Index: 0, NumThreads: 2, LogicalProcessors: []int{0, 1}}, - {ID: 1, Index: 1, NumThreads: 2, LogicalProcessors: []int{2, 3}}, - }, - }, - { - ID: 1, - Cores: []*cpu.ProcessorCore{ - {ID: 2, Index: 2, NumThreads: 2, LogicalProcessors: []int{4, 5}}, - {ID: 3, Index: 3, NumThreads: 2, LogicalProcessors: []int{6, 7}}, - }, - }, - } - topology1 := topology.Info{ - Architecture: topology.ARCHITECTURE_NUMA, - Nodes: nodes1, - } - - Context("Check if cpuAccumulator is working correctly", func() { - It("should accumulate allCores", func() { - acc := newCPUAccumulator() - for _, node := range topology1.Nodes { - acc.AddCores(allCores, node.Cores) - } - cores := acc.Result().ToSlice() - Expect(cores).Should(Equal([]int{0, 1, 2, 3, 4, 5, 6, 7})) - }) - It("should accumulate cores up to the max", func() { - acc := newCPUAccumulator() - for _, node := range topology1.Nodes { - acc.AddCores(3, node.Cores) - } - cores := acc.Result().ToSlice() - Expect(cores).Should(Equal([]int{0, 1, 2})) - }) - - }) -}) diff --git a/pkg/pao/utils/testing/testing.go b/pkg/pao/utils/testing/testing.go deleted file mode 100644 index 461ee6987..000000000 --- a/pkg/pao/utils/testing/testing.go +++ /dev/null @@ -1,100 +0,0 @@ -package testing - -import ( - performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2" - mcov1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/utils/pointer" -) - -const ( - // HugePageSize defines the huge page size used for tests - HugePageSize = performancev2.HugePageSize("1G") - // HugePagesCount defines the huge page count used for tests - HugePagesCount = 4 - // IsolatedCPUs defines the isolated CPU set used for tests - IsolatedCPUs = performancev2.CPUSet("4-7") - // ReservedCPUs defines the reserved CPU set used for tests - ReservedCPUs = performancev2.CPUSet("0-3") - // SingleNUMAPolicy defines the topologyManager policy used for tests - SingleNUMAPolicy = "single-numa-node" - - //MachineConfigLabelKey defines the MachineConfig label key of the test profile - MachineConfigLabelKey = "mcKey" - //MachineConfigLabelValue defines the MachineConfig label vlue of the test profile - MachineConfigLabelValue = "mcValue" - //MachineConfigPoolLabelKey defines the MachineConfigPool label key of the test profile - MachineConfigPoolLabelKey = "mcpKey" - //MachineConfigPoolLabelValue defines the MachineConfigPool label value of the test profile - MachineConfigPoolLabelValue = "mcpValue" -) - -// NewPerformanceProfile returns new performance profile object that used for tests -func NewPerformanceProfile(name string) *performancev2.PerformanceProfile { - size := HugePageSize - isolatedCPUs := IsolatedCPUs - reservedCPUs := ReservedCPUs - numaPolicy := SingleNUMAPolicy 
- - return &performancev2.PerformanceProfile{ - TypeMeta: metav1.TypeMeta{Kind: "PerformanceProfile"}, - ObjectMeta: metav1.ObjectMeta{ - Name: name, - UID: types.UID("11111111-1111-1111-1111-1111111111111"), - }, - Spec: performancev2.PerformanceProfileSpec{ - CPU: &performancev2.CPU{ - Isolated: &isolatedCPUs, - Reserved: &reservedCPUs, - }, - HugePages: &performancev2.HugePages{ - DefaultHugePagesSize: &size, - Pages: []performancev2.HugePage{ - { - Count: HugePagesCount, - Size: size, - }, - }, - }, - RealTimeKernel: &performancev2.RealTimeKernel{ - Enabled: pointer.BoolPtr(true), - }, - NUMA: &performancev2.NUMA{ - TopologyPolicy: &numaPolicy, - }, - MachineConfigLabel: map[string]string{ - MachineConfigLabelKey: MachineConfigLabelValue, - }, - MachineConfigPoolSelector: map[string]string{ - MachineConfigPoolLabelKey: MachineConfigPoolLabelValue, - }, - NodeSelector: map[string]string{ - "nodekey": "nodeValue", - }, - }, - } -} - -// NewProfileMCP returns new MCP used for testing -func NewProfileMCP() *mcov1.MachineConfigPool { - return &mcov1.MachineConfigPool{ - TypeMeta: metav1.TypeMeta{Kind: "MachineConfigPool"}, - ObjectMeta: metav1.ObjectMeta{ - Name: "test", - UID: "11111111-1111-1111-1111-1111111111111", - Labels: map[string]string{ - MachineConfigPoolLabelKey: MachineConfigPoolLabelValue, - }, - }, - Spec: mcov1.MachineConfigPoolSpec{ - NodeSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"nodekey": "nodeValue"}, - }, - MachineConfigSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{MachineConfigLabelKey: MachineConfigLabelValue}, - }, - }, - } -} diff --git a/test/e2e/pao/cluster-setup/base/performance/kustomization.yaml b/test/e2e/pao/cluster-setup/base/performance/kustomization.yaml deleted file mode 100644 index 75d1dde24..000000000 --- a/test/e2e/pao/cluster-setup/base/performance/kustomization.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -resources: - - machine_config_pool.yaml - diff --git a/test/e2e/pao/cluster-setup/base/performance/machine_config_pool.yaml b/test/e2e/pao/cluster-setup/base/performance/machine_config_pool.yaml deleted file mode 100644 index 03a6e18c4..000000000 --- a/test/e2e/pao/cluster-setup/base/performance/machine_config_pool.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfigPool -metadata: - name: worker-cnf - namespace: openshift-machine-config-operator - labels: - machineconfiguration.openshift.io/role: worker-cnf -spec: - paused: true - machineConfigSelector: - matchExpressions: - - key: machineconfiguration.openshift.io/role - operator: In - values: [worker,worker-cnf] - nodeSelector: - matchLabels: - node-role.kubernetes.io/worker-cnf: "" diff --git a/test/e2e/pao/cluster-setup/ci-cluster/performance/kustomization.yaml b/test/e2e/pao/cluster-setup/ci-cluster/performance/kustomization.yaml deleted file mode 100644 index 87722061e..000000000 --- a/test/e2e/pao/cluster-setup/ci-cluster/performance/kustomization.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -bases: - - ../../base/performance - diff --git a/test/e2e/pao/cluster-setup/ci-upgrade-test-cluster/performance/kustomization.yaml b/test/e2e/pao/cluster-setup/ci-upgrade-test-cluster/performance/kustomization.yaml deleted file mode 100644 index beac1882f..000000000 --- a/test/e2e/pao/cluster-setup/ci-upgrade-test-cluster/performance/kustomization.yaml +++ /dev/null @@ -1,9 +0,0 @@ 
-apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -bases: - - ../../mcp-only-cluster/performance - -resources: - - performance_profile.yaml - diff --git a/test/e2e/pao/cluster-setup/ci-upgrade-test-cluster/performance/performance_profile.yaml b/test/e2e/pao/cluster-setup/ci-upgrade-test-cluster/performance/performance_profile.yaml deleted file mode 100644 index 36d890ef5..000000000 --- a/test/e2e/pao/cluster-setup/ci-upgrade-test-cluster/performance/performance_profile.yaml +++ /dev/null @@ -1,27 +0,0 @@ -apiVersion: performance.openshift.io/v1alpha1 -kind: PerformanceProfile -metadata: - name: ci-upgrade-test -spec: - additionalKernelArgs: - - "nmi_watchdog=0" - - "audit=0" - - "mce=off" - - "processor.max_cstate=1" - - "idle=poll" - - "intel_idle.max_cstate=0" - cpu: - isolated: "1-3" - reserved: "0" - hugepages: - defaultHugepagesSize: "1G" - pages: - - size: "1G" - count: 1 - node: 0 - realTimeKernel: - enabled: true - numa: - topologyPolicy: "single-numa-node" - nodeSelector: - node-role.kubernetes.io/worker-cnf: "" diff --git a/test/e2e/pao/cluster-setup/manual-cluster/performance/kustomization.yaml b/test/e2e/pao/cluster-setup/manual-cluster/performance/kustomization.yaml deleted file mode 100644 index 69a20cd13..000000000 --- a/test/e2e/pao/cluster-setup/manual-cluster/performance/kustomization.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -bases: - - ../../base/performance - -resources: - - performance_profile.yaml diff --git a/test/e2e/pao/cluster-setup/manual-cluster/performance/performance_profile.yaml b/test/e2e/pao/cluster-setup/manual-cluster/performance/performance_profile.yaml deleted file mode 100644 index ed6c4e47e..000000000 --- a/test/e2e/pao/cluster-setup/manual-cluster/performance/performance_profile.yaml +++ /dev/null @@ -1,29 +0,0 @@ -apiVersion: performance.openshift.io/v2 -kind: PerformanceProfile -metadata: - name: manual -spec: - additionalKernelArgs: - - "nmi_watchdog=0" - - "audit=0" - - "mce=off" - - "processor.max_cstate=1" - - "idle=poll" - - "intel_idle.max_cstate=0" - cpu: - isolated: "1-3" - reserved: "0" - hugepages: - defaultHugepagesSize: "1G" - pages: - - size: "1G" - count: 1 - node: 0 - - size: "2M" - count: 128 - realTimeKernel: - enabled: true - numa: - topologyPolicy: "single-numa-node" - nodeSelector: - node-role.kubernetes.io/worker-cnf: "" diff --git a/test/e2e/pao/cluster-setup/mcp-only-cluster/performance/kustomization.yaml b/test/e2e/pao/cluster-setup/mcp-only-cluster/performance/kustomization.yaml deleted file mode 100644 index b07d9d059..000000000 --- a/test/e2e/pao/cluster-setup/mcp-only-cluster/performance/kustomization.yaml +++ /dev/null @@ -1,5 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -resources: - - machine_config_pool.yaml diff --git a/test/e2e/pao/cluster-setup/mcp-only-cluster/performance/machine_config_pool.yaml b/test/e2e/pao/cluster-setup/mcp-only-cluster/performance/machine_config_pool.yaml deleted file mode 100644 index 03a6e18c4..000000000 --- a/test/e2e/pao/cluster-setup/mcp-only-cluster/performance/machine_config_pool.yaml +++ /dev/null @@ -1,17 +0,0 @@ -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfigPool -metadata: - name: worker-cnf - namespace: openshift-machine-config-operator - labels: - machineconfiguration.openshift.io/role: worker-cnf -spec: - paused: true - machineConfigSelector: - matchExpressions: - - key: machineconfiguration.openshift.io/role - operator: In - values: 
[worker,worker-cnf] - nodeSelector: - matchLabels: - node-role.kubernetes.io/worker-cnf: "" diff --git a/test/e2e/pao/functests-performance-profile-creator/1_performance-profile_creator/performance_profile_creator_suite_test.go b/test/e2e/pao/functests-performance-profile-creator/1_performance-profile_creator/performance_profile_creator_suite_test.go deleted file mode 100644 index 314d823d3..000000000 --- a/test/e2e/pao/functests-performance-profile-creator/1_performance-profile_creator/performance_profile_creator_suite_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package __performance_profile_creator_test - -import ( - "testing" - - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/junit" - ginkgo_reporters "kubevirt.io/qe-tools/pkg/ginkgo-reporters" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" -) - -func TestPerformanceProfileCreator(t *testing.T) { - RegisterFailHandler(Fail) - - rr := []Reporter{} - if ginkgo_reporters.Polarion.Run { - rr = append(rr, &ginkgo_reporters.Polarion) - } - rr = append(rr, junit.NewJUnitReporter("performance_profile_creator")) - RunSpecsWithDefaultAndCustomReporters(t, "Performance Profile Creator tests", rr) -} diff --git a/test/e2e/pao/functests-performance-profile-creator/1_performance-profile_creator/ppc.go b/test/e2e/pao/functests-performance-profile-creator/1_performance-profile_creator/ppc.go deleted file mode 100644 index 1de524feb..000000000 --- a/test/e2e/pao/functests-performance-profile-creator/1_performance-profile_creator/ppc.go +++ /dev/null @@ -1,263 +0,0 @@ -package __performance_profile_creator - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "path" - "path/filepath" - "strings" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - "github.com/ghodss/yaml" - - "github.com/openshift/cluster-node-tuning-operator/cmd/performance-profile-creator/cmd" - performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2" - testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils" -) - -const ( - mustGatherPath = "../../test/e2e/pao/testdata/must-gather" - expectedProfilesPath = "../../test/e2e/pao/testdata/ppc-expected-profiles" - expectedInfoPath = "../../test/e2e/pao/testdata/ppc-expected-info" - ppcPath = "../../build/_output/bin/performance-profile-creator" -) - -var mustGatherFullPath = path.Join(mustGatherPath, "must-gather.bare-metal") - -var defaultArgs = []string{ - "--disable-ht=false", - "--mcp-name=worker-cnf", - "--rt-kernel=true", - "--user-level-networking=false", - "--profile-name=Performance", - fmt.Sprintf("--must-gather-dir-path=%s", mustGatherFullPath), -} - -var _ = Describe("[rfe_id:OCP-38968][ppc] Performance Profile Creator", func() { - It("[test_id:OCP-40940] performance profile creator regression tests", func() { - Expect(ppcPath).To(BeAnExistingFile()) - - // directory base name => full path - mustGatherDirs := getMustGatherDirs(mustGatherPath) - // full profile path => arguments the profile was created with - expectedProfiles := getExpectedProfiles(expectedProfilesPath, mustGatherDirs) - - for expectedProfilePath, args := range expectedProfiles { - cmdArgs := []string{ - fmt.Sprintf("--disable-ht=%v", args.DisableHT), - fmt.Sprintf("--mcp-name=%s", args.MCPName), - fmt.Sprintf("--must-gather-dir-path=%s", args.MustGatherDirPath), - fmt.Sprintf("--reserved-cpu-count=%d", args.ReservedCPUCount), - fmt.Sprintf("--rt-kernel=%v", args.RTKernel), - fmt.Sprintf("--split-reserved-cpus-across-numa=%v", 
args.SplitReservedCPUsAcrossNUMA), - } - - if args.UserLevelNetworking != nil { - cmdArgs = append(cmdArgs, fmt.Sprintf("--user-level-networking=%v", *args.UserLevelNetworking)) - } - - // do not pass empty strings for optional args - if len(args.ProfileName) > 0 { - cmdArgs = append(cmdArgs, fmt.Sprintf("--profile-name=%s", args.ProfileName)) - } - if len(args.PowerConsumptionMode) > 0 { - cmdArgs = append(cmdArgs, fmt.Sprintf("--power-consumption-mode=%s", args.PowerConsumptionMode)) - } - if len(args.TMPolicy) > 0 { - cmdArgs = append(cmdArgs, fmt.Sprintf("--topology-manager-policy=%s", args.TMPolicy)) - } - - out, err := testutils.ExecAndLogCommand(ppcPath, cmdArgs...) - Expect(err).To(BeNil(), "failed to run ppc for '%s': %v", expectedProfilePath, err) - - profile := &performancev2.PerformanceProfile{} - err = yaml.Unmarshal(out, profile) - Expect(err).To(BeNil(), "failed to unmarshal the output yaml for '%s': %v", expectedProfilePath, err) - - bytes, err := ioutil.ReadFile(expectedProfilePath) - Expect(err).To(BeNil(), "failed to read the expected yaml for '%s': %v", expectedProfilePath, err) - - expectedProfile := &performancev2.PerformanceProfile{} - err = yaml.Unmarshal(bytes, expectedProfile) - Expect(err).To(BeNil(), "failed to unmarshal the expected yaml for '%s': %v", expectedProfilePath, err) - - Expect(profile).To(BeEquivalentTo(expectedProfile), "regression test failed for '%s' case", expectedProfilePath) - } - }) - - It("should describe the cluster from must-gather data in info mode", func() { - Expect(ppcPath).To(BeAnExistingFile()) - - // directory base name => full path - mustGatherDirs := getMustGatherDirs(mustGatherPath) - - for name, path := range mustGatherDirs { - cmdArgs := []string{ - "--info=json", - fmt.Sprintf("--must-gather-dir-path=%s", path), - } - - out, err := testutils.ExecAndLogCommand(ppcPath, cmdArgs...) - Expect(err).To(BeNil(), "failed to run ppc for %q: %v", path, err) - - var cInfo cmd.ClusterInfo - err = json.Unmarshal(out, &cInfo) - Expect(err).To(BeNil(), "failed to unmarshal the output json for %q: %v", path, err) - expectedClusterInfoPath := filepath.Join(expectedInfoPath, fmt.Sprintf("%s.json", name)) - bytes, err := ioutil.ReadFile(expectedClusterInfoPath) - Expect(err).To(BeNil(), "failed to read the expected json for %q: %v", expectedClusterInfoPath, err) - - var expectedInfo cmd.ClusterInfo - err = json.Unmarshal(bytes, &expectedInfo) - Expect(err).To(BeNil(), "failed to unmarshal the expected json for '%s': %v", expectedClusterInfoPath, err) - - expectedInfo.Sort() - - Expect(cInfo).To(BeEquivalentTo(expectedInfo), "regression test failed for '%s' case", expectedClusterInfoPath) - } - }) - Context("Systems with Hyperthreading enabled", func() { - It("[test_id:41419] Verify PPC script fails when reserved cpu count is 2 and requires to split across numa nodes", func() { - Expect(ppcPath).To(BeAnExistingFile()) - Expect(mustGatherFullPath).To(BeADirectory()) - ppcArgs := []string{ - "--reserved-cpu-count=2", - "--split-reserved-cpus-across-numa=true", - } - cmdArgs := append(defaultArgs, ppcArgs...) - _, errData, _ := testutils.ExecAndLogCommandWithStderr(ppcPath, cmdArgs...) 
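// Illustrative sketch, not taken from the reverted code: these specs drive the
// performance-profile-creator binary through the testutils exec helpers. A bare
// os/exec equivalent of one invocation, assuming the same flag set used above and
// that "bytes" and "os/exec" are imported, could look roughly like this:
func runPPCSketch(ppcBinary, mustGatherDir string) (stdout, stderr []byte, err error) {
	cmd := exec.Command(ppcBinary,
		"--mcp-name=worker-cnf",
		"--rt-kernel=true",
		"--reserved-cpu-count=2",
		"--split-reserved-cpus-across-numa=true",
		fmt.Sprintf("--must-gather-dir-path=%s", mustGatherDir),
	)
	var outBuf, errBuf bytes.Buffer
	cmd.Stdout = &outBuf
	cmd.Stderr = &errBuf
	// The generated PerformanceProfile YAML is written to stdout; validation
	// failures, such as the odd-reserved-CPU message asserted above, go to stderr.
	err = cmd.Run()
	return outBuf.Bytes(), errBuf.Bytes(), err
}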
- ppcErrorString := errorStringParser(errData) - Expect(ppcErrorString).To(ContainSubstring("can't allocate odd number of CPUs from a NUMA Node")) - }) - - It("[test_id:41405] Verify PPC fails when splitting of reserved cpus and single numa-node policy is specified", func() { - Expect(ppcPath).To(BeAnExistingFile()) - Expect(mustGatherFullPath).To(BeADirectory()) - ppcArgs := []string{ - fmt.Sprintf("--reserved-cpu-count=%d", 2), - fmt.Sprintf("--split-reserved-cpus-across-numa=%t", true), - fmt.Sprintf("--topology-manager-policy=%s", "single-numa-node"), - } - cmdArgs := append(defaultArgs, ppcArgs...) - _, errData, _ := testutils.ExecAndLogCommandWithStderr(ppcPath, cmdArgs...) - ppcErrorString := errorStringParser(errData) - Expect(ppcErrorString).To(ContainSubstring("not appropriate to split reserved CPUs in case of topology-manager-policy: single-numa-node")) - }) - - It("[test_id:41420] Verify PPC fails when reserved cpu count is more than available cpus", func() { - Expect(ppcPath).To(BeAnExistingFile()) - Expect(mustGatherFullPath).To(BeADirectory()) - ppcArgs := []string{ - fmt.Sprintf("--reserved-cpu-count=%d", 100), - } - cmdArgs := append(defaultArgs, ppcArgs...) - _, errData, _ := testutils.ExecAndLogCommandWithStderr(ppcPath, cmdArgs...) - ppcErrorString := errorStringParser(errData) - Expect(ppcErrorString).To(ContainSubstring("please specify the reserved CPU count in the range")) - }) - - It("[test_id:41421] Verify PPC fails when odd number of reserved cpus are specified", func() { - Expect(ppcPath).To(BeAnExistingFile()) - Expect(mustGatherFullPath).To(BeADirectory()) - ppcArgs := []string{ - fmt.Sprintf("--reserved-cpu-count=%d", 5), - } - cmdArgs := append(defaultArgs, ppcArgs...) - _, errData, _ := testutils.ExecAndLogCommandWithStderr(ppcPath, cmdArgs...) - ppcErrorString := errorStringParser(errData) - Expect(ppcErrorString).To(ContainSubstring("can't allocate odd number of CPUs from a NUMA Node")) - }) - }) - Context("Systems with Hyperthreading disabled", func() { - It("[test_id:42035] verify PPC fails when splitting of reserved cpus and single numa-node policy is specified", func() { - Expect(ppcPath).To(BeAnExistingFile()) - Expect(mustGatherFullPath).To(BeADirectory()) - ppcArgs := []string{ - fmt.Sprintf("--reserved-cpu-count=%d", 2), - fmt.Sprintf("--split-reserved-cpus-across-numa=%t", true), - fmt.Sprintf("--topology-manager-policy=%s", "single-numa-node"), - } - cmdArgs := append(defaultArgs, ppcArgs...) - _, errData, _ := testutils.ExecAndLogCommandWithStderr(ppcPath, cmdArgs...) 
- ppcErrorString := errorStringParser(errData) - Expect(ppcErrorString).To(ContainSubstring("not appropriate to split reserved CPUs in case of topology-manager-policy: single-numa-node")) - }) - }) -}) - -func getMustGatherDirs(mustGatherPath string) map[string]string { - Expect(mustGatherPath).To(BeADirectory()) - - mustGatherDirs := make(map[string]string) - mustGatherPathContent, err := ioutil.ReadDir(mustGatherPath) - Expect(err).To(BeNil(), fmt.Errorf("can't list '%s' files: %v", mustGatherPath, err)) - - for _, file := range mustGatherPathContent { - fullFilePath := filepath.Join(mustGatherPath, file.Name()) - Expect(fullFilePath).To(BeADirectory()) - - mustGatherDirs[file.Name()] = fullFilePath - } - - return mustGatherDirs -} - -func getExpectedProfiles(expectedProfilesPath string, mustGatherDirs map[string]string) map[string]cmd.ProfileCreatorArgs { - Expect(expectedProfilesPath).To(BeADirectory()) - - expectedProfilesPathContent, err := ioutil.ReadDir(expectedProfilesPath) - Expect(err).To(BeNil(), fmt.Errorf("can't list '%s' files: %v", expectedProfilesPath, err)) - - // read ppc params files - ppcParams := make(map[string]cmd.ProfileCreatorArgs) - for _, file := range expectedProfilesPathContent { - if filepath.Ext(file.Name()) != ".json" { - continue - } - - fullFilePath := filepath.Join(expectedProfilesPath, file.Name()) - bytes, err := ioutil.ReadFile(fullFilePath) - Expect(err).To(BeNil(), "failed to read the ppc params file for '%s': %v", fullFilePath, err) - - var ppcArgs cmd.ProfileCreatorArgs - err = json.Unmarshal(bytes, &ppcArgs) - Expect(err).To(BeNil(), "failed to decode the ppc params file for '%s': %v", fullFilePath, err) - - Expect(ppcArgs.MustGatherDirPath).ToNot(BeEmpty(), "must-gather arg missing for '%s'", fullFilePath) - ppcArgs.MustGatherDirPath = path.Join(mustGatherPath, ppcArgs.MustGatherDirPath) - Expect(ppcArgs.MustGatherDirPath).To(BeADirectory()) - - profileKey := strings.TrimSuffix(file.Name(), filepath.Ext(file.Name())) - ppcParams[profileKey] = ppcArgs - } - - // pickup profile files - expectedProfiles := make(map[string]cmd.ProfileCreatorArgs) - for _, file := range expectedProfilesPathContent { - if filepath.Ext(file.Name()) != ".yaml" { - continue - } - profileKey := strings.TrimSuffix(file.Name(), filepath.Ext(file.Name())) - ppcArgs, ok := ppcParams[profileKey] - Expect(ok).To(BeTrue(), "can't find ppc params for the expected profile: '%s'", file.Name()) - - fullFilePath := filepath.Join(expectedProfilesPath, file.Name()) - expectedProfiles[fullFilePath] = ppcArgs - } - - return expectedProfiles -} - -// PPC stderr parser -func errorStringParser(errData []byte) string { - stdError := string(errData) - for _, line := range strings.Split(stdError, "\n") { - if strings.Contains(line, "Error: ") { - return line - } - } - return "" -} diff --git a/test/e2e/pao/functests-render-command/1_render_command/render_suite_test.go b/test/e2e/pao/functests-render-command/1_render_command/render_suite_test.go deleted file mode 100644 index 11690a8f0..000000000 --- a/test/e2e/pao/functests-render-command/1_render_command/render_suite_test.go +++ /dev/null @@ -1,59 +0,0 @@ -package __render_command_test - -import ( - "fmt" - "path/filepath" - "runtime" - "testing" - - "github.com/ghodss/yaml" - "github.com/google/go-cmp/cmp" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/junit" - ginkgo_reporters "kubevirt.io/qe-tools/pkg/ginkgo-reporters" -) - -var ( - testDir string - workspaceDir string - binPath string -) - -func TestRenderCmd(t *testing.T) { - RegisterFailHandler(Fail) - - rr := []Reporter{} - if ginkgo_reporters.Polarion.Run { - rr = append(rr, &ginkgo_reporters.Polarion) - } - rr = append(rr, junit.NewJUnitReporter("render_manifests")) - RunSpecsWithDefaultAndCustomReporters(t, "Performance Operator render tests", rr) -} - -var _ = BeforeSuite(func() { - _, file, _, ok := runtime.Caller(0) - if !ok { - Fail("Cannot retrieve test directory") - } - - testDir = filepath.Dir(file) - workspaceDir = filepath.Clean(filepath.Join(testDir, "..", "..")) - binPath = filepath.Clean(filepath.Join(workspaceDir, "build", "_output", "bin")) - fmt.Fprintf(GinkgoWriter, "using binary at %q\n", binPath) -}) - -func getFilesDiff(wantFile, gotFile []byte) (string, error) { - var wantObj interface{} - var gotObj interface{} - - if err := yaml.Unmarshal(wantFile, &wantObj); err != nil { - return "", fmt.Errorf("failed to unmarshal data for 'want':%s", err) - } - - if err := yaml.Unmarshal(gotFile, &gotObj); err != nil { - return "", fmt.Errorf("failed to unmarshal data for 'got':%s", err) - } - - return cmp.Diff(wantObj, gotObj), nil -} diff --git a/test/e2e/pao/functests-render-command/1_render_command/render_test.go b/test/e2e/pao/functests-render-command/1_render_command/render_test.go deleted file mode 100644 index 3928f3926..000000000 --- a/test/e2e/pao/functests-render-command/1_render_command/render_test.go +++ /dev/null @@ -1,104 +0,0 @@ -package __render_command_test - -import ( - "fmt" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" -) - -var ( - assetsOutDir string - assetsInDir string - ppInFiles string - testDataPath string -) - -var _ = Describe("render command e2e test", func() { - - BeforeEach(func() { - assetsOutDir = createTempAssetsDir() - assetsInDir = filepath.Join(workspaceDir, "build", "assets") - ppInFiles = filepath.Join(workspaceDir, "test", "e2e", "pao", "cluster-setup", "manual-cluster", "performance", "performance_profile.yaml") - testDataPath = filepath.Join(workspaceDir, "test", "e2e", "pao", "testdata") - }) - - Context("With a single performance-profile", func() { - It("Gets cli args and produces the expected components to output directory", func() { - - cmdline := []string{ - filepath.Join(binPath, "performance-addon-operators"), - "render", - "--performance-profile-input-files", ppInFiles, - "--asset-input-dir", assetsInDir, - "--asset-output-dir", assetsOutDir, - } - fmt.Fprintf(GinkgoWriter, "running: %v\n", cmdline) - - cmd := exec.Command(cmdline[0], cmdline[1:]...) - runAndCompare(cmd) - - }) - - It("Gets environment variables and produces the expected components to output directory", func() { - cmdline := []string{ - filepath.Join(binPath, "performance-addon-operators"), - "render", - } - fmt.Fprintf(GinkgoWriter, "running: %v\n", cmdline) - - cmd := exec.Command(cmdline[0], cmdline[1:]...) 
- cmd.Env = append(cmd.Env, - fmt.Sprintf("PERFORMANCE_PROFILE_INPUT_FILES=%s", ppInFiles), - fmt.Sprintf("ASSET_INPUT_DIR=%s", assetsInDir), - fmt.Sprintf("ASSET_OUTPUT_DIR=%s", assetsOutDir), - ) - runAndCompare(cmd) - }) - }) - - AfterEach(func() { - cleanArtifacts() - }) - -}) - -func createTempAssetsDir() string { - assets, err := ioutil.TempDir("", "assets") - Expect(err).ToNot(HaveOccurred()) - fmt.Printf("assets` output dir at: %q\n", assets) - return assets -} - -func cleanArtifacts() { - os.RemoveAll(assetsOutDir) -} - -func runAndCompare(cmd *exec.Cmd) { - _, err := cmd.Output() - Expect(err).ToNot(HaveOccurred()) - - outputAssetsFiles, err := ioutil.ReadDir(assetsOutDir) - Expect(err).ToNot(HaveOccurred()) - - refPath := filepath.Join(testDataPath, "render-expected-output") - fmt.Fprintf(GinkgoWriter, "reference data at: %q\n", refPath) - - for _, f := range outputAssetsFiles { - refData, err := ioutil.ReadFile(filepath.Join(refPath, f.Name())) - Expect(err).ToNot(HaveOccurred()) - - data, err := ioutil.ReadFile(filepath.Join(assetsOutDir, f.Name())) - Expect(err).ToNot(HaveOccurred()) - - diff, err := getFilesDiff(data, refData) - Expect(err).ToNot(HaveOccurred()) - Expect(diff).To(BeZero(), "rendered %s file is not identical to its reference file; diff: %v", - f.Name(), - diff) - } -} diff --git a/test/e2e/pao/functests/0_config/config.go b/test/e2e/pao/functests/0_config/config.go deleted file mode 100644 index 8c80fb370..000000000 --- a/test/e2e/pao/functests/0_config/config.go +++ /dev/null @@ -1,197 +0,0 @@ -package __performance_config - -import ( - "context" - "fmt" - "io/ioutil" - "os" - "time" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/apimachinery/pkg/types" - "k8s.io/utils/pointer" - "sigs.k8s.io/controller-runtime/pkg/client" - - mcv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" - - performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2" - "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components" - "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components/profile" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils" - testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils" - testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/cluster" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/discovery" - testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/log" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/mcps" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/profiles" -) - -var RunningOnSingleNode bool - -var _ = Describe("[performance][config] Performance configuration", func() { - - testutils.BeforeAll(func() { - isSNO, err := cluster.IsSingleNode() - Expect(err).ToNot(HaveOccurred()) - RunningOnSingleNode = isSNO - }) - - It("Should successfully deploy the performance profile", func() { - - performanceProfile := testProfile() - profileAlreadyExists := false - - 
performanceManifest, foundOverride := os.LookupEnv("PERFORMANCE_PROFILE_MANIFEST_OVERRIDE") - var err error - if foundOverride { - performanceProfile, err = externalPerformanceProfile(performanceManifest) - Expect(err).ToNot(HaveOccurred(), "Failed overriding performance profile", performanceManifest) - testlog.Warningf("Consuming performance profile from %s", performanceManifest) - } - if discovery.Enabled() { - performanceProfile, err = profiles.GetByNodeLabels(testutils.NodeSelectorLabels) - Expect(err).ToNot(HaveOccurred(), "Failed finding a performance profile in discovery mode using selector %v", testutils.NodeSelectorLabels) - testlog.Info("Discovery mode: consuming a deployed performance profile from the cluster") - profileAlreadyExists = true - } - - By("Getting MCP for profile") - mcpLabel := profile.GetMachineConfigLabel(performanceProfile) - key, value := components.GetFirstKeyAndValue(mcpLabel) - mcpsByLabel, err := mcps.GetByLabel(key, value) - Expect(err).ToNot(HaveOccurred(), "Failed getting MCP by label key %v value %v", key, value) - Expect(len(mcpsByLabel)).To(Equal(1), fmt.Sprintf("Unexpected number of MCPs found: %v", len(mcpsByLabel))) - performanceMCP := &mcpsByLabel[0] - - if !discovery.Enabled() { - By("Creating the PerformanceProfile") - // this might fail while the operator is still being deployed and the CRD does not exist yet - Eventually(func() error { - err := testclient.Client.Create(context.TODO(), performanceProfile) - if errors.IsAlreadyExists(err) { - testlog.Warning(fmt.Sprintf("A PerformanceProfile with name %s already exists! If created externally, tests might have unexpected behaviour", performanceProfile.Name)) - profileAlreadyExists = true - return nil - } - return err - }, cluster.ComputeTestTimeout(15*time.Minute, RunningOnSingleNode), 15*time.Second).ShouldNot(HaveOccurred(), "Failed creating the performance profile") - } - - if !performanceMCP.Spec.Paused { - By("MCP is already unpaused") - } else { - By("Unpausing the MCP") - Expect(testclient.Client.Patch(context.TODO(), performanceMCP, - client.RawPatch( - types.JSONPatchType, - []byte(fmt.Sprintf(`[{ "op": "replace", "path": "/spec/paused", "value": %v }]`, false)), - ), - )).ToNot(HaveOccurred(), "Failed unpausing MCP") - } - - By("Waiting for the MCP to pick the PerformanceProfile's MC") - mcps.WaitForProfilePickedUp(performanceMCP.Name, performanceProfile) - - // If the profile is already there, it's likely to have been through the updating phase, so we only - // wait for updated. 
- if !profileAlreadyExists { - By("Waiting for MCP starting to update") - mcps.WaitForCondition(performanceMCP.Name, mcv1.MachineConfigPoolUpdating, corev1.ConditionTrue) - } - By("Waiting for MCP being updated") - mcps.WaitForCondition(performanceMCP.Name, mcv1.MachineConfigPoolUpdated, corev1.ConditionTrue) - - }) - -}) - -func externalPerformanceProfile(performanceManifest string) (*performancev2.PerformanceProfile, error) { - performanceScheme := runtime.NewScheme() - performancev2.AddToScheme(performanceScheme) - - decode := serializer.NewCodecFactory(performanceScheme).UniversalDeserializer().Decode - manifest, err := ioutil.ReadFile(performanceManifest) - if err != nil { - return nil, fmt.Errorf("Failed to read %s file", performanceManifest) - } - obj, _, err := decode([]byte(manifest), nil, nil) - if err != nil { - return nil, fmt.Errorf("Failed to read the manifest file %s", performanceManifest) - } - profile, ok := obj.(*performancev2.PerformanceProfile) - if !ok { - return nil, fmt.Errorf("Failed to convert manifest file to profile") - } - return profile, nil -} - -func testProfile() *performancev2.PerformanceProfile { - reserved := performancev2.CPUSet("0") - isolated := performancev2.CPUSet("1-3") - hugePagesSize := performancev2.HugePageSize("1G") - - profile := &performancev2.PerformanceProfile{ - TypeMeta: metav1.TypeMeta{ - Kind: "PerformanceProfile", - APIVersion: performancev2.GroupVersion.String(), - }, - ObjectMeta: metav1.ObjectMeta{ - Name: utils.PerformanceProfileName, - }, - Spec: performancev2.PerformanceProfileSpec{ - CPU: &performancev2.CPU{ - Reserved: &reserved, - Isolated: &isolated, - }, - HugePages: &performancev2.HugePages{ - DefaultHugePagesSize: &hugePagesSize, - Pages: []performancev2.HugePage{ - { - Size: "1G", - Count: 1, - Node: pointer.Int32Ptr(0), - }, - { - Size: "2M", - Count: 128, - }, - }, - }, - NodeSelector: testutils.NodeSelectorLabels, - RealTimeKernel: &performancev2.RealTimeKernel{ - Enabled: pointer.BoolPtr(true), - }, - AdditionalKernelArgs: []string{ - "nmi_watchdog=0", - "audit=0", - "mce=off", - "processor.max_cstate=1", - "idle=poll", - "intel_idle.max_cstate=0", - }, - NUMA: &performancev2.NUMA{ - TopologyPolicy: pointer.StringPtr("single-numa-node"), - }, - Net: &performancev2.Net{ - UserLevelNetworking: pointer.BoolPtr(true), - }, - }, - } - // If the machineConfigPool is master, the automatic selector from PAO won't work - // since the machineconfiguration.openshift.io/role label is not applied to the - // master pool, hence we put an explicit selector here. - if utils.RoleWorkerCNF == "master" { - profile.Spec.MachineConfigPoolSelector = map[string]string{ - "pools.operator.machineconfiguration.openshift.io/master": "", - } - } - return profile -} diff --git a/test/e2e/pao/functests/0_config/test_suite_performance_config_test.go b/test/e2e/pao/functests/0_config/test_suite_performance_config_test.go deleted file mode 100644 index 2817d296d..000000000 --- a/test/e2e/pao/functests/0_config/test_suite_performance_config_test.go +++ /dev/null @@ -1,31 +0,0 @@ -//go:build !unittests -// +build !unittests - -package __performance_config_test - -import ( - "testing" - - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/junit" - - testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client" - ginkgo_reporters "kubevirt.io/qe-tools/pkg/ginkgo-reporters" -) - -func TestPerformanceConfig(t *testing.T) { - RegisterFailHandler(Fail) - - rr := []Reporter{} - if ginkgo_reporters.Polarion.Run { - rr = append(rr, &ginkgo_reporters.Polarion) - } - rr = append(rr, junit.NewJUnitReporter("performance_config")) - RunSpecsWithDefaultAndCustomReporters(t, "Performance Addon Operator configuration", rr) -} - -var _ = BeforeSuite(func() { - Expect(testclient.ClientsEnabled).To(BeTrue()) - -}) diff --git a/test/e2e/pao/functests/1_performance/cpu_management.go b/test/e2e/pao/functests/1_performance/cpu_management.go deleted file mode 100644 index cb3427d9f..000000000 --- a/test/e2e/pao/functests/1_performance/cpu_management.go +++ /dev/null @@ -1,682 +0,0 @@ -package __performance - -import ( - "context" - "fmt" - "regexp" - "sort" - "strconv" - "strings" - "time" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" - "k8s.io/utils/pointer" - "sigs.k8s.io/controller-runtime/pkg/client" - - . "github.com/onsi/ginkgo" - "github.com/onsi/ginkgo/extensions/table" - . "github.com/onsi/gomega" - - performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2" - "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components" - testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils" - testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/cluster" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/discovery" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/events" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/images" - testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/log" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/nodes" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/pods" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/profiles" -) - -var workerRTNode *corev1.Node -var profile *performancev2.PerformanceProfile - -const ( - sysDevicesOnlineCPUs = "/sys/devices/system/cpu/online" -) - -var _ = Describe("[rfe_id:27363][performance] CPU Management", func() { - var balanceIsolated bool - var reservedCPU, isolatedCPU string - var listReservedCPU []int - var reservedCPUSet cpuset.CPUSet - var onlineCPUSet cpuset.CPUSet - - testutils.BeforeAll(func() { - isSNO, err := cluster.IsSingleNode() - Expect(err).ToNot(HaveOccurred()) - RunningOnSingleNode = isSNO - }) - - BeforeEach(func() { - if discovery.Enabled() && testutils.ProfileNotFound { - Skip("Discovery mode enabled, performance profile not found") - } - - workerRTNodes, err := nodes.GetByLabels(testutils.NodeSelectorLabels) - Expect(err).ToNot(HaveOccurred()) - workerRTNodes, err = nodes.MatchingOptionalSelector(workerRTNodes) - Expect(err).ToNot(HaveOccurred(), fmt.Sprintf("error looking for the optional selector: %v", err)) - 
Expect(workerRTNodes).ToNot(BeEmpty()) - workerRTNode = &workerRTNodes[0] - profile, err = profiles.GetByNodeLabels(testutils.NodeSelectorLabels) - Expect(err).ToNot(HaveOccurred()) - - By(fmt.Sprintf("Checking the profile %s with cpus %s", profile.Name, cpuSpecToString(profile.Spec.CPU))) - balanceIsolated = true - if profile.Spec.CPU.BalanceIsolated != nil { - balanceIsolated = *profile.Spec.CPU.BalanceIsolated - } - - Expect(profile.Spec.CPU.Isolated).NotTo(BeNil()) - isolatedCPU = string(*profile.Spec.CPU.Isolated) - - Expect(profile.Spec.CPU.Reserved).NotTo(BeNil()) - reservedCPU = string(*profile.Spec.CPU.Reserved) - reservedCPUSet, err = cpuset.Parse(reservedCPU) - Expect(err).ToNot(HaveOccurred()) - listReservedCPU = reservedCPUSet.ToSlice() - - onlineCPUSet, err = nodes.GetOnlineCPUsSet(workerRTNode) - Expect(err).ToNot(HaveOccurred()) - }) - - Describe("Verification of configuration on the worker node", func() { - It("[test_id:28528][crit:high][vendor:cnf-qe@redhat.com][level:acceptance] Verify CPU reservation on the node", func() { - By(fmt.Sprintf("Allocatable CPU should be less than capacity by %d", len(listReservedCPU))) - capacityCPU, _ := workerRTNode.Status.Capacity.Cpu().AsInt64() - allocatableCPU, _ := workerRTNode.Status.Allocatable.Cpu().AsInt64() - differenceCPUGot := capacityCPU - allocatableCPU - differenceCPUExpected := int64(len(listReservedCPU)) - Expect(differenceCPUGot).To(Equal(differenceCPUExpected), "Allocatable CPU %d should be less than capacity %d by %d; got %d instead", allocatableCPU, capacityCPU, differenceCPUExpected, differenceCPUGot) - }) - - It("[test_id:37862][crit:high][vendor:cnf-qe@redhat.com][level:acceptance] Verify CPU affinity mask, CPU reservation and CPU isolation on worker node", func() { - By("checking isolated CPU") - cmd := []string{"cat", "/sys/devices/system/cpu/isolated"} - sysIsolatedCpus, err := nodes.ExecCommandOnNode(cmd, workerRTNode) - Expect(err).ToNot(HaveOccurred()) - if balanceIsolated { - Expect(sysIsolatedCpus).To(BeEmpty()) - } else { - Expect(sysIsolatedCpus).To(Equal(isolatedCPU)) - } - - By("checking reserved CPU in kubelet config file") - cmd = []string{"cat", "/rootfs/etc/kubernetes/kubelet.conf"} - conf, err := nodes.ExecCommandOnNode(cmd, workerRTNode) - Expect(err).ToNot(HaveOccurred(), "failed to cat kubelet.conf") - // kubelet.conf changed formatting, there is a space after colons atm. 
Let's deal with both cases with a regex - Expect(conf).To(MatchRegexp(fmt.Sprintf(`"reservedSystemCPUs": ?"%s"`, reservedCPU))) - - By("checking CPU affinity mask for kernel scheduler") - cmd = []string{"/bin/bash", "-c", "taskset -pc 1"} - sched, err := nodes.ExecCommandOnNode(cmd, workerRTNode) - Expect(err).ToNot(HaveOccurred(), "failed to execute taskset") - mask := strings.SplitAfter(sched, " ") - maskSet, err := cpuset.Parse(mask[len(mask)-1]) - Expect(err).ToNot(HaveOccurred()) - - Expect(reservedCPUSet.IsSubsetOf(maskSet)).To(Equal(true), fmt.Sprintf("The init process (pid 1) should have cpu affinity: %s", reservedCPU)) - }) - - It("[test_id:34358] Verify rcu_nocbs kernel argument on the node", func() { - By("checking that cmdline contains rcu_nocbs with right value") - cmd := []string{"cat", "/proc/cmdline"} - cmdline, err := nodes.ExecCommandOnNode(cmd, workerRTNode) - Expect(err).ToNot(HaveOccurred()) - re := regexp.MustCompile(`rcu_nocbs=\S+`) - rcuNocbsArgument := re.FindString(cmdline) - Expect(rcuNocbsArgument).To(ContainSubstring("rcu_nocbs=")) - rcuNocbsCpu := strings.Split(rcuNocbsArgument, "=")[1] - Expect(rcuNocbsCpu).To(Equal(isolatedCPU)) - - By("checking that new rcuo processes are running on non_isolated cpu") - cmd = []string{"pgrep", "rcuo"} - rcuoList, err := nodes.ExecCommandOnNode(cmd, workerRTNode) - Expect(err).ToNot(HaveOccurred()) - for _, rcuo := range strings.Split(rcuoList, "\n") { - // check cpu affinity mask - cmd = []string{"/bin/bash", "-c", fmt.Sprintf("taskset -pc %s", rcuo)} - taskset, err := nodes.ExecCommandOnNode(cmd, workerRTNode) - Expect(err).ToNot(HaveOccurred()) - mask := strings.SplitAfter(taskset, " ") - maskSet, err := cpuset.Parse(mask[len(mask)-1]) - Expect(err).ToNot(HaveOccurred()) - Expect(reservedCPUSet.IsSubsetOf(maskSet)).To(Equal(true), "The process should have cpu affinity: %s", reservedCPU) - } - }) - }) - - Describe("Verification of cpu manager functionality", func() { - var testpod *corev1.Pod - var discoveryFailed bool - - testutils.BeforeAll(func() { - discoveryFailed = false - if discovery.Enabled() { - profile, err := profiles.GetByNodeLabels(testutils.NodeSelectorLabels) - Expect(err).ToNot(HaveOccurred()) - isolatedCPU = string(*profile.Spec.CPU.Isolated) - } - }) - - BeforeEach(func() { - if discoveryFailed { - Skip("Skipping tests since there are insufficant isolated cores to create a stress pod") - } - }) - - AfterEach(func() { - deleteTestPod(testpod) - }) - - table.DescribeTable("Verify CPU usage by stress PODs", func(guaranteed bool) { - cpuID := onlineCPUSet.ToSliceNoSort()[0] - smtLevel := nodes.GetSMTLevel(cpuID, workerRTNode) - if smtLevel < 2 { - Skip(fmt.Sprintf("designated worker node %q has SMT level %d - minimum required 2", workerRTNode.Name, smtLevel)) - } - - // note must be a multiple of the smtLevel. 
Pick the minimum to maximize the chances to run on CI - cpuRequest := smtLevel - testpod = getStressPod(workerRTNode.Name, cpuRequest) - testpod.Namespace = testutils.NamespaceTesting - - listCPU := onlineCPUSet.ToSlice() - expectedQos := corev1.PodQOSBurstable - - if guaranteed { - listCPU = onlineCPUSet.Difference(reservedCPUSet).ToSlice() - expectedQos = corev1.PodQOSGuaranteed - promotePodToGuaranteed(testpod) - } else if !balanceIsolated { - // when balanceIsolated is False - non-guaranteed pod should run on reserved cpu - listCPU = listReservedCPU - } - - var err error - err = testclient.Client.Create(context.TODO(), testpod) - Expect(err).ToNot(HaveOccurred()) - - err = pods.WaitForCondition(testpod, corev1.PodReady, corev1.ConditionTrue, 10*time.Minute) - logEventsForPod(testpod) - Expect(err).ToNot(HaveOccurred()) - - updatedPod := &corev1.Pod{} - err = testclient.Client.Get(context.TODO(), client.ObjectKeyFromObject(testpod), updatedPod) - Expect(err).ToNot(HaveOccurred()) - Expect(updatedPod.Status.QOSClass).To(Equal(expectedQos), - "unexpected QoS Class for %s/%s: %s (looking for %s)", - updatedPod.Namespace, updatedPod.Name, updatedPod.Status.QOSClass, expectedQos) - - output, err := nodes.ExecCommandOnNode( - []string{"/bin/bash", "-c", "ps -o psr $(pgrep -n stress) | tail -1"}, - workerRTNode, - ) - Expect(err).ToNot(HaveOccurred(), "failed to get cpu of stress process") - cpu, err := strconv.Atoi(strings.Trim(output, " ")) - Expect(err).ToNot(HaveOccurred()) - - Expect(cpu).To(BeElementOf(listCPU)) - }, - table.Entry("[test_id:37860] Non-guaranteed POD can work on any CPU", false), - table.Entry("[test_id:27492] Guaranteed POD should work on isolated cpu", true), - ) - }) - - When("pod runs with the CPU load balancing runtime class", func() { - var smtLevel int - var testpod *corev1.Pod - var defaultFlags map[int][]int - - getCPUsSchedulingDomainFlags := func() (map[int][]int, error) { - cmd := []string{"/bin/bash", "-c", "more /proc/sys/kernel/sched_domain/cpu*/domain*/flags | cat"} - out, err := nodes.ExecCommandOnNode(cmd, workerRTNode) - if err != nil { - return nil, err - } - - re, err := regexp.Compile(`/proc/sys/kernel/sched_domain/cpu(\d+)/domain\d+/flags\n:+\n(\d+)`) - if err != nil { - return nil, err - } - - allSubmatch := re.FindAllStringSubmatch(out, -1) - cpuToSchedDomains := map[int][]int{} - for _, submatch := range allSubmatch { - if len(submatch) != 3 { - return nil, fmt.Errorf("the sched_domain submatch %v does not have a valid length", submatch) - } - - cpu, err := strconv.Atoi(submatch[1]) - if err != nil { - return nil, err - } - - if _, ok := cpuToSchedDomains[cpu]; !ok { - cpuToSchedDomains[cpu] = []int{} - } - - flags, err := strconv.Atoi(submatch[2]) - if err != nil { - return nil, err - } - - cpuToSchedDomains[cpu] = append(cpuToSchedDomains[cpu], flags) - } - - // sort sched_domain - for cpu := range cpuToSchedDomains { - sort.Ints(cpuToSchedDomains[cpu]) - } - - testlog.Infof("Scheduler domains: %v", cpuToSchedDomains) - return cpuToSchedDomains, nil - } - - BeforeEach(func() { - var err error - defaultFlags, err = getCPUsSchedulingDomainFlags() - Expect(err).ToNot(HaveOccurred()) - - annotations := map[string]string{ - "cpu-load-balancing.crio.io": "disable", - } - // any random existing cpu is fine - cpuID := onlineCPUSet.ToSliceNoSort()[0] - smtLevel = nodes.GetSMTLevel(cpuID, workerRTNode) - testpod = getTestPodWithAnnotations(annotations, smtLevel) - }) - - AfterEach(func() { - deleteTestPod(testpod) - }) - - It("[test_id:32646] should 
disable CPU load balancing for CPU's used by the pod", func() { - var err error - By("Starting the pod") - err = testclient.Client.Create(context.TODO(), testpod) - Expect(err).ToNot(HaveOccurred()) - - err = pods.WaitForCondition(testpod, corev1.PodReady, corev1.ConditionTrue, 10*time.Minute) - logEventsForPod(testpod) - Expect(err).ToNot(HaveOccurred()) - - By("Getting the container cpuset.cpus cgroup") - containerID, err := pods.GetContainerIDByName(testpod, "test") - Expect(err).ToNot(HaveOccurred()) - - containerCgroup := "" - Eventually(func() string { - cmd := []string{"/bin/bash", "-c", fmt.Sprintf("find /rootfs/sys/fs/cgroup/cpuset/ -name *%s*", containerID)} - containerCgroup, err = nodes.ExecCommandOnNode(cmd, workerRTNode) - Expect(err).ToNot(HaveOccurred()) - return containerCgroup - }, (cluster.ComputeTestTimeout(30*time.Second, RunningOnSingleNode)), 5*time.Second).ShouldNot(BeEmpty(), - fmt.Sprintf("cannot find cgroup for container %q", containerID)) - - By("Checking what CPU the pod is using") - cmd := []string{"/bin/bash", "-c", fmt.Sprintf("cat %s/cpuset.cpus", containerCgroup)} - output, err := nodes.ExecCommandOnNode(cmd, workerRTNode) - Expect(err).ToNot(HaveOccurred()) - - cpus, err := cpuset.Parse(output) - Expect(err).ToNot(HaveOccurred()) - - By("Getting the CPU scheduling flags") - flags, err := getCPUsSchedulingDomainFlags() - Expect(err).ToNot(HaveOccurred()) - - By("Verifying that the CPU load balancing was disabled") - for _, cpu := range cpus.ToSlice() { - Expect(len(flags[cpu])).To(Equal(len(defaultFlags[cpu]))) - // the CPU flags should be almost the same except the LSB that should be disabled - // see https://github.com/torvalds/linux/blob/0fe5f9ca223573167c4c4156903d751d2c8e160e/include/linux/sched/topology.h#L14 - // for more information regarding the sched domain flags - for i := range flags[cpu] { - Expect(flags[cpu][i]).To(Equal(defaultFlags[cpu][i] - 1)) - } - } - - By("Deleting the pod") - deleteTestPod(testpod) - - By("Getting the CPU scheduling flags") - flags, err = getCPUsSchedulingDomainFlags() - Expect(err).ToNot(HaveOccurred()) - - By("Verifying that the CPU load balancing was enabled back") - for _, cpu := range cpus.ToSlice() { - Expect(len(flags[cpu])).To(Equal(len(defaultFlags[cpu]))) - // the CPU scheduling flags should be restored to the default values - for i := range flags[cpu] { - Expect(flags[cpu][i]).To(Equal(defaultFlags[cpu][i])) - } - } - }) - }) - - Describe("Verification that IRQ load balance can be disabled per POD", func() { - var smtLevel int - var testpod *corev1.Pod - - BeforeEach(func() { - Skip("part of interrupts does not support CPU affinity change because of underlying hardware") - - if profile.Spec.GloballyDisableIrqLoadBalancing != nil && *profile.Spec.GloballyDisableIrqLoadBalancing { - Skip("IRQ load balance should be enabled (GloballyDisableIrqLoadBalancing=false), skipping test") - } - - cpuID := onlineCPUSet.ToSliceNoSort()[0] - smtLevel = nodes.GetSMTLevel(cpuID, workerRTNode) - }) - - AfterEach(func() { - deleteTestPod(testpod) - }) - - It("[test_id:36364] should disable IRQ balance for CPU where POD is running", func() { - By("checking default smp affinity is equal to all active CPUs") - defaultSmpAffinitySet, err := nodes.GetDefaultSmpAffinitySet(workerRTNode) - Expect(err).ToNot(HaveOccurred()) - - onlineCPUsSet, err := nodes.GetOnlineCPUsSet(workerRTNode) - Expect(err).ToNot(HaveOccurred()) - - Expect(onlineCPUsSet.IsSubsetOf(defaultSmpAffinitySet)).To(BeTrue(), "All online CPUs %s should be subset of 
default SMP affinity %s", onlineCPUsSet, defaultSmpAffinitySet) - - By("Running pod with annotations that disable specific CPU from IRQ balancer") - annotations := map[string]string{ - "irq-load-balancing.crio.io": "disable", - "cpu-quota.crio.io": "disable", - } - testpod = getTestPodWithAnnotations(annotations, smtLevel) - - err = testclient.Client.Create(context.TODO(), testpod) - Expect(err).ToNot(HaveOccurred()) - err = pods.WaitForCondition(testpod, corev1.PodReady, corev1.ConditionTrue, 10*time.Minute) - logEventsForPod(testpod) - Expect(err).ToNot(HaveOccurred()) - - By("Checking that the default smp affinity mask was updated and CPU (where POD is running) isolated") - defaultSmpAffinitySet, err = nodes.GetDefaultSmpAffinitySet(workerRTNode) - Expect(err).ToNot(HaveOccurred()) - - getPsr := []string{"/bin/bash", "-c", "grep Cpus_allowed_list /proc/self/status | awk '{print $2}'"} - psr, err := pods.WaitForPodOutput(testclient.K8sClient, testpod, getPsr) - Expect(err).ToNot(HaveOccurred()) - psrSet, err := cpuset.Parse(strings.Trim(string(psr), "\n")) - Expect(err).ToNot(HaveOccurred()) - - Expect(psrSet.IsSubsetOf(defaultSmpAffinitySet)).To(BeFalse(), fmt.Sprintf("Default SMP affinity should not contain isolated CPU %s", psr)) - - By("Checking that there are no any active IRQ on isolated CPU") - // It may takes some time for the system to reschedule active IRQs - Eventually(func() bool { - getActiveIrq := []string{"/bin/bash", "-c", "for n in $(find /proc/irq/ -name smp_affinity_list); do echo $(cat $n); done"} - activeIrq, err := nodes.ExecCommandOnNode(getActiveIrq, workerRTNode) - Expect(err).ToNot(HaveOccurred()) - Expect(activeIrq).ToNot(BeEmpty()) - for _, irq := range strings.Split(activeIrq, "\n") { - irqAffinity, err := cpuset.Parse(irq) - Expect(err).ToNot(HaveOccurred()) - if !irqAffinity.Equals(onlineCPUsSet) && psrSet.IsSubsetOf(irqAffinity) { - return false - } - } - return true - }, (cluster.ComputeTestTimeout(30*time.Second, RunningOnSingleNode)), 5*time.Second).Should(BeTrue(), - fmt.Sprintf("IRQ still active on CPU%s", psr)) - - By("Checking that after removing POD default smp affinity is returned back to all active CPUs") - deleteTestPod(testpod) - defaultSmpAffinitySet, err = nodes.GetDefaultSmpAffinitySet(workerRTNode) - Expect(err).ToNot(HaveOccurred()) - - Expect(onlineCPUsSet.IsSubsetOf(defaultSmpAffinitySet)).To(BeTrue(), "All online CPUs %s should be subset of default SMP affinity %s", onlineCPUsSet, defaultSmpAffinitySet) - }) - }) - - When("reserved CPUs specified", func() { - var testpod *corev1.Pod - - BeforeEach(func() { - testpod = pods.GetTestPod() - testpod.Namespace = testutils.NamespaceTesting - testpod.Spec.NodeSelector = map[string]string{testutils.LabelHostname: workerRTNode.Name} - testpod.Spec.ShareProcessNamespace = pointer.BoolPtr(true) - - err := testclient.Client.Create(context.TODO(), testpod) - Expect(err).ToNot(HaveOccurred()) - - err = pods.WaitForCondition(testpod, corev1.PodReady, corev1.ConditionTrue, 10*time.Minute) - logEventsForPod(testpod) - Expect(err).ToNot(HaveOccurred()) - }) - - It("[test_id:49147] should run infra containers on reserved CPUs", func() { - var err error - // find used because that crictl does not show infra containers, `runc list` shows them - // but you will need somehow to find infra containers ID's - podUID := strings.Replace(string(testpod.UID), "-", "_", -1) - - podCgroup := "" - Eventually(func() string { - cmd := []string{"/bin/bash", "-c", fmt.Sprintf("find /rootfs/sys/fs/cgroup/cpuset/ -name 
*%s*", podUID)} - podCgroup, err = nodes.ExecCommandOnNode(cmd, workerRTNode) - Expect(err).ToNot(HaveOccurred()) - return podCgroup - }, cluster.ComputeTestTimeout(30*time.Second, RunningOnSingleNode), 5*time.Second).ShouldNot(BeEmpty(), - fmt.Sprintf("cannot find cgroup for pod %q", podUID)) - - containersCgroups := "" - Eventually(func() string { - cmd := []string{"/bin/bash", "-c", fmt.Sprintf("find %s -name crio-*", podCgroup)} - containersCgroups, err = nodes.ExecCommandOnNode(cmd, workerRTNode) - Expect(err).ToNot(HaveOccurred()) - return containersCgroups - }, cluster.ComputeTestTimeout(30*time.Second, RunningOnSingleNode), 5*time.Second).ShouldNot(BeEmpty(), - fmt.Sprintf("cannot find containers cgroups from pod cgroup %q", podCgroup)) - - containerID, err := pods.GetContainerIDByName(testpod, "test") - Expect(err).ToNot(HaveOccurred()) - - containersCgroups = strings.Trim(containersCgroups, "\n") - containersCgroupsDirs := strings.Split(containersCgroups, "\n") - Expect(len(containersCgroupsDirs)).To(Equal(2), "unexpected amount of containers cgroups") - - for _, dir := range containersCgroupsDirs { - // skip application container cgroup - if strings.Contains(dir, containerID) { - continue - } - - By("Checking what CPU the infra container is using") - cmd := []string{"/bin/bash", "-c", fmt.Sprintf("cat %s/cpuset.cpus", dir)} - output, err := nodes.ExecCommandOnNode(cmd, workerRTNode) - Expect(err).ToNot(HaveOccurred()) - - cpus, err := cpuset.Parse(output) - Expect(err).ToNot(HaveOccurred()) - - Expect(cpus.ToSlice()).To(Equal(reservedCPUSet.ToSlice())) - } - }) - }) - - When("strict NUMA aligment is requested", func() { - var testpod *corev1.Pod - - BeforeEach(func() { - if profile.Spec.NUMA == nil || profile.Spec.NUMA.TopologyPolicy == nil { - Skip("Topology Manager Policy is not configured") - } - tmPolicy := *profile.Spec.NUMA.TopologyPolicy - if tmPolicy != "single-numa-node" { - Skip("Topology Manager Policy is not Single NUMA Node") - } - }) - - AfterEach(func() { - if testpod == nil { - return - } - deleteTestPod(testpod) - }) - - It("[test_id:49149] should reject pods which request integral CPUs not aligned with machine SMT level", func() { - // any random existing cpu is fine - cpuID := onlineCPUSet.ToSliceNoSort()[0] - smtLevel := nodes.GetSMTLevel(cpuID, workerRTNode) - if smtLevel < 2 { - Skip(fmt.Sprintf("designated worker node %q has SMT level %d - minimum required 2", workerRTNode.Name, smtLevel)) - } - - cpuCount := 1 // must be intentionally < than the smtLevel to trigger the kubelet validation - testpod = promotePodToGuaranteed(getStressPod(workerRTNode.Name, cpuCount)) - testpod.Namespace = testutils.NamespaceTesting - - err := testclient.Client.Create(context.TODO(), testpod) - Expect(err).ToNot(HaveOccurred()) - - err = pods.WaitForPredicate(testpod, 10*time.Minute, func(pod *corev1.Pod) (bool, error) { - if pod.Status.Phase != corev1.PodPending { - return true, nil - } - return false, nil - }) - Expect(err).ToNot(HaveOccurred()) - - updatedPod := &corev1.Pod{} - err = testclient.Client.Get(context.TODO(), client.ObjectKeyFromObject(testpod), updatedPod) - Expect(err).ToNot(HaveOccurred()) - - Expect(updatedPod.Status.Phase).To(Equal(corev1.PodFailed), "pod %s not failed: %v", updatedPod.Name, updatedPod.Status) - Expect(isSMTAlignmentError(updatedPod)).To(BeTrue(), "pod %s failed for wrong reason: %q", updatedPod.Name, updatedPod.Status.Reason) - }) - }) - -}) - -func isSMTAlignmentError(pod *corev1.Pod) bool { - re := 
regexp.MustCompile(`SMT.*Alignment.*Error`) - return re.MatchString(pod.Status.Reason) -} - -func getStressPod(nodeName string, cpus int) *corev1.Pod { - cpuCount := fmt.Sprintf("%d", cpus) - return &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "test-cpu-", - Labels: map[string]string{ - "test": "", - }, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "stress-test", - Image: images.Test(), - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse(cpuCount), - corev1.ResourceMemory: resource.MustParse("1Gi"), - }, - }, - Command: []string{"/usr/bin/stresser"}, - Args: []string{"-cpus", cpuCount}, - }, - }, - NodeSelector: map[string]string{ - testutils.LabelHostname: nodeName, - }, - }, - } -} - -func promotePodToGuaranteed(pod *corev1.Pod) *corev1.Pod { - for idx := 0; idx < len(pod.Spec.Containers); idx++ { - cnt := &pod.Spec.Containers[idx] // shortcut - if cnt.Resources.Limits == nil { - cnt.Resources.Limits = make(corev1.ResourceList) - } - for resName, resQty := range cnt.Resources.Requests { - cnt.Resources.Limits[resName] = resQty - } - } - return pod -} - -func getTestPodWithAnnotations(annotations map[string]string, cpus int) *corev1.Pod { - testpod := pods.GetTestPod() - testpod.Annotations = annotations - testpod.Namespace = testutils.NamespaceTesting - - cpuCount := fmt.Sprintf("%d", cpus) - - resCpu := resource.MustParse(cpuCount) - resMem := resource.MustParse("256Mi") - - // change pod resource requirements, to change the pod QoS class to guaranteed - testpod.Spec.Containers[0].Resources = corev1.ResourceRequirements{ - Limits: corev1.ResourceList{ - corev1.ResourceCPU: resCpu, - corev1.ResourceMemory: resMem, - }, - } - - runtimeClassName := components.GetComponentName(profile.Name, components.ComponentNamePrefix) - testpod.Spec.RuntimeClassName = &runtimeClassName - testpod.Spec.NodeSelector = map[string]string{testutils.LabelHostname: workerRTNode.Name} - - return testpod -} - -func deleteTestPod(testpod *corev1.Pod) { - // it possible that the pod already was deleted as part of the test, in this case we want to skip teardown - err := testclient.Client.Get(context.TODO(), client.ObjectKeyFromObject(testpod), testpod) - if errors.IsNotFound(err) { - return - } - - err = testclient.Client.Delete(context.TODO(), testpod) - Expect(err).ToNot(HaveOccurred()) - - err = pods.WaitForDeletion(testpod, pods.DefaultDeletionTimeout*time.Second) - Expect(err).ToNot(HaveOccurred()) -} - -func cpuSpecToString(cpus *performancev2.CPU) string { - if cpus == nil { - return "" - } - sb := strings.Builder{} - if cpus.Reserved != nil { - fmt.Fprintf(&sb, "reserved=[%s]", *cpus.Reserved) - } - if cpus.Isolated != nil { - fmt.Fprintf(&sb, " isolated=[%s]", *cpus.Isolated) - } - if cpus.BalanceIsolated != nil { - fmt.Fprintf(&sb, " balanceIsolated=%t", *cpus.BalanceIsolated) - } - return sb.String() -} - -func logEventsForPod(testPod *corev1.Pod) { - evs, err := events.GetEventsForObject(testclient.Client, testPod.Namespace, testPod.Name, string(testPod.UID)) - if err != nil { - testlog.Error(err) - } - for _, event := range evs.Items { - testlog.Warningf("-> %s %s %s", event.Action, event.Reason, event.Message) - } -} diff --git a/test/e2e/pao/functests/1_performance/hugepages.go b/test/e2e/pao/functests/1_performance/hugepages.go deleted file mode 100644 index 688ab0b0e..000000000 --- a/test/e2e/pao/functests/1_performance/hugepages.go +++ /dev/null @@ -1,213 +0,0 @@ -package __performance - 
-import ( - "context" - "fmt" - "strconv" - "strings" - "time" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2" - "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components/machineconfig" - testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils" - testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/cluster" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/discovery" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/images" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/nodes" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/pods" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/profiles" -) - -var _ = Describe("[performance]Hugepages", func() { - var workerRTNode *corev1.Node - var profile *performancev2.PerformanceProfile - - testutils.BeforeAll(func() { - isSNO, err := cluster.IsSingleNode() - Expect(err).ToNot(HaveOccurred()) - RunningOnSingleNode = isSNO - }) - - BeforeEach(func() { - if discovery.Enabled() && testutils.ProfileNotFound { - Skip("Discovery mode enabled, performance profile not found") - } - - var err error - workerRTNodes, err := nodes.GetByLabels(testutils.NodeSelectorLabels) - Expect(err).ToNot(HaveOccurred()) - workerRTNodes, err = nodes.MatchingOptionalSelector(workerRTNodes) - Expect(err).ToNot(HaveOccurred(), fmt.Sprintf("error looking for the optional selector: %v", err)) - Expect(workerRTNodes).ToNot(BeEmpty()) - workerRTNode = &workerRTNodes[0] - - profile, err = profiles.GetByNodeLabels(testutils.NodeSelectorLabels) - Expect(err).ToNot(HaveOccurred()) - if profile.Spec.HugePages == nil || len(profile.Spec.HugePages.Pages) == 0 { - Skip("Hugepages is not configured in performance profile") - } - }) - - // We have multiple hugepages e2e tests under the upstream, so the only thing that we should check, if the PAO configure - // correctly number of hugepages that will be available on the node - Context("[rfe_id:27369]when NUMA node specified", func() { - It("[test_id:27752][crit:high][vendor:cnf-qe@redhat.com][level:acceptance] should be allocated on the specifed NUMA node ", func() { - for _, page := range profile.Spec.HugePages.Pages { - if page.Node == nil { - continue - } - - hugepagesSize, err := machineconfig.GetHugepagesSizeKilobytes(page.Size) - Expect(err).ToNot(HaveOccurred()) - - availableHugepagesFile := fmt.Sprintf("/sys/devices/system/node/node%d/hugepages/hugepages-%skB/nr_hugepages", *page.Node, hugepagesSize) - nrHugepages := checkHugepagesStatus(availableHugepagesFile, workerRTNode) - - freeHugepagesFile := fmt.Sprintf("/sys/devices/system/node/node%d/hugepages/hugepages-%skB/free_hugepages", *page.Node, hugepagesSize) - freeHugepages := checkHugepagesStatus(freeHugepagesFile, workerRTNode) - - Expect(int32(nrHugepages)).To(Equal(page.Count), "The number of available hugepages should be equal to the number in performance profile") - Expect(nrHugepages).To(Equal(freeHugepages), "On idle system the number of available hugepages should be equal to free hugepages") - } - 
}) - }) - - Context("with multiple sizes", func() { - It("[test_id:34080] should be supported and available for the container usage", func() { - for _, page := range profile.Spec.HugePages.Pages { - hugepagesSize, err := machineconfig.GetHugepagesSizeKilobytes(page.Size) - Expect(err).ToNot(HaveOccurred()) - - availableHugepagesFile := fmt.Sprintf("/sys/kernel/mm/hugepages/hugepages-%skB/nr_hugepages", hugepagesSize) - if page.Node != nil { - availableHugepagesFile = fmt.Sprintf("/sys/devices/system/node/node%d/hugepages/hugepages-%skB/nr_hugepages", *page.Node, hugepagesSize) - } - nrHugepages := checkHugepagesStatus(availableHugepagesFile, workerRTNode) - - if discovery.Enabled() && nrHugepages != 0 { - Skip("Skipping test since other guests might reside in the cluster affecting results") - } - - freeHugepagesFile := fmt.Sprintf("/sys/kernel/mm/hugepages/hugepages-%skB/free_hugepages", hugepagesSize) - if page.Node != nil { - freeHugepagesFile = fmt.Sprintf("/sys/devices/system/node/node%d/hugepages/hugepages-%skB/free_hugepages", *page.Node, hugepagesSize) - } - - freeHugepages := checkHugepagesStatus(freeHugepagesFile, workerRTNode) - - Expect(int32(nrHugepages)).To(Equal(page.Count), "The number of available hugepages should be equal to the number in performance profile") - Expect(nrHugepages).To(Equal(freeHugepages), "On idle system the number of available hugepages should be equal to free hugepages") - } - }) - }) - - Context("[rfe_id:27354]Huge pages support for container workloads", func() { - var testpod *corev1.Pod - - AfterEach(func() { - err := testclient.Client.Delete(context.TODO(), testpod) - Expect(err).ToNot(HaveOccurred()) - - err = pods.WaitForDeletion(testpod, pods.DefaultDeletionTimeout*time.Second) - Expect(err).ToNot(HaveOccurred()) - }) - - It("[test_id:27477][crit:high][vendor:cnf-qe@redhat.com][level:acceptance] Huge pages support for container workloads", func() { - hpSize := profile.Spec.HugePages.Pages[0].Size - hpSizeKb, err := machineconfig.GetHugepagesSizeKilobytes(hpSize) - Expect(err).ToNot(HaveOccurred()) - - By("checking hugepages usage in bytes - should be 0 on idle system") - usageHugepagesFile := fmt.Sprintf("/rootfs/sys/fs/cgroup/hugetlb/hugetlb.%sB.usage_in_bytes", hpSize) - usageHugepages := checkHugepagesStatus(usageHugepagesFile, workerRTNode) - if discovery.Enabled() && usageHugepages != 0 { - Skip("Skipping test since other guests might reside in the cluster affecting results") - } - Expect(usageHugepages).To(Equal(0), "Found used hugepages, expected 0") - - By("running the POD and waiting while it's installing testing tools") - testpod = getCentosPod(workerRTNode.Name) - testpod.Namespace = testutils.NamespaceTesting - testpod.Spec.Containers[0].Resources.Limits = map[corev1.ResourceName]resource.Quantity{ - corev1.ResourceName(fmt.Sprintf("hugepages-%si", hpSize)): resource.MustParse(fmt.Sprintf("%si", hpSize)), - corev1.ResourceMemory: resource.MustParse("1Gi"), - } - err = testclient.Client.Create(context.TODO(), testpod) - Expect(err).ToNot(HaveOccurred()) - err = pods.WaitForCondition(testpod, corev1.PodReady, corev1.ConditionTrue, 10*time.Minute) - Expect(err).ToNot(HaveOccurred()) - - cmd2 := []string{"/bin/bash", "-c", "tmux new -d 'LD_PRELOAD=libhugetlbfs.so HUGETLB_MORECORE=yes top -b > /dev/null'"} - _, err = pods.ExecCommandOnPod(testclient.K8sClient, testpod, cmd2) - Expect(err).ToNot(HaveOccurred()) - - By("checking free hugepages - one should be used by pod") - availableHugepagesFile := 
fmt.Sprintf("/sys/kernel/mm/hugepages/hugepages-%skB/nr_hugepages", hpSizeKb) - availableHugepages := checkHugepagesStatus(availableHugepagesFile, workerRTNode) - - freeHugepagesFile := fmt.Sprintf("/sys/kernel/mm/hugepages/hugepages-%skB/free_hugepages", hpSizeKb) - Eventually(func() int { - freeHugepages := checkHugepagesStatus(freeHugepagesFile, workerRTNode) - return availableHugepages - freeHugepages - }, cluster.ComputeTestTimeout(30*time.Second, RunningOnSingleNode), time.Second).Should(Equal(1)) - - By("checking hugepages usage in bytes") - usageHugepages = checkHugepagesStatus(usageHugepagesFile, workerRTNode) - Expect(strconv.Itoa(usageHugepages/1024)).To(Equal(hpSizeKb), "usage in bytes should be %s", hpSizeKb) - }) - }) -}) - -func checkHugepagesStatus(path string, workerRTNode *corev1.Node) int { - command := []string{"cat", path} - out, err := nodes.ExecCommandOnMachineConfigDaemon(workerRTNode, command) - Expect(err).ToNot(HaveOccurred()) - n, err := strconv.Atoi(strings.Trim(string(out), "\n\r")) - Expect(err).ToNot(HaveOccurred()) - return n -} - -func getCentosPod(nodeName string) *corev1.Pod { - return &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "test-hugepages-", - Labels: map[string]string{ - "test": "", - }, - }, - Spec: corev1.PodSpec{ - Volumes: []corev1.Volume{ - { - Name: "hugepages", - VolumeSource: corev1.VolumeSource{ - EmptyDir: &corev1.EmptyDirVolumeSource{Medium: corev1.StorageMediumHugePages}, - }, - }, - }, - Containers: []corev1.Container{ - { - Name: "test", - Image: images.Test(), - Command: []string{"sleep", "10h"}, - VolumeMounts: []corev1.VolumeMount{ - { - Name: "hugepages", - MountPath: "/dev/hugepages", - }, - }, - }, - }, - NodeSelector: map[string]string{ - testutils.LabelHostname: nodeName, - }, - }, - } -} diff --git a/test/e2e/pao/functests/1_performance/netqueues.go b/test/e2e/pao/functests/1_performance/netqueues.go deleted file mode 100644 index 5cf9b436f..000000000 --- a/test/e2e/pao/functests/1_performance/netqueues.go +++ /dev/null @@ -1,364 +0,0 @@ -package __performance - -import ( - "context" - "encoding/json" - "fmt" - "strconv" - "strings" - "time" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" - "k8s.io/utils/pointer" - "sigs.k8s.io/controller-runtime/pkg/client" - - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - - performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2" - testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils" - testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/cluster" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/discovery" - testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/log" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/nodes" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/pods" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/profiles" -) - -var _ = Describe("[ref_id: 40307][pao]Resizing Network Queues", func() { - var workerRTNodes []corev1.Node - var profile, initialProfile *performancev2.PerformanceProfile - var performanceProfileName string - - testutils.BeforeAll(func() { - isSNO, err := cluster.IsSingleNode() - Expect(err).ToNot(HaveOccurred()) - RunningOnSingleNode = isSNO - - workerRTNodes, err = nodes.GetByLabels(testutils.NodeSelectorLabels) - Expect(err).ToNot(HaveOccurred()) - - workerRTNodes, err = nodes.MatchingOptionalSelector(workerRTNodes) - Expect(err).ToNot(HaveOccurred()) - - profile, err = profiles.GetByNodeLabels(testutils.NodeSelectorLabels) - Expect(err).ToNot(HaveOccurred()) - - By("Backing up the profile") - initialProfile = profile.DeepCopy() - - performanceProfileName = profile.Name - - tunedPaoProfile := fmt.Sprintf("openshift-node-performance-%s", performanceProfileName) - //Verify the tuned profile is created on the worker-cnf nodes: - tunedCmd := []string{"tuned-adm", "profile_info", tunedPaoProfile} - for _, node := range workerRTNodes { - tunedPod := nodes.TunedForNode(&node, RunningOnSingleNode) - _, err := pods.WaitForPodOutput(testclient.K8sClient, tunedPod, tunedCmd) - Expect(err).ToNot(HaveOccurred()) - } - }) - - BeforeEach(func() { - if discovery.Enabled() && testutils.ProfileNotFound { - Skip("Discovery mode enabled, performance profile not found") - } - profile, err := profiles.GetByNodeLabels(testutils.NodeSelectorLabels) - Expect(err).ToNot(HaveOccurred()) - if profile.Spec.Net == nil { - By("Enable UserLevelNetworking in Profile") - profile.Spec.Net = &performancev2.Net{ - UserLevelNetworking: pointer.Bool(true), - } - By("Updating the performance profile") - profiles.UpdateWithRetry(profile) - } - }) - - AfterEach(func() { - By("Reverting the Profile") - spec, err := json.Marshal(initialProfile.Spec) - Expect(err).ToNot(HaveOccurred()) - Expect(testclient.Client.Patch(context.TODO(), profile, - client.RawPatch( - types.JSONPatchType, - []byte(fmt.Sprintf(`[{ "op": "replace", "path": "/spec", "value": %s }]`, spec)), - ), - )).ToNot(HaveOccurred()) - }) - - Context("Updating performance profile for netqueues", func() { - It("[test_id:40308][crit:high][vendor:cnf-qe@redhat.com][level:acceptance] Network device queues Should be set to the profile's reserved CPUs count ", func() { - nodesDevices := make(map[string]map[string]int) - if profile.Spec.Net != nil { - if profile.Spec.Net.UserLevelNetworking != nil && *profile.Spec.Net.UserLevelNetworking && len(profile.Spec.Net.Devices) == 0 { - By("To all non virtual network devices when no devices are specified under profile.Spec.Net.Devices") - err := checkDeviceSetWithReservedCPU(workerRTNodes, 
nodesDevices, *profile) - if err != nil { - Skip("Skipping Test: Unable to set Network queue size to reserved cpu count") - } - } - } - }) - - It("[test_id:40542] Verify the number of network queues of all supported network interfaces are equal to reserved cpus count", func() { - nodesDevices := make(map[string]map[string]int) - err := checkDeviceSetWithReservedCPU(workerRTNodes, nodesDevices, *profile) - if err != nil { - Skip("Skipping Test: Unable to set Network queue size to reserved cpu count") - } - }) - - It("[test_id:40543] Add interfaceName and verify the interface netqueues are equal to reserved cpus count.", func() { - nodesDevices := make(map[string]map[string]int) - deviceSupport, err := checkDeviceSupport(workerRTNodes, nodesDevices) - Expect(err).ToNot(HaveOccurred()) - if !deviceSupport { - Skip("Skipping Test: There are no supported Network Devices") - } - nodeName, device := getRandomNodeDevice(nodesDevices) - profile, err = profiles.GetByNodeLabels(testutils.NodeSelectorLabels) - Expect(err).ToNot(HaveOccurred()) - if profile.Spec.Net.UserLevelNetworking != nil && *profile.Spec.Net.UserLevelNetworking && len(profile.Spec.Net.Devices) == 0 { - By("Enable UserLevelNetworking and add Devices in Profile") - profile.Spec.Net = &performancev2.Net{ - UserLevelNetworking: pointer.Bool(true), - Devices: []performancev2.Device{ - { - InterfaceName: &device, - }, - }, - } - By("Updating the performance profile") - profiles.UpdateWithRetry(profile) - } - //Verify the tuned profile is created on the worker-cnf nodes: - tunedCmd := []string{"bash", "-c", - fmt.Sprintf("cat /etc/tuned/openshift-node-performance-%s/tuned.conf | grep devices_udev_regex", performanceProfileName)} - - node, err := nodes.GetByName(nodeName) - Expect(err).ToNot(HaveOccurred()) - tunedPod := nodes.TunedForNode(node, RunningOnSingleNode) - - Eventually(func() bool { - out, err := pods.WaitForPodOutput(testclient.K8sClient, tunedPod, tunedCmd) - if err != nil { - return false - } - return strings.ContainsAny(string(out), device) - }, cluster.ComputeTestTimeout(2*time.Minute, RunningOnSingleNode), 5*time.Second).Should(BeTrue(), "could not get a tuned profile set with devices_udev_regex") - - nodesDevices = make(map[string]map[string]int) - err = checkDeviceSetWithReservedCPU(workerRTNodes, nodesDevices, *profile) - if err != nil { - Skip("Skipping Test: Unable to set Network queue size to reserved cpu count") - } - }) - - It("[test_id:40545] Verify reserved cpus count is applied to specific supported networking devices using wildcard matches", func() { - nodesDevices := make(map[string]map[string]int) - var device, devicePattern string - deviceSupport, err := checkDeviceSupport(workerRTNodes, nodesDevices) - Expect(err).ToNot(HaveOccurred()) - if !deviceSupport { - Skip("Skipping Test: There are no supported Network Devices") - } - nodeName, device := getRandomNodeDevice(nodesDevices) - devicePattern = device[:len(device)-1] + "*" - profile, err = profiles.GetByNodeLabels(testutils.NodeSelectorLabels) - Expect(err).ToNot(HaveOccurred()) - if profile.Spec.Net.UserLevelNetworking != nil && *profile.Spec.Net.UserLevelNetworking && len(profile.Spec.Net.Devices) == 0 { - By("Enable UserLevelNetworking and add Devices in Profile") - profile.Spec.Net = &performancev2.Net{ - UserLevelNetworking: pointer.Bool(true), - Devices: []performancev2.Device{ - { - InterfaceName: &devicePattern, - }, - }, - } - profiles.UpdateWithRetry(profile) - } - //Verify the tuned profile is created on the worker-cnf nodes: - tunedCmd := 
[]string{"bash", "-c", - fmt.Sprintf("cat /etc/tuned/openshift-node-performance-%s/tuned.conf | grep devices_udev_regex", performanceProfileName)} - - node, err := nodes.GetByName(nodeName) - Expect(err).ToNot(HaveOccurred()) - tunedPod := nodes.TunedForNode(node, RunningOnSingleNode) - - Eventually(func() bool { - out, err := pods.WaitForPodOutput(testclient.K8sClient, tunedPod, tunedCmd) - if err != nil { - return false - } - return strings.ContainsAny(string(out), device) - }, cluster.ComputeTestTimeout(2*time.Minute, RunningOnSingleNode), 5*time.Second).Should(BeTrue(), "could not get a tuned profile set with devices_udev_regex") - - nodesDevices = make(map[string]map[string]int) - err = checkDeviceSetWithReservedCPU(workerRTNodes, nodesDevices, *profile) - if err != nil { - Skip("Skipping Test: Unable to set Network queue size to reserved cpu count") - } - }) - - It("[test_id:40668] Verify reserved cpu count is added to networking devices matched with vendor and Device id", func() { - nodesDevices := make(map[string]map[string]int) - deviceSupport, err := checkDeviceSupport(workerRTNodes, nodesDevices) - Expect(err).ToNot(HaveOccurred()) - if !deviceSupport { - Skip("Skipping Test: There are no supported Network Devices") - } - nodeName, device := getRandomNodeDevice(nodesDevices) - node, err := nodes.GetByName(nodeName) - Expect(err).ToNot(HaveOccurred()) - vid := getVendorID(*node, device) - did := getDeviceID(*node, device) - profile, err = profiles.GetByNodeLabels(testutils.NodeSelectorLabels) - Expect(err).ToNot(HaveOccurred()) - if profile.Spec.Net.UserLevelNetworking != nil && *profile.Spec.Net.UserLevelNetworking && len(profile.Spec.Net.Devices) == 0 { - By("Enable UserLevelNetworking and add DeviceID, VendorID and Interface in Profile") - profile.Spec.Net = &performancev2.Net{ - UserLevelNetworking: pointer.Bool(true), - Devices: []performancev2.Device{ - { - InterfaceName: &device, - }, - { - VendorID: &vid, - DeviceID: &did, - }, - }, - } - profiles.UpdateWithRetry(profile) - } - //Verify the tuned profile is created on the worker-cnf nodes: - tunedCmd := []string{"bash", "-c", - fmt.Sprintf("cat /etc/tuned/openshift-node-performance-%s/tuned.conf | grep devices_udev_regex", performanceProfileName)} - - node, err = nodes.GetByName(nodeName) - Expect(err).ToNot(HaveOccurred()) - tunedPod := nodes.TunedForNode(node, RunningOnSingleNode) - Eventually(func() bool { - out, err := pods.WaitForPodOutput(testclient.K8sClient, tunedPod, tunedCmd) - if err != nil { - return false - } - return strings.ContainsAny(string(out), device) - }, cluster.ComputeTestTimeout(2*time.Minute, RunningOnSingleNode), 5*time.Second).Should(BeTrue(), "could not get a tuned profile set with devices_udev_regex") - - nodesDevices = make(map[string]map[string]int) - err = checkDeviceSetWithReservedCPU(workerRTNodes, nodesDevices, *profile) - if err != nil { - Skip("Skipping Test: Unable to set Network queue size to reserved cpu count") - } - }) - }) -}) - -// Check a device that supports multiple queues and set with with reserved CPU size exists -func checkDeviceSetWithReservedCPU(workerRTNodes []corev1.Node, nodesDevices map[string]map[string]int, profile performancev2.PerformanceProfile) error { - return wait.PollImmediate(5*time.Second, 90*time.Second, func() (bool, error) { - deviceSupport, err := checkDeviceSupport(workerRTNodes, nodesDevices) - Expect(err).ToNot(HaveOccurred()) - if !deviceSupport { - return false, nil - } - for _, devices := range nodesDevices { - for _, size := range devices { - if 
size == getReservedCPUSize(profile.Spec.CPU) { - return true, nil - } - } - } - return false, nil - }) -} - -// Check if the device support multiple queues -func checkDeviceSupport(workernodes []corev1.Node, nodesDevices map[string]map[string]int) (bool, error) { - cmdGetPhysicalDevices := []string{"find", "/sys/class/net", "-type", "l", "-not", "-lname", "*virtual*", "-printf", "%f "} - var channelCurrentCombined int - var noSupportedDevices = true - var err error - for _, node := range workernodes { - if nodesDevices[node.Name] == nil { - nodesDevices[node.Name] = make(map[string]int) - } - tunedPod := nodes.TunedForNode(&node, RunningOnSingleNode) - phyDevs, err := pods.WaitForPodOutput(testclient.K8sClient, tunedPod, cmdGetPhysicalDevices) - Expect(err).ToNot(HaveOccurred()) - for _, d := range strings.Split(string(phyDevs), " ") { - if d == "" { - continue - } - _, err := pods.WaitForPodOutput(testclient.K8sClient, tunedPod, []string{"ethtool", "-l", d}) - if err == nil { - cmdCombinedChannelsCurrent := []string{"bash", "-c", - fmt.Sprintf("ethtool -l %s | sed -n '/Current hardware settings:/,/Combined:/{s/^Combined:\\s*//p}'", d)} - out, err := pods.WaitForPodOutput(testclient.K8sClient, tunedPod, cmdCombinedChannelsCurrent) - if strings.Contains(string(out), "n/a") { - fmt.Printf("Device %s doesn't support multiple queues\n", d) - } else { - channelCurrentCombined, err = strconv.Atoi(strings.TrimSpace(string(out))) - if err != nil { - testlog.Warningf(fmt.Sprintf("unable to retrieve current multi-purpose channels hardware settings for device %s on %s", - d, node.Name)) - } - if channelCurrentCombined == 1 { - fmt.Printf("Device %s doesn't support multiple queues\n", d) - } else { - fmt.Printf("Device %s supports multiple queues\n", d) - nodesDevices[node.Name][d] = channelCurrentCombined - noSupportedDevices = false - } - } - } - } - } - if noSupportedDevices { - return false, err - } - return true, err -} - -func getReservedCPUSize(CPU *performancev2.CPU) int { - reservedCPUs, err := cpuset.Parse(string(*CPU.Reserved)) - Expect(err).ToNot(HaveOccurred()) - return reservedCPUs.Size() -} - -func getVendorID(node corev1.Node, device string) string { - cmd := []string{"bash", "-c", - fmt.Sprintf("cat /sys/class/net/%s/device/vendor", device)} - stdout, err := nodes.ExecCommandOnNode(cmd, &node) - Expect(err).ToNot(HaveOccurred()) - return stdout -} - -func getDeviceID(node corev1.Node, device string) string { - cmd := []string{"bash", "-c", - fmt.Sprintf("cat /sys/class/net/%s/device/device", device)} - stdout, err := nodes.ExecCommandOnNode(cmd, &node) - Expect(err).ToNot(HaveOccurred()) - return stdout -} - -func getRandomNodeDevice(nodesDevices map[string]map[string]int) (string, string) { - node := "" - device := "" - for n := range nodesDevices { - node = n - for d := range nodesDevices[node] { - if d != "" { - device = d - return node, device - } - } - } - return node, device -} diff --git a/test/e2e/pao/functests/1_performance/performance.go b/test/e2e/pao/functests/1_performance/performance.go deleted file mode 100644 index 6c8c065f9..000000000 --- a/test/e2e/pao/functests/1_performance/performance.go +++ /dev/null @@ -1,1333 +0,0 @@ -package __performance - -import ( - "context" - "encoding/json" - "fmt" - "path/filepath" - "reflect" - "regexp" - "strconv" - "strings" - "time" - - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - - corev1 "k8s.io/api/core/v1" - "k8s.io/api/node/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/types" - "k8s.io/klog" - "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" - "k8s.io/utils/pointer" - - "sigs.k8s.io/controller-runtime/pkg/client" - - performancev1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v1" - performancev1alpha1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v1alpha1" - performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2" - tunedv1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/tuned/v1" - "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components" - "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components/machineconfig" - componentprofile "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components/profile" - testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils" - testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/cluster" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/discovery" - testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/log" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/mcps" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/nodes" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/pods" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/profiles" - machineconfigv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" - mcov1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" -) - -const ( - testTimeout = 480 - testPollInterval = 2 -) - -var RunningOnSingleNode bool - -var _ = Describe("[rfe_id:27368][performance]", func() { - var workerRTNodes []corev1.Node - var profile *performancev2.PerformanceProfile - - testutils.BeforeAll(func() { - isSNO, err := cluster.IsSingleNode() - Expect(err).ToNot(HaveOccurred()) - RunningOnSingleNode = isSNO - }) - - BeforeEach(func() { - if discovery.Enabled() && testutils.ProfileNotFound { - Skip("Discovery mode enabled, performance profile not found") - } - - var err error - workerRTNodes, err = nodes.GetByLabels(testutils.NodeSelectorLabels) - Expect(err).ToNot(HaveOccurred(), "error looking for node with role %q: %v", testutils.RoleWorkerCNF, err) - workerRTNodes, err = nodes.MatchingOptionalSelector(workerRTNodes) - Expect(err).ToNot(HaveOccurred(), fmt.Sprintf("error looking for the optional selector: %v", err)) - Expect(workerRTNodes).ToNot(BeEmpty(), "no nodes with role %q found", testutils.RoleWorkerCNF) - profile, err = profiles.GetByNodeLabels(testutils.NodeSelectorLabels) - Expect(err).ToNot(HaveOccurred(), "cannot get profile by node labels %v", testutils.NodeSelectorLabels) - }) - - Context("Tuned CRs generated from profile", func() { - tunedExpectedName := components.GetComponentName(testutils.PerformanceProfileName, components.ProfileNamePerformance) - It("[test_id:31748] Should have the expected name for tuned from the profile owner object", func() { - tunedList := 
&tunedv1.TunedList{} - key := types.NamespacedName{ - Name: tunedExpectedName, - Namespace: components.NamespaceNodeTuningOperator, - } - tuned := &tunedv1.Tuned{} - err := testclient.Client.Get(context.TODO(), key, tuned) - Expect(err).ToNot(HaveOccurred(), "cannot find the Cluster Node Tuning Operator object %q", tuned.Name) - - Eventually(func() bool { - err := testclient.Client.List(context.TODO(), tunedList) - Expect(err).NotTo(HaveOccurred()) - for t := range tunedList.Items { - tunedItem := tunedList.Items[t] - ownerReferences := tunedItem.ObjectMeta.OwnerReferences - for o := range ownerReferences { - if ownerReferences[o].Name == profile.Name && tunedItem.Name != tunedExpectedName { - return false - } - } - } - return true - }, cluster.ComputeTestTimeout(120*time.Second, RunningOnSingleNode), testPollInterval*time.Second).Should(BeTrue(), - "tuned CR name owned by a performance profile CR should only be %q", tunedExpectedName) - }) - - It("[test_id:37127] Node should point to right tuned profile", func() { - for _, node := range workerRTNodes { - tuned := nodes.TunedForNode(&node, RunningOnSingleNode) - activeProfile, err := pods.WaitForPodOutput(testclient.K8sClient, tuned, []string{"cat", "/etc/tuned/active_profile"}) - Expect(err).ToNot(HaveOccurred(), "Error getting the tuned active profile") - activeProfileName := string(activeProfile) - Expect(strings.TrimSpace(activeProfileName)).To(Equal(tunedExpectedName), "active profile name mismatch got %q expected %q", activeProfileName, tunedExpectedName) - } - }) - }) - - Context("Pre boot tuning adjusted by tuned ", func() { - - It("[test_id:31198] Should set CPU affinity kernel argument", func() { - for _, node := range workerRTNodes { - cmdline, err := nodes.ExecCommandOnMachineConfigDaemon(&node, []string{"cat", "/proc/cmdline"}) - Expect(err).ToNot(HaveOccurred()) - // since systemd.cpu_affinity is calculated on node level using tuned we can check only the key in this context. 
- Expect(string(cmdline)).To(ContainSubstring("systemd.cpu_affinity=")) - } - }) - - It("[test_id:32702] Should set CPU isolcpu's kernel argument managed_irq flag", func() { - for _, node := range workerRTNodes { - cmdline, err := nodes.ExecCommandOnMachineConfigDaemon(&node, []string{"cat", "/proc/cmdline"}) - Expect(err).ToNot(HaveOccurred()) - if profile.Spec.CPU.BalanceIsolated != nil && *profile.Spec.CPU.BalanceIsolated == false { - Expect(string(cmdline)).To(ContainSubstring("isolcpus=domain,managed_irq,")) - } else { - Expect(string(cmdline)).To(ContainSubstring("isolcpus=managed_irq,")) - } - } - }) - - It("[test_id:27081][crit:high][vendor:cnf-qe@redhat.com][level:acceptance] Should set workqueue CPU mask", func() { - for _, node := range workerRTNodes { - By(fmt.Sprintf("Getting tuned.non_isolcpus kernel argument on %q", node.Name)) - cmdline, err := nodes.ExecCommandOnMachineConfigDaemon(&node, []string{"cat", "/proc/cmdline"}) - Expect(err).ToNot(HaveOccurred()) - re := regexp.MustCompile(`tuned.non_isolcpus=\S+`) - nonIsolcpusFullArgument := re.FindString(string(cmdline)) - Expect(nonIsolcpusFullArgument).To(ContainSubstring("tuned.non_isolcpus="), "tuned.non_isolcpus parameter not found in %q", cmdline) - nonIsolcpusMask := strings.Split(string(nonIsolcpusFullArgument), "=")[1] - nonIsolcpusMaskNoDelimiters := strings.Replace(nonIsolcpusMask, ",", "", -1) - - getTrimmedMaskFromData := func(maskType string, data []byte) string { - trimmed := strings.TrimSpace(string(data)) - testlog.Infof("workqueue %s mask for %q: %q", maskType, node.Name, trimmed) - return strings.Replace(trimmed, ",", "", -1) - } - - expectMasksEqual := func(expected, got string) { - expectedTrimmed := strings.TrimLeft(expected, "0") - gotTrimmed := strings.TrimLeft(got, "0") - ExpectWithOffset(1, expectedTrimmed).Should(Equal(gotTrimmed), "wrong workqueue mask on %q - got %q (from %q) expected %q (from %q)", node.Name, expectedTrimmed, expected, got, gotTrimmed) - } - - By(fmt.Sprintf("Getting the virtual workqueue mask (/sys/devices/virtual/workqueue/cpumask) on %q", node.Name)) - workqueueMaskData, err := nodes.ExecCommandOnMachineConfigDaemon(&node, []string{"cat", "/sys/devices/virtual/workqueue/cpumask"}) - Expect(err).ToNot(HaveOccurred()) - workqueueMask := getTrimmedMaskFromData("virtual", workqueueMaskData) - expectMasksEqual(nonIsolcpusMaskNoDelimiters, workqueueMask) - - By(fmt.Sprintf("Getting the writeback workqueue mask (/sys/bus/workqueue/devices/writeback/cpumask) on %q", node.Name)) - workqueueWritebackMaskData, err := nodes.ExecCommandOnMachineConfigDaemon(&node, []string{"cat", "/sys/bus/workqueue/devices/writeback/cpumask"}) - Expect(err).ToNot(HaveOccurred()) - workqueueWritebackMask := getTrimmedMaskFromData("workqueue", workqueueWritebackMaskData) - expectMasksEqual(nonIsolcpusMaskNoDelimiters, workqueueWritebackMask) - } - }) - - It("[test_id:32375][crit:high][vendor:cnf-qe@redhat.com][level:acceptance] initramfs should not have injected configuration", func() { - for _, node := range workerRTNodes { - rhcosId, err := nodes.ExecCommandOnMachineConfigDaemon(&node, []string{"awk", "-F", "/", "{printf $3}", "/rootfs/proc/cmdline"}) - Expect(err).ToNot(HaveOccurred()) - initramfsImagesPath, err := nodes.ExecCommandOnMachineConfigDaemon(&node, []string{"find", filepath.Join("/rootfs/boot/ostree", string(rhcosId)), "-name", "*.img"}) - Expect(err).ToNot(HaveOccurred()) - modifiedImagePath := strings.TrimPrefix(strings.TrimSpace(string(initramfsImagesPath)), "/rootfs") - initrd, err := 
nodes.ExecCommandOnMachineConfigDaemon(&node, []string{"chroot", "/rootfs", "lsinitrd", modifiedImagePath}) - Expect(err).ToNot(HaveOccurred()) - Expect(string(initrd)).ShouldNot(ContainSubstring("'/etc/systemd/system.conf /etc/systemd/system.conf.d/setAffinity.conf'")) - } - }) - - It("[test_id:35363][crit:high][vendor:cnf-qe@redhat.com][level:acceptance] stalld daemon is running on the host", func() { - for _, node := range workerRTNodes { - tuned := nodes.TunedForNode(&node, RunningOnSingleNode) - _, err := pods.WaitForPodOutput(testclient.K8sClient, tuned, []string{"pidof", "stalld"}) - Expect(err).ToNot(HaveOccurred()) - } - }) - It("[test_id:42400][crit:medium][vendor:cnf-qe@redhat.com][level:acceptance] stalld daemon is running as sched_fifo", func() { - for _, node := range workerRTNodes { - pid, err := nodes.ExecCommandOnNode([]string{"pidof", "stalld"}, &node) - Expect(err).ToNot(HaveOccurred()) - Expect(pid).ToNot(BeEmpty()) - sched_tasks, err := nodes.ExecCommandOnNode([]string{"chrt", "-ap", pid}, &node) - Expect(err).ToNot(HaveOccurred()) - Expect(sched_tasks).To(ContainSubstring("scheduling policy: SCHED_FIFO")) - Expect(sched_tasks).To(ContainSubstring("scheduling priority: 10")) - } - }) - It("[test_id:42696][crit:medium][vendor:cnf-qe@redhat.com][level:acceptance] Stalld runs in higher priority than ksoftirq and rcu{c,b}", func() { - for _, node := range workerRTNodes { - stalld_pid, err := nodes.ExecCommandOnNode([]string{"pidof", "stalld"}, &node) - Expect(err).ToNot(HaveOccurred()) - Expect(stalld_pid).ToNot(BeEmpty()) - sched_tasks, err := nodes.ExecCommandOnNode([]string{"chrt", "-ap", stalld_pid}, &node) - Expect(err).ToNot(HaveOccurred()) - re := regexp.MustCompile("scheduling priority: ([0-9]+)") - match := re.FindStringSubmatch(sched_tasks) - stalld_prio, err := strconv.Atoi(match[1]) - Expect(err).ToNot(HaveOccurred()) - - ksoftirq_pid, err := nodes.ExecCommandOnNode([]string{"pgrep", "-f", "ksoftirqd", "-n"}, &node) - Expect(err).ToNot(HaveOccurred()) - Expect(ksoftirq_pid).ToNot(BeEmpty()) - sched_tasks, err = nodes.ExecCommandOnNode([]string{"chrt", "-ap", ksoftirq_pid}, &node) - Expect(err).ToNot(HaveOccurred()) - match = re.FindStringSubmatch(sched_tasks) - ksoftirq_prio, err := strconv.Atoi(match[1]) - Expect(err).ToNot(HaveOccurred()) - - if profile.Spec.RealTimeKernel == nil || - profile.Spec.RealTimeKernel.Enabled == nil || - *profile.Spec.RealTimeKernel.Enabled != true { - Expect(stalld_prio).To(BeNumerically("<", ksoftirq_prio)) - testlog.Warning("Skip checking rcu since RT kernel is disabled") - return - } - //rcuc/n : kthreads that are pinned to CPUs & are responsible to execute the callbacks of rcu threads . - //rcub/n : are boosting kthreads ,responsible to monitor per-cpu arrays of lists of tasks that were blocked while in an rcu read-side critical sections. 
- rcu_pid, err := nodes.ExecCommandOnNode([]string{"pgrep", "-f", "rcu[c,b]", "-n"}, &node) - Expect(err).ToNot(HaveOccurred()) - Expect(rcu_pid).ToNot(BeEmpty()) - sched_tasks, err = nodes.ExecCommandOnNode([]string{"chrt", "-ap", rcu_pid}, &node) - Expect(err).ToNot(HaveOccurred()) - match = re.FindStringSubmatch(sched_tasks) - rcu_prio, err := strconv.Atoi(match[1]) - Expect(err).ToNot(HaveOccurred()) - - Expect(stalld_prio).To(BeNumerically("<", rcu_prio)) - Expect(stalld_prio).To(BeNumerically("<", ksoftirq_prio)) - } - }) - - }) - - Context("Additional kernel arguments added from perfomance profile", func() { - It("[test_id:28611][crit:high][vendor:cnf-qe@redhat.com][level:acceptance] Should set additional kernel arguments on the machine", func() { - if profile.Spec.AdditionalKernelArgs != nil { - for _, node := range workerRTNodes { - cmdline, err := nodes.ExecCommandOnMachineConfigDaemon(&node, []string{"cat", "/proc/cmdline"}) - Expect(err).ToNot(HaveOccurred()) - for _, arg := range profile.Spec.AdditionalKernelArgs { - Expect(string(cmdline)).To(ContainSubstring(arg)) - } - } - } - }) - }) - - Context("Tuned kernel parameters", func() { - It("[test_id:28466][crit:high][vendor:cnf-qe@redhat.com][level:acceptance] Should contain configuration injected through openshift-node-performance profile", func() { - sysctlMap := map[string]string{ - "kernel.hung_task_timeout_secs": "600", - "kernel.nmi_watchdog": "0", - "kernel.sched_rt_runtime_us": "-1", - "vm.stat_interval": "10", - "kernel.timer_migration": "1", - } - - key := types.NamespacedName{ - Name: components.GetComponentName(testutils.PerformanceProfileName, components.ProfileNamePerformance), - Namespace: components.NamespaceNodeTuningOperator, - } - tuned := &tunedv1.Tuned{} - err := testclient.Client.Get(context.TODO(), key, tuned) - Expect(err).ToNot(HaveOccurred(), "cannot find the Cluster Node Tuning Operator object "+key.String()) - validateTunedActiveProfile(workerRTNodes) - execSysctlOnWorkers(workerRTNodes, sysctlMap) - }) - }) - - Context("RPS configuration", func() { - It("Should have the correct RPS configuration", func() { - if profile.Spec.CPU == nil || profile.Spec.CPU.Reserved != nil { - return - } - - expectedRPSCPUs, err := cpuset.Parse(string(*profile.Spec.CPU.Reserved)) - Expect(err).ToNot(HaveOccurred()) - ociHookPath := filepath.Join("/rootfs", machineconfig.OCIHooksConfigDir, machineconfig.OCIHooksConfig) - Expect(err).ToNot(HaveOccurred()) - for _, node := range workerRTNodes { - // Verify the OCI RPS hook uses the correct RPS mask - hooksConfig, err := nodes.ExecCommandOnMachineConfigDaemon(&node, []string{"cat", ociHookPath}) - Expect(err).ToNot(HaveOccurred()) - - var hooks map[string]interface{} - err = json.Unmarshal(hooksConfig, &hooks) - Expect(err).ToNot(HaveOccurred()) - hook := hooks["hook"].(map[string]interface{}) - Expect(hook).ToNot(BeNil()) - args := hook["args"].([]interface{}) - Expect(len(args)).To(Equal(2), "unexpected arguments: %v", args) - - rpsCPUs, err := components.CPUMaskToCPUSet(args[1].(string)) - Expect(err).ToNot(HaveOccurred()) - Expect(rpsCPUs).To(Equal(expectedRPSCPUs), "the hook rps mask is different from the reserved CPUs") - - // Verify the systemd RPS service uses the correct RPS mask - cmd := []string{"sed", "-n", "s/^ExecStart=.*echo \\([A-Fa-f0-9]*\\) .*/\\1/p", "/rootfs/etc/systemd/system/update-rps@.service"} - serviceRPSCPUs, err := nodes.ExecCommandOnNode(cmd, &node) - Expect(err).ToNot(HaveOccurred()) - - rpsCPUs, err = 
components.CPUMaskToCPUSet(serviceRPSCPUs) - Expect(err).ToNot(HaveOccurred()) - Expect(rpsCPUs).To(Equal(expectedRPSCPUs), "the service rps mask is different from the reserved CPUs") - - // Verify all host network devices have the correct RPS mask - cmd = []string{"find", "/rootfs/sys/devices", "-type", "f", "-name", "rps_cpus", "-exec", "cat", "{}", ";"} - devsRPS, err := nodes.ExecCommandOnNode(cmd, &node) - Expect(err).ToNot(HaveOccurred()) - - for _, devRPS := range strings.Split(devsRPS, "\n") { - rpsCPUs, err = components.CPUMaskToCPUSet(devRPS) - Expect(err).ToNot(HaveOccurred()) - Expect(rpsCPUs).To(Equal(expectedRPSCPUs), "a host device rps mask is different from the reserved CPUs") - } - - // Verify all node pod network devices have the correct RPS mask - nodePods := &corev1.PodList{} - listOptions := &client.ListOptions{ - Namespace: "", - FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": node.Name}), - } - err = testclient.Client.List(context.TODO(), nodePods, listOptions) - Expect(err).ToNot(HaveOccurred()) - - for _, pod := range nodePods.Items { - cmd := []string{"find", "/sys/devices", "-type", "f", "-name", "rps_cpus", "-exec", "cat", "{}", ";"} - devsRPS, err := pods.WaitForPodOutput(testclient.K8sClient, &pod, cmd) - for _, devRPS := range strings.Split(strings.Trim(string(devsRPS), "\n"), "\n") { - rpsCPUs, err = components.CPUMaskToCPUSet(devRPS) - Expect(err).ToNot(HaveOccurred()) - Expect(rpsCPUs).To(Equal(expectedRPSCPUs), pod.Name+" has a device rps mask different from the reserved CPUs") - } - } - } - }) - }) - - Context("Network latency parameters adjusted by the Node Tuning Operator", func() { - It("[test_id:28467][crit:high][vendor:cnf-qe@redhat.com][level:acceptance] Should contain configuration injected through the openshift-node-performance profile", func() { - sysctlMap := map[string]string{ - "net.ipv4.tcp_fastopen": "3", - "kernel.sched_min_granularity_ns": "10000000", - "vm.dirty_ratio": "10", - "vm.dirty_background_ratio": "3", - "vm.swappiness": "10", - "kernel.sched_migration_cost_ns": "5000000", - } - key := types.NamespacedName{ - Name: components.GetComponentName(testutils.PerformanceProfileName, components.ProfileNamePerformance), - Namespace: components.NamespaceNodeTuningOperator, - } - tuned := &tunedv1.Tuned{} - err := testclient.Client.Get(context.TODO(), key, tuned) - Expect(err).ToNot(HaveOccurred(), "cannot find the Cluster Node Tuning Operator object "+components.ProfileNamePerformance) - validateTunedActiveProfile(workerRTNodes) - execSysctlOnWorkers(workerRTNodes, sysctlMap) - }) - }) - - Context("KubeletConfig experimental annotation", func() { - var secondMCP *mcov1.MachineConfigPool - var secondProfile *performancev2.PerformanceProfile - var newRole = "test-annotation" - - BeforeEach(func() { - newLabel := fmt.Sprintf("%s/%s", testutils.LabelRole, newRole) - - reserved := performancev2.CPUSet("0") - isolated := performancev2.CPUSet("1-3") - - secondProfile = &performancev2.PerformanceProfile{ - TypeMeta: metav1.TypeMeta{ - Kind: "PerformanceProfile", - APIVersion: performancev2.GroupVersion.String(), - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "test-annotation", - Annotations: map[string]string{ - "kubeletconfig.experimental": `{"systemReserved": {"memory": "256Mi"}, "kubeReserved": {"memory": "256Mi"}}`, - }, - }, - Spec: performancev2.PerformanceProfileSpec{ - CPU: &performancev2.CPU{ - Reserved: &reserved, - Isolated: &isolated, - }, - NodeSelector: map[string]string{newLabel: ""}, - RealTimeKernel: 
&performancev2.RealTimeKernel{ - Enabled: pointer.BoolPtr(true), - }, - NUMA: &performancev2.NUMA{ - TopologyPolicy: pointer.StringPtr("restricted"), - }, - }, - } - Expect(testclient.Client.Create(context.TODO(), secondProfile)).ToNot(HaveOccurred()) - - machineConfigSelector := componentprofile.GetMachineConfigLabel(secondProfile) - secondMCP = &mcov1.MachineConfigPool{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-annotation", - Labels: map[string]string{ - machineconfigv1.MachineConfigRoleLabelKey: newRole, - }, - }, - Spec: mcov1.MachineConfigPoolSpec{ - MachineConfigSelector: &metav1.LabelSelector{ - MatchLabels: machineConfigSelector, - }, - NodeSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - newLabel: "", - }, - }, - }, - } - - Expect(testclient.Client.Create(context.TODO(), secondMCP)).ToNot(HaveOccurred()) - }) - - It("should override system-reserved memory", func() { - var kubeletConfig *machineconfigv1.KubeletConfig - - Eventually(func() error { - By("Getting that new KubeletConfig") - configKey := types.NamespacedName{ - Name: components.GetComponentName(secondProfile.Name, components.ComponentNamePrefix), - Namespace: metav1.NamespaceNone, - } - kubeletConfig = &machineconfigv1.KubeletConfig{} - if err := testclient.GetWithRetry(context.TODO(), configKey, kubeletConfig); err != nil { - klog.Warningf("Failed to get the KubeletConfig %q", configKey.Name) - return err - } - - return nil - }, time.Minute, 5*time.Second).Should(BeNil()) - - kubeletConfigString := string(kubeletConfig.Spec.KubeletConfig.Raw) - Expect(kubeletConfigString).To(ContainSubstring(`"kubeReserved":{"memory":"256Mi"}`)) - Expect(kubeletConfigString).To(ContainSubstring(`"systemReserved":{"memory":"256Mi"}`)) - }) - - AfterEach(func() { - if secondProfile != nil { - if err := profiles.Delete(secondProfile.Name); err != nil { - klog.Warningf("failed to delete the performance profile %q: %v", secondProfile.Name, err) - } - } - - if secondMCP != nil { - if err := mcps.Delete(secondMCP.Name); err != nil { - klog.Warningf("failed to delete the machine config pool %q: %v", secondMCP.Name, err) - } - } - }) - }) - - Context("Create second performance profiles on a cluster", func() { - var secondMCP *mcov1.MachineConfigPool - var secondProfile *performancev2.PerformanceProfile - var newRole = "worker-new" - - BeforeEach(func() { - newLabel := fmt.Sprintf("%s/%s", testutils.LabelRole, newRole) - - reserved := performancev2.CPUSet("0") - isolated := performancev2.CPUSet("1-3") - - secondProfile = &performancev2.PerformanceProfile{ - TypeMeta: metav1.TypeMeta{ - Kind: "PerformanceProfile", - APIVersion: performancev2.GroupVersion.String(), - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "second-profile", - }, - Spec: performancev2.PerformanceProfileSpec{ - CPU: &performancev2.CPU{ - Reserved: &reserved, - Isolated: &isolated, - }, - NodeSelector: map[string]string{newLabel: ""}, - RealTimeKernel: &performancev2.RealTimeKernel{ - Enabled: pointer.BoolPtr(true), - }, - AdditionalKernelArgs: []string{ - "NEW_ARGUMENT", - }, - NUMA: &performancev2.NUMA{ - TopologyPolicy: pointer.StringPtr("restricted"), - }, - }, - } - - machineConfigSelector := componentprofile.GetMachineConfigLabel(secondProfile) - secondMCP = &mcov1.MachineConfigPool{ - ObjectMeta: metav1.ObjectMeta{ - Name: "second-mcp", - Labels: map[string]string{ - machineconfigv1.MachineConfigRoleLabelKey: newRole, - }, - }, - Spec: mcov1.MachineConfigPoolSpec{ - MachineConfigSelector: &metav1.LabelSelector{ - MatchLabels: machineConfigSelector, 
- }, - NodeSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - newLabel: "", - }, - }, - }, - } - - Expect(testclient.Client.Create(context.TODO(), secondMCP)).ToNot(HaveOccurred()) - }) - - AfterEach(func() { - if secondProfile != nil { - if err := profiles.Delete(secondProfile.Name); err != nil { - klog.Warningf("failed to delete the performance profile %q: %v", secondProfile.Name, err) - } - } - - if secondMCP != nil { - if err := mcps.Delete(secondMCP.Name); err != nil { - klog.Warningf("failed to delete the machine config pool %q: %v", secondMCP.Name, err) - } - } - }) - - It("[test_id:32364] Verifies that cluster can have multiple profiles", func() { - Expect(testclient.Client.Create(context.TODO(), secondProfile)).ToNot(HaveOccurred()) - - By("Checking that new KubeletConfig, MachineConfig and RuntimeClass created") - configKey := types.NamespacedName{ - Name: components.GetComponentName(secondProfile.Name, components.ComponentNamePrefix), - Namespace: metav1.NamespaceNone, - } - kubeletConfig := &machineconfigv1.KubeletConfig{} - err := testclient.GetWithRetry(context.TODO(), configKey, kubeletConfig) - Expect(err).ToNot(HaveOccurred(), fmt.Sprintf("cannot find KubeletConfig object %s", configKey.Name)) - Expect(kubeletConfig.Spec.MachineConfigPoolSelector.MatchLabels[machineconfigv1.MachineConfigRoleLabelKey]).Should(Equal(newRole)) - Expect(kubeletConfig.Spec.KubeletConfig.Raw).Should(ContainSubstring("restricted"), "Can't find value in KubeletConfig") - - runtimeClass := &v1beta1.RuntimeClass{} - err = testclient.GetWithRetry(context.TODO(), configKey, runtimeClass) - Expect(err).ToNot(HaveOccurred(), fmt.Sprintf("cannot find RuntimeClass profile object %s", runtimeClass.Name)) - Expect(runtimeClass.Handler).Should(Equal(machineconfig.HighPerformanceRuntime)) - - machineConfigKey := types.NamespacedName{ - Name: machineconfig.GetMachineConfigName(secondProfile), - Namespace: metav1.NamespaceNone, - } - machineConfig := &machineconfigv1.MachineConfig{} - err = testclient.GetWithRetry(context.TODO(), machineConfigKey, machineConfig) - Expect(err).ToNot(HaveOccurred(), fmt.Sprintf("cannot find MachineConfig object %s", configKey.Name)) - Expect(machineConfig.Labels[machineconfigv1.MachineConfigRoleLabelKey]).Should(Equal(newRole)) - - By("Checking that new Tuned profile created") - tunedKey := types.NamespacedName{ - Name: components.GetComponentName(secondProfile.Name, components.ProfileNamePerformance), - Namespace: components.NamespaceNodeTuningOperator, - } - tunedProfile := &tunedv1.Tuned{} - err = testclient.GetWithRetry(context.TODO(), tunedKey, tunedProfile) - Expect(err).ToNot(HaveOccurred(), fmt.Sprintf("cannot find Tuned profile object %s", tunedKey.Name)) - Expect(tunedProfile.Spec.Recommend[0].MachineConfigLabels[machineconfigv1.MachineConfigRoleLabelKey]).Should(Equal(newRole)) - Expect(*tunedProfile.Spec.Profile[0].Data).Should(ContainSubstring("NEW_ARGUMENT"), "Can't find value in Tuned profile") - - By("Checking that the initial MCP does not start updating") - Consistently(func() corev1.ConditionStatus { - return mcps.GetConditionStatus(testutils.RoleWorkerCNF, machineconfigv1.MachineConfigPoolUpdating) - }, 30, 5).Should(Equal(corev1.ConditionFalse)) - - By("Remove second profile and verify that KubeletConfig and MachineConfig were removed") - Expect(testclient.Client.Delete(context.TODO(), secondProfile)).ToNot(HaveOccurred()) - - profileKey := types.NamespacedName{ - Name: secondProfile.Name, - Namespace: secondProfile.Namespace, - } - 
Expect(profiles.WaitForDeletion(profileKey, 60*time.Second)).ToNot(HaveOccurred()) - - Consistently(func() corev1.ConditionStatus { - return mcps.GetConditionStatus(testutils.RoleWorkerCNF, machineconfigv1.MachineConfigPoolUpdating) - }, 30, 5).Should(Equal(corev1.ConditionFalse)) - - Expect(testclient.Client.Get(context.TODO(), configKey, kubeletConfig)).To(HaveOccurred(), fmt.Sprintf("KubeletConfig %s should be removed", configKey.Name)) - Expect(testclient.Client.Get(context.TODO(), machineConfigKey, machineConfig)).To(HaveOccurred(), fmt.Sprintf("MachineConfig %s should be removed", configKey.Name)) - Expect(testclient.Client.Get(context.TODO(), configKey, runtimeClass)).To(HaveOccurred(), fmt.Sprintf("RuntimeClass %s should be removed", configKey.Name)) - Expect(testclient.Client.Get(context.TODO(), tunedKey, tunedProfile)).To(HaveOccurred(), fmt.Sprintf("Tuned profile object %s should be removed", tunedKey.Name)) - - By("Checking that initial KubeletConfig and MachineConfig still exist") - initialKey := types.NamespacedName{ - Name: components.GetComponentName(profile.Name, components.ComponentNamePrefix), - Namespace: components.NamespaceNodeTuningOperator, - } - err = testclient.GetWithRetry(context.TODO(), initialKey, &machineconfigv1.KubeletConfig{}) - Expect(err).ToNot(HaveOccurred(), fmt.Sprintf("cannot find KubeletConfig object %s", initialKey.Name)) - - initialMachineConfigKey := types.NamespacedName{ - Name: machineconfig.GetMachineConfigName(profile), - Namespace: metav1.NamespaceNone, - } - err = testclient.GetWithRetry(context.TODO(), initialMachineConfigKey, &machineconfigv1.MachineConfig{}) - Expect(err).ToNot(HaveOccurred(), fmt.Sprintf("cannot find MachineConfig object %s", initialKey.Name)) - }) - }) - - Context("Verify API Conversions", func() { - verifyV2V1 := func() { - By("Checking v2 -> v1 conversion") - v1Profile := &performancev1.PerformanceProfile{} - key := types.NamespacedName{ - Name: profile.Name, - Namespace: profile.Namespace, - } - - err := testclient.Client.Get(context.TODO(), key, v1Profile) - Expect(err).ToNot(HaveOccurred(), "Failed getting v1Profile") - Expect(verifyV2Conversion(profile, v1Profile)).ToNot(HaveOccurred()) - - By("Checking v1 -> v2 conversion") - v1Profile.Name = "v1" - v1Profile.ResourceVersion = "" - v1Profile.Spec.NodeSelector = map[string]string{"v1/v1": "v1"} - v1Profile.Spec.MachineConfigPoolSelector = nil - v1Profile.Spec.MachineConfigLabel = nil - Expect(testclient.Client.Create(context.TODO(), v1Profile)).ToNot(HaveOccurred()) - - defer func() { - Expect(testclient.Client.Delete(context.TODO(), v1Profile)).ToNot(HaveOccurred()) - Expect(profiles.WaitForDeletion(key, 60*time.Second)).ToNot(HaveOccurred()) - }() - - key = types.NamespacedName{ - Name: v1Profile.Name, - Namespace: v1Profile.Namespace, - } - err = testclient.Client.Get(context.TODO(), key, v1Profile) - Expect(err).ToNot(HaveOccurred(), "Failed getting v1Profile") - - v2Profile := &performancev2.PerformanceProfile{} - err = testclient.GetWithRetry(context.TODO(), key, v2Profile) - Expect(err).ToNot(HaveOccurred(), "Failed getting v2Profile") - Expect(verifyV2Conversion(v2Profile, v1Profile)).ToNot(HaveOccurred()) - } - - verifyV1VAlpha1 := func() { - By("Acquiring the tests profile as a v1 profile") - v1Profile := &performancev1.PerformanceProfile{} - key := types.NamespacedName{ - Name: profile.Name, - Namespace: profile.Namespace, - } - - err := testclient.Client.Get(context.TODO(), key, v1Profile) - Expect(err).ToNot(HaveOccurred(), "Failed acquiring a v1 
profile") - - By("Checking v1 -> v1alpha1 conversion") - v1alpha1Profile := &performancev1alpha1.PerformanceProfile{} - key = types.NamespacedName{ - Name: v1Profile.Name, - Namespace: v1Profile.Namespace, - } - - err = testclient.Client.Get(context.TODO(), key, v1alpha1Profile) - Expect(err).ToNot(HaveOccurred(), "Failed getting v1alpha1Profile") - Expect(verifyV1alpha1Conversion(v1alpha1Profile, v1Profile)).ToNot(HaveOccurred()) - - By("Checking v1alpha1 -> v1 conversion") - v1alpha1Profile.Name = "v1alpha" - v1alpha1Profile.ResourceVersion = "" - v1alpha1Profile.Spec.NodeSelector = map[string]string{"v1alpha/v1alpha": "v1alpha"} - v1alpha1Profile.Spec.MachineConfigPoolSelector = nil - v1alpha1Profile.Spec.MachineConfigLabel = nil - Expect(testclient.Client.Create(context.TODO(), v1alpha1Profile)).ToNot(HaveOccurred()) - - key = types.NamespacedName{ - Name: v1alpha1Profile.Name, - Namespace: v1alpha1Profile.Namespace, - } - - defer func() { - Expect(testclient.Client.Delete(context.TODO(), v1alpha1Profile)).ToNot(HaveOccurred()) - Expect(profiles.WaitForDeletion(key, 60*time.Second)).ToNot(HaveOccurred()) - }() - - v1Profile = &performancev1.PerformanceProfile{} - err = testclient.GetWithRetry(context.TODO(), key, v1Profile) - Expect(err).ToNot(HaveOccurred(), "Failed getting v1profile") - Expect(verifyV1alpha1Conversion(v1alpha1Profile, v1Profile)).ToNot(HaveOccurred()) - } - - // empty context to use the same JustBeforeEach and AfterEach - Context("", func() { - var testProfileName string - var globallyDisableIrqLoadBalancing bool - - JustBeforeEach(func() { - key := types.NamespacedName{ - Name: profile.Name, - Namespace: profile.Namespace, - } - err := testclient.Client.Get(context.TODO(), key, profile) - Expect(err).ToNot(HaveOccurred(), "Failed to get profile") - - profile.Name = testProfileName - profile.ResourceVersion = "" - profile.Spec.NodeSelector = map[string]string{"test/test": "test"} - profile.Spec.GloballyDisableIrqLoadBalancing = pointer.BoolPtr(globallyDisableIrqLoadBalancing) - profile.Spec.MachineConfigPoolSelector = nil - profile.Spec.MachineConfigLabel = nil - - err = testclient.Client.Create(context.TODO(), profile) - Expect(err).ToNot(HaveOccurred(), "Failed to create profile") - - // we need to get updated profile object after the name and spec changes - key = types.NamespacedName{ - Name: profile.Name, - Namespace: profile.Namespace, - } - err = testclient.Client.Get(context.TODO(), key, profile) - Expect(err).ToNot(HaveOccurred(), "Failed to get profile") - }) - - When("the GloballyDisableIrqLoadBalancing field set to false", func() { - BeforeEach(func() { - testProfileName = "gdilb-false" - globallyDisableIrqLoadBalancing = false - }) - - It("should preserve the value during the v1 <-> v2 conversion", func() { - verifyV2V1() - }) - }) - - When("the GloballyDisableIrqLoadBalancing field set to true", func() { - BeforeEach(func() { - testProfileName = "gdilb-true" - globallyDisableIrqLoadBalancing = true - }) - - It("should preserve the value during the v1 <-> v2 conversion", func() { - verifyV2V1() - }) - }) - - AfterEach(func() { - Expect(testclient.Client.Delete(context.TODO(), profile)).ToNot(HaveOccurred()) - Expect(profiles.WaitForDeletion(types.NamespacedName{ - Name: profile.Name, - Namespace: profile.Namespace, - }, 60*time.Second)).ToNot(HaveOccurred()) - }) - - }) - - When("the performance profile does not contain NUMA field", func() { - BeforeEach(func() { - key := types.NamespacedName{ - Name: profile.Name, - Namespace: profile.Namespace, - } - 
err := testclient.Client.Get(context.TODO(), key, profile) - Expect(err).ToNot(HaveOccurred(), "Failed getting v1Profile") - - profile.Name = "without-numa" - profile.ResourceVersion = "" - profile.Spec.NodeSelector = map[string]string{"withoutNUMA/withoutNUMA": "withoutNUMA"} - profile.Spec.NUMA = nil - profile.Spec.MachineConfigPoolSelector = nil - profile.Spec.MachineConfigLabel = nil - - err = testclient.Client.Create(context.TODO(), profile) - Expect(err).ToNot(HaveOccurred(), "Failed to create profile without NUMA") - }) - - AfterEach(func() { - Expect(testclient.Client.Delete(context.TODO(), profile)).ToNot(HaveOccurred()) - Expect(profiles.WaitForDeletion(types.NamespacedName{ - Name: profile.Name, - Namespace: profile.Namespace, - }, 60*time.Second)).ToNot(HaveOccurred()) - }) - - It("Verifies v1 <-> v1alpha1 conversions", func() { - verifyV1VAlpha1() - }) - - It("Verifies v1 <-> v2 conversions", func() { - verifyV2V1() - }) - }) - - It("[test_id:35887] Verifies v1 <-> v1alpha1 conversions", func() { - verifyV1VAlpha1() - }) - - It("[test_id:35888] Verifies v1 <-> v2 conversions", func() { - verifyV2V1() - }) - }) - - Context("Validation webhook", func() { - BeforeEach(func() { - if discovery.Enabled() { - Skip("Discovery mode enabled, test skipped because it creates incorrect profiles") - } - }) - - validateObject := func(obj client.Object, message string) { - err := testclient.Client.Create(context.TODO(), obj) - Expect(err).To(HaveOccurred(), "expected the validation error") - Expect(err.Error()).To(ContainSubstring(message)) - } - - Context("with API version v1alpha1 profile", func() { - var v1alpha1Profile *performancev1alpha1.PerformanceProfile - - BeforeEach(func() { - v1alpha1Profile = &performancev1alpha1.PerformanceProfile{ - TypeMeta: metav1.TypeMeta{ - Kind: "PerformanceProfile", - APIVersion: performancev1alpha1.GroupVersion.String(), - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "v1alpha1-profile", - }, - Spec: performancev1alpha1.PerformanceProfileSpec{ - RealTimeKernel: &performancev1alpha1.RealTimeKernel{ - Enabled: pointer.BoolPtr(true), - }, - NodeSelector: map[string]string{"v1alpha1/v1alpha1": "v1alpha1"}, - NUMA: &performancev1alpha1.NUMA{ - TopologyPolicy: pointer.StringPtr("restricted"), - }, - }, - } - }) - - It("should reject the creation of the profile with overlapping CPUs", func() { - reserved := performancev1alpha1.CPUSet("0-3") - isolated := performancev1alpha1.CPUSet("0-7") - - v1alpha1Profile.Spec.CPU = &performancev1alpha1.CPU{ - Reserved: &reserved, - Isolated: &isolated, - } - validateObject(v1alpha1Profile, "reserved and isolated cpus overlap") - }) - - It("should reject the creation of the profile with no isolated CPUs", func() { - reserved := performancev1alpha1.CPUSet("0-3") - isolated := performancev1alpha1.CPUSet("") - - v1alpha1Profile.Spec.CPU = &performancev1alpha1.CPU{ - Reserved: &reserved, - Isolated: &isolated, - } - validateObject(v1alpha1Profile, "isolated CPUs can not be empty") - }) - - It("should reject the creation of the profile with the node selector that already in use", func() { - reserved := performancev1alpha1.CPUSet("0,1") - isolated := performancev1alpha1.CPUSet("2,3") - - v1alpha1Profile.Spec.CPU = &performancev1alpha1.CPU{ - Reserved: &reserved, - Isolated: &isolated, - } - v1alpha1Profile.Spec.NodeSelector = testutils.NodeSelectorLabels - validateObject(v1alpha1Profile, "the profile has the same node selector as the performance profile") - }) - }) - - Context("with API version v1 profile", func() { - var v1Profile 
*performancev1.PerformanceProfile - - BeforeEach(func() { - v1Profile = &performancev1.PerformanceProfile{ - TypeMeta: metav1.TypeMeta{ - Kind: "PerformanceProfile", - APIVersion: performancev1.GroupVersion.String(), - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "v1-profile", - }, - Spec: performancev1.PerformanceProfileSpec{ - RealTimeKernel: &performancev1.RealTimeKernel{ - Enabled: pointer.BoolPtr(true), - }, - NodeSelector: map[string]string{"v1/v1": "v1"}, - NUMA: &performancev1.NUMA{ - TopologyPolicy: pointer.StringPtr("restricted"), - }, - }, - } - }) - - It("should reject the creation of the profile with overlapping CPUs", func() { - reserved := performancev1.CPUSet("0-3") - isolated := performancev1.CPUSet("0-7") - - v1Profile.Spec.CPU = &performancev1.CPU{ - Reserved: &reserved, - Isolated: &isolated, - } - validateObject(v1Profile, "reserved and isolated cpus overlap") - }) - - It("should reject the creation of the profile with no isolated CPUs", func() { - reserved := performancev1.CPUSet("0-3") - isolated := performancev1.CPUSet("") - - v1Profile.Spec.CPU = &performancev1.CPU{ - Reserved: &reserved, - Isolated: &isolated, - } - validateObject(v1Profile, "isolated CPUs can not be empty") - }) - - It("should reject the creation of the profile with the node selector that already in use", func() { - reserved := performancev1.CPUSet("0,1") - isolated := performancev1.CPUSet("2,3") - - v1Profile.Spec.CPU = &performancev1.CPU{ - Reserved: &reserved, - Isolated: &isolated, - } - v1Profile.Spec.NodeSelector = testutils.NodeSelectorLabels - validateObject(v1Profile, "the profile has the same node selector as the performance profile") - }) - }) - - Context("with profile version v2", func() { - var v2Profile *performancev2.PerformanceProfile - - BeforeEach(func() { - v2Profile = &performancev2.PerformanceProfile{ - TypeMeta: metav1.TypeMeta{ - Kind: "PerformanceProfile", - APIVersion: performancev2.GroupVersion.String(), - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "v2-profile", - }, - Spec: performancev2.PerformanceProfileSpec{ - RealTimeKernel: &performancev2.RealTimeKernel{ - Enabled: pointer.BoolPtr(true), - }, - NodeSelector: map[string]string{"v2/v2": "v2"}, - NUMA: &performancev2.NUMA{ - TopologyPolicy: pointer.StringPtr("restricted"), - }, - }, - } - }) - - It("should reject the creation of the profile with overlapping CPUs", func() { - reserved := performancev2.CPUSet("0-3") - isolated := performancev2.CPUSet("0-7") - - v2Profile.Spec.CPU = &performancev2.CPU{ - Reserved: &reserved, - Isolated: &isolated, - } - validateObject(v2Profile, "reserved and isolated cpus overlap") - }) - - It("should reject the creation of the profile with no isolated CPUs", func() { - reserved := performancev2.CPUSet("0-3") - isolated := performancev2.CPUSet("") - - v2Profile.Spec.CPU = &performancev2.CPU{ - Reserved: &reserved, - Isolated: &isolated, - } - validateObject(v2Profile, "isolated CPUs can not be empty") - }) - - It("should reject the creation of the profile with the node selector that already in use", func() { - reserved := performancev2.CPUSet("0,1") - isolated := performancev2.CPUSet("2,3") - - v2Profile.Spec.CPU = &performancev2.CPU{ - Reserved: &reserved, - Isolated: &isolated, - } - v2Profile.Spec.NodeSelector = testutils.NodeSelectorLabels - validateObject(v2Profile, "the profile has the same node selector as the performance profile") - }) - }) - }) -}) - -func verifyV1alpha1Conversion(v1alpha1Profile *performancev1alpha1.PerformanceProfile, v1Profile 
*performancev1.PerformanceProfile) error { - specCPU := v1alpha1Profile.Spec.CPU - if (specCPU == nil) != (v1Profile.Spec.CPU == nil) { - return fmt.Errorf("spec CPUs field is different") - } - - if specCPU != nil { - if (specCPU.Reserved == nil) != (v1Profile.Spec.CPU.Reserved == nil) { - return fmt.Errorf("spec CPUs Reserved field is different") - } - if specCPU.Reserved != nil { - if string(*specCPU.Reserved) != string(*v1Profile.Spec.CPU.Reserved) { - return fmt.Errorf("reserved CPUs are different [v1alpha1: %s, v1: %s]", - *specCPU.Reserved, *v1Profile.Spec.CPU.Reserved) - } - } - - if (specCPU.Isolated == nil) != (v1Profile.Spec.CPU.Isolated == nil) { - return fmt.Errorf("spec CPUs Isolated field is different") - } - if specCPU.Isolated != nil { - if string(*specCPU.Isolated) != string(*v1Profile.Spec.CPU.Isolated) { - return fmt.Errorf("isolated CPUs are different [v1alpha1: %s, v1: %s]", - *specCPU.Isolated, *v1Profile.Spec.CPU.Isolated) - } - } - - if (specCPU.BalanceIsolated == nil) != (v1Profile.Spec.CPU.BalanceIsolated == nil) { - return fmt.Errorf("spec CPUs BalanceIsolated field is different") - } - if specCPU.BalanceIsolated != nil { - if *specCPU.BalanceIsolated != *v1Profile.Spec.CPU.BalanceIsolated { - return fmt.Errorf("balanceIsolated field is different [v1alpha1: %t, v1: %t]", - *specCPU.BalanceIsolated, *v1Profile.Spec.CPU.BalanceIsolated) - } - } - } - - specHugePages := v1alpha1Profile.Spec.HugePages - if (specHugePages == nil) != (v1Profile.Spec.HugePages == nil) { - return fmt.Errorf("spec HugePages field is different") - } - - if specHugePages != nil { - if (specHugePages.DefaultHugePagesSize == nil) != (v1Profile.Spec.HugePages.DefaultHugePagesSize == nil) { - return fmt.Errorf("spec HugePages defaultHugePagesSize field is different") - } - if specHugePages.DefaultHugePagesSize != nil { - if string(*specHugePages.DefaultHugePagesSize) != string(*v1Profile.Spec.HugePages.DefaultHugePagesSize) { - return fmt.Errorf("defaultHugePagesSize field is different [v1alpha1: %s, v1: %s]", - *specHugePages.DefaultHugePagesSize, *v1Profile.Spec.HugePages.DefaultHugePagesSize) - } - } - - if len(specHugePages.Pages) != len(v1Profile.Spec.HugePages.Pages) { - return fmt.Errorf("pages field is different [v1alpha1: %v, v1: %v]", - specHugePages.Pages, v1Profile.Spec.HugePages.Pages) - } - - for i, v1alpha1Page := range specHugePages.Pages { - v1page := v1Profile.Spec.HugePages.Pages[i] - if string(v1alpha1Page.Size) != string(v1page.Size) || - (v1alpha1Page.Node == nil) != (v1page.Node == nil) || - (v1alpha1Page.Node != nil && *v1alpha1Page.Node != *v1page.Node) || - v1alpha1Page.Count != v1page.Count { - return fmt.Errorf("pages field is different [v1alpha1: %v, v1: %v]", - specHugePages.Pages, v1Profile.Spec.HugePages.Pages) - } - } - } - - if !reflect.DeepEqual(v1alpha1Profile.Spec.MachineConfigLabel, v1Profile.Spec.MachineConfigLabel) { - return fmt.Errorf("machineConfigLabel field is different [v1alpha1: %v, v1: %v]", - v1alpha1Profile.Spec.MachineConfigLabel, v1Profile.Spec.MachineConfigLabel) - } - - if !reflect.DeepEqual(v1alpha1Profile.Spec.MachineConfigPoolSelector, v1Profile.Spec.MachineConfigPoolSelector) { - return fmt.Errorf("machineConfigPoolSelector field is different [v1alpha1: %v, v1: %v]", - v1alpha1Profile.Spec.MachineConfigPoolSelector, v1Profile.Spec.MachineConfigPoolSelector) - } - - if !reflect.DeepEqual(v1alpha1Profile.Spec.NodeSelector, v1Profile.Spec.NodeSelector) { - return fmt.Errorf("nodeSelector field is different [v1alpha1: %v, v1: %v]", - 
v1alpha1Profile.Spec.NodeSelector, v1Profile.Spec.NodeSelector) - } - - specRealTimeKernel := v1alpha1Profile.Spec.RealTimeKernel - if (specRealTimeKernel == nil) != (v1Profile.Spec.RealTimeKernel == nil) { - return fmt.Errorf("spec RealTimeKernel field is different") - } - - if specRealTimeKernel != nil { - if (specRealTimeKernel.Enabled == nil) != (v1Profile.Spec.RealTimeKernel.Enabled == nil) { - return fmt.Errorf("spec RealTimeKernel.Enabled field is different") - } - - if specRealTimeKernel.Enabled != nil { - if *specRealTimeKernel.Enabled != *v1Profile.Spec.RealTimeKernel.Enabled { - return fmt.Errorf("specRealTimeKernel field is different [v1alpha1: %t, v1: %t]", - *specRealTimeKernel.Enabled, *v1Profile.Spec.RealTimeKernel.Enabled) - } - } - } - - if !reflect.DeepEqual(v1alpha1Profile.Spec.AdditionalKernelArgs, v1Profile.Spec.AdditionalKernelArgs) { - return fmt.Errorf("additionalKernelArgs field is different [v1alpha1: %v, v1: %v]", - v1alpha1Profile.Spec.AdditionalKernelArgs, v1Profile.Spec.AdditionalKernelArgs) - } - - specNUMA := v1alpha1Profile.Spec.NUMA - if (specNUMA == nil) != (v1Profile.Spec.NUMA == nil) { - return fmt.Errorf("spec NUMA field is different") - } - - if specNUMA != nil { - if (specNUMA.TopologyPolicy == nil) != (v1Profile.Spec.NUMA.TopologyPolicy == nil) { - return fmt.Errorf("spec NUMA topologyPolicy field is different") - } - if specNUMA.TopologyPolicy != nil { - if *specNUMA.TopologyPolicy != *v1Profile.Spec.NUMA.TopologyPolicy { - return fmt.Errorf("topologyPolicy field is different [v1alpha1: %s, v1: %s]", - *specNUMA.TopologyPolicy, *v1Profile.Spec.NUMA.TopologyPolicy) - } - } - } - - return nil -} - -func verifyV2Conversion(v2Profile *performancev2.PerformanceProfile, v1Profile *performancev1.PerformanceProfile) error { - specCPU := v2Profile.Spec.CPU - if (specCPU == nil) != (v1Profile.Spec.CPU == nil) { - return fmt.Errorf("spec CPUs field is different") - } - - if specCPU != nil { - if (specCPU.Reserved == nil) != (v1Profile.Spec.CPU.Reserved == nil) { - return fmt.Errorf("spec CPUs Reserved field is different") - } - if specCPU.Reserved != nil { - if string(*specCPU.Reserved) != string(*v1Profile.Spec.CPU.Reserved) { - return fmt.Errorf("reserved CPUs are different [v2: %s, v1: %s]", - *specCPU.Reserved, *v1Profile.Spec.CPU.Reserved) - } - } - - if (specCPU.Isolated == nil) != (v1Profile.Spec.CPU.Isolated == nil) { - return fmt.Errorf("spec CPUs Isolated field is different") - } - if specCPU.Isolated != nil { - if string(*specCPU.Isolated) != string(*v1Profile.Spec.CPU.Isolated) { - return fmt.Errorf("isolated CPUs are different [v2: %s, v1: %s]", - *specCPU.Isolated, *v1Profile.Spec.CPU.Isolated) - } - } - - if (specCPU.BalanceIsolated == nil) != (v1Profile.Spec.CPU.BalanceIsolated == nil) { - return fmt.Errorf("spec CPUs BalanceIsolated field is different") - } - if specCPU.BalanceIsolated != nil { - if *specCPU.BalanceIsolated != *v1Profile.Spec.CPU.BalanceIsolated { - return fmt.Errorf("balanceIsolated field is different [v2: %t, v1: %t]", - *specCPU.BalanceIsolated, *v1Profile.Spec.CPU.BalanceIsolated) - } - } - } - - specHugePages := v2Profile.Spec.HugePages - if (specHugePages == nil) != (v1Profile.Spec.HugePages == nil) { - return fmt.Errorf("spec HugePages field is different") - } - - if specHugePages != nil { - if (specHugePages.DefaultHugePagesSize == nil) != (v1Profile.Spec.HugePages.DefaultHugePagesSize == nil) { - return fmt.Errorf("spec HugePages defaultHugePagesSize field is different") - } - if specHugePages.DefaultHugePagesSize 
!= nil { - if string(*specHugePages.DefaultHugePagesSize) != string(*v1Profile.Spec.HugePages.DefaultHugePagesSize) { - return fmt.Errorf("defaultHugePagesSize field is different [v2: %s, v1: %s]", - *specHugePages.DefaultHugePagesSize, *v1Profile.Spec.HugePages.DefaultHugePagesSize) - } - } - - if len(specHugePages.Pages) != len(v1Profile.Spec.HugePages.Pages) { - return fmt.Errorf("pages field is different [v2: %v, v1: %v]", - specHugePages.Pages, v1Profile.Spec.HugePages.Pages) - } - - for i, v1alpha1Page := range specHugePages.Pages { - v1page := v1Profile.Spec.HugePages.Pages[i] - if string(v1alpha1Page.Size) != string(v1page.Size) || - (v1alpha1Page.Node == nil) != (v1page.Node == nil) || - (v1alpha1Page.Node != nil && *v1alpha1Page.Node != *v1page.Node) || - v1alpha1Page.Count != v1page.Count { - return fmt.Errorf("pages field is different [v2: %v, v1: %v]", - specHugePages.Pages, v1Profile.Spec.HugePages.Pages) - } - } - } - - if !reflect.DeepEqual(v2Profile.Spec.MachineConfigLabel, v1Profile.Spec.MachineConfigLabel) { - return fmt.Errorf("machineConfigLabel field is different [v2: %v, v1: %v]", - v2Profile.Spec.MachineConfigLabel, v1Profile.Spec.MachineConfigLabel) - } - - if !reflect.DeepEqual(v2Profile.Spec.MachineConfigPoolSelector, v1Profile.Spec.MachineConfigPoolSelector) { - return fmt.Errorf("machineConfigPoolSelector field is different [v2: %v, v1: %v]", - v2Profile.Spec.MachineConfigPoolSelector, v1Profile.Spec.MachineConfigPoolSelector) - } - - if !reflect.DeepEqual(v2Profile.Spec.NodeSelector, v1Profile.Spec.NodeSelector) { - return fmt.Errorf("nodeSelector field is different [v2: %v, v1: %v]", - v2Profile.Spec.NodeSelector, v1Profile.Spec.NodeSelector) - } - - specRealTimeKernel := v2Profile.Spec.RealTimeKernel - if (specRealTimeKernel == nil) != (v1Profile.Spec.RealTimeKernel == nil) { - return fmt.Errorf("spec RealTimeKernel field is different") - } - - if specRealTimeKernel != nil { - if (specRealTimeKernel.Enabled == nil) != (v1Profile.Spec.RealTimeKernel.Enabled == nil) { - return fmt.Errorf("spec RealTimeKernel.Enabled field is different") - } - - if specRealTimeKernel.Enabled != nil { - if *specRealTimeKernel.Enabled != *v1Profile.Spec.RealTimeKernel.Enabled { - return fmt.Errorf("specRealTimeKernel field is different [v2: %t, v1: %t]", - *specRealTimeKernel.Enabled, *v1Profile.Spec.RealTimeKernel.Enabled) - } - } - } - - if !reflect.DeepEqual(v2Profile.Spec.AdditionalKernelArgs, v1Profile.Spec.AdditionalKernelArgs) { - return fmt.Errorf("additionalKernelArgs field is different [v2: %v, v1: %v]", - v2Profile.Spec.AdditionalKernelArgs, v1Profile.Spec.AdditionalKernelArgs) - } - - specNUMA := v2Profile.Spec.NUMA - if (specNUMA == nil) != (v1Profile.Spec.NUMA == nil) { - return fmt.Errorf("spec NUMA field is different") - } - - if specNUMA != nil { - if (specNUMA.TopologyPolicy == nil) != (v1Profile.Spec.NUMA.TopologyPolicy == nil) { - return fmt.Errorf("spec NUMA topologyPolicy field is different") - } - if specNUMA.TopologyPolicy != nil { - if *specNUMA.TopologyPolicy != *v1Profile.Spec.NUMA.TopologyPolicy { - return fmt.Errorf("topologyPolicy field is different [v2: %s, v1: %s]", - *specNUMA.TopologyPolicy, *v1Profile.Spec.NUMA.TopologyPolicy) - } - } - } - - for _, f := range v2Profile.GetObjectMeta().GetManagedFields() { - if f.APIVersion == performancev1alpha1.GroupVersion.String() || - f.APIVersion == performancev1.GroupVersion.String() { - if v2Profile.Spec.GloballyDisableIrqLoadBalancing == nil { - return fmt.Errorf("globallyDisableIrqLoadBalancing field 
must be set to true") - } - } - } - - return nil -} - -func execSysctlOnWorkers(workerNodes []corev1.Node, sysctlMap map[string]string) { - var err error - var out []byte - for _, node := range workerNodes { - for param, expected := range sysctlMap { - By(fmt.Sprintf("executing the command \"sysctl -n %s\"", param)) - out, err = nodes.ExecCommandOnMachineConfigDaemon(&node, []string{"sysctl", "-n", param}) - Expect(err).ToNot(HaveOccurred()) - Expect(strings.TrimSpace(string(out))).Should(Equal(expected), "parameter %s value is not %s.", param, expected) - } - } -} - -// execute sysctl command inside container in a tuned pod -func validateTunedActiveProfile(wrknodes []corev1.Node) { - var err error - var out []byte - activeProfileName := components.GetComponentName(testutils.PerformanceProfileName, components.ProfileNamePerformance) - - // check if some another Tuned profile overwrites PAO profile - tunedList := &tunedv1.TunedList{} - err = testclient.Client.List(context.TODO(), tunedList) - Expect(err).NotTo(HaveOccurred()) - - for _, t := range tunedList.Items { - if len(t.Spec.Profile) > 0 && t.Spec.Profile[0].Data != nil && strings.Contains(*t.Spec.Profile[0].Data, fmt.Sprintf("include=%s", activeProfileName)) { - testlog.Warning(fmt.Sprintf("PAO tuned profile amended by '%s' profile, test may fail", t.Name)) - if t.Spec.Profile[0].Name != nil { - activeProfileName = *t.Spec.Profile[0].Name - } - } - } - - for _, node := range wrknodes { - tuned := nodes.TunedForNode(&node, RunningOnSingleNode) - tunedName := tuned.ObjectMeta.Name - By(fmt.Sprintf("executing the command cat /etc/tuned/active_profile inside the pod %s", tunedName)) - Eventually(func() string { - out, err = pods.WaitForPodOutput(testclient.K8sClient, tuned, []string{"cat", "/etc/tuned/active_profile"}) - return strings.TrimSpace(string(out)) - }, cluster.ComputeTestTimeout(testTimeout*time.Second, RunningOnSingleNode), testPollInterval*time.Second).Should(Equal(activeProfileName), - fmt.Sprintf("active_profile is not set to %s. %v", activeProfileName, err)) - } -} diff --git a/test/e2e/pao/functests/1_performance/rt-kernel.go b/test/e2e/pao/functests/1_performance/rt-kernel.go deleted file mode 100644 index 9172ca498..000000000 --- a/test/e2e/pao/functests/1_performance/rt-kernel.go +++ /dev/null @@ -1,74 +0,0 @@ -package __performance - -import ( - "fmt" - - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - - performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2" - testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/discovery" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/nodes" -) - -var _ = Describe("[performance]RT Kernel", func() { - var discoveryFailed bool - var profile *performancev2.PerformanceProfile - var err error - - testutils.BeforeAll(func() { - profile, err = discovery.GetFilteredDiscoveryPerformanceProfile( - func(profile performancev2.PerformanceProfile) bool { - if profile.Spec.RealTimeKernel != nil && - profile.Spec.RealTimeKernel.Enabled != nil && - *profile.Spec.RealTimeKernel.Enabled == true { - return true - } - return false - }) - - if err == discovery.ErrProfileNotFound { - discoveryFailed = true - return - } - Expect(err).ToNot(HaveOccurred(), "failed to get a profile using a filter for RT kernel") - }) - - BeforeEach(func() { - if discoveryFailed { - Skip("Skipping RT Kernel tests since no profile found with RT kernel set") - } - - }) - - It("[test_id:26861][crit:high][vendor:cnf-qe@redhat.com][level:acceptance] should have RT kernel enabled", func() { - workerRTNodes, err := nodes.GetByLabels(testutils.NodeSelectorLabels) - Expect(err).ToNot(HaveOccurred()) - workerRTNodes, err = nodes.MatchingOptionalSelector(workerRTNodes) - Expect(err).ToNot(HaveOccurred(), fmt.Sprintf("error looking for the optional selector: %v", err)) - Expect(workerRTNodes).ToNot(BeEmpty(), "No RT worker node found!") - - err = nodes.HasPreemptRTKernel(&workerRTNodes[0]) - Expect(err).ToNot(HaveOccurred()) - }) - - It("[test_id:28526][crit:high][vendor:cnf-qe@redhat.com][level:acceptance] a node without performance profile applied should not have RT kernel installed", func() { - - By("Skipping test if cluster does not have another available worker node") - nonPerformancesWorkers, err := nodes.GetNonPerformancesWorkers(profile.Spec.NodeSelector) - Expect(err).ToNot(HaveOccurred()) - - if len(nonPerformancesWorkers) == 0 { - Skip("Skipping test because there are no additional non-cnf worker nodes") - } - - cmd := []string{"uname", "-a"} - kernel, err := nodes.ExecCommandOnNode(cmd, &nonPerformancesWorkers[0]) - Expect(err).ToNot(HaveOccurred(), "failed to execute uname") - Expect(kernel).To(ContainSubstring("Linux"), "Node should have Linux string") - - err = nodes.HasPreemptRTKernel(&nonPerformancesWorkers[0]) - Expect(err).To(HaveOccurred(), "Node should have non-RT kernel") - }) -}) diff --git a/test/e2e/pao/functests/1_performance/test_suite_performance_test.go b/test/e2e/pao/functests/1_performance/test_suite_performance_test.go deleted file mode 100644 index 6f344538e..000000000 --- a/test/e2e/pao/functests/1_performance/test_suite_performance_test.go +++ /dev/null @@ -1,51 +0,0 @@ -//go:build !unittests -// +build !unittests - -package __performance_test - -import ( - "context" - "testing" - "time" - - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - - "k8s.io/apimachinery/pkg/api/errors" - - ginkgo_reporters "kubevirt.io/qe-tools/pkg/ginkgo-reporters" - - testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils" - testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/junit" - testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/log" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/namespaces" -) - -var _ = BeforeSuite(func() { - Expect(testclient.ClientsEnabled).To(BeTrue(), "package client not enabled") - // create test namespace - err := testclient.Client.Create(context.TODO(), namespaces.TestingNamespace) - if errors.IsAlreadyExists(err) { - testlog.Warning("test namespace already exists, that is unexpected") - return - } - Expect(err).ToNot(HaveOccurred()) -}) - -var _ = AfterSuite(func() { - err := testclient.Client.Delete(context.TODO(), namespaces.TestingNamespace) - Expect(err).ToNot(HaveOccurred()) - err = namespaces.WaitForDeletion(testutils.NamespaceTesting, 5*time.Minute) -}) - -func TestPerformance(t *testing.T) { - RegisterFailHandler(Fail) - - rr := []Reporter{} - if ginkgo_reporters.Polarion.Run { - rr = append(rr, &ginkgo_reporters.Polarion) - } - rr = append(rr, junit.NewJUnitReporter("performance")) - RunSpecsWithDefaultAndCustomReporters(t, "Performance Addon Operator e2e tests", rr) -} diff --git a/test/e2e/pao/functests/1_performance/topology_manager.go b/test/e2e/pao/functests/1_performance/topology_manager.go deleted file mode 100644 index da9a7f4af..000000000 --- a/test/e2e/pao/functests/1_performance/topology_manager.go +++ /dev/null @@ -1,47 +0,0 @@ -package __performance - -import ( - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - - performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2" - testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/discovery" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/nodes" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/profiles" - - corev1 "k8s.io/api/core/v1" - kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1" -) - -var _ = Describe("[rfe_id:27350][performance]Topology Manager", func() { - var workerRTNodes []corev1.Node - var profile *performancev2.PerformanceProfile - - BeforeEach(func() { - if discovery.Enabled() && testutils.ProfileNotFound { - Skip("Discovery mode enabled, performance profile not found") - } - - var err error - workerRTNodes, err = nodes.GetByLabels(testutils.NodeSelectorLabels) - Expect(err).ToNot(HaveOccurred()) - workerRTNodes, err = nodes.MatchingOptionalSelector(workerRTNodes) - Expect(err).ToNot(HaveOccurred(), "Error looking for the optional selector: %v", err) - Expect(workerRTNodes).ToNot(BeEmpty(), "No RT worker node found!") - profile, err = profiles.GetByNodeLabels(testutils.NodeSelectorLabels) - Expect(err).ToNot(HaveOccurred()) - }) - - It("[test_id:26932][crit:high][vendor:cnf-qe@redhat.com][level:acceptance] should be enabled with the policy specified in profile", func() { - kubeletConfig, err := nodes.GetKubeletConfig(&workerRTNodes[0]) - Expect(err).ToNot(HaveOccurred()) - - // verify topology manager policy - if profile.Spec.NUMA != nil && profile.Spec.NUMA.TopologyPolicy != nil { - Expect(kubeletConfig.TopologyManagerPolicy).To(Equal(*profile.Spec.NUMA.TopologyPolicy), "Topology Manager policy mismatch got %q expected %q", kubeletConfig.TopologyManagerPolicy, *profile.Spec.NUMA.TopologyPolicy) - } else { - Expect(kubeletConfig.TopologyManagerPolicy).To(Equal(kubeletconfigv1beta1.BestEffortTopologyManagerPolicy), "Topology Manager policy mismatch got %q expected %q", kubeletconfigv1beta1.BestEffortTopologyManagerPolicy) - } - }) -}) diff --git a/test/e2e/pao/functests/2_performance_update/kubelet.go b/test/e2e/pao/functests/2_performance_update/kubelet.go deleted file mode 100644 index 9910b0e33..000000000 --- a/test/e2e/pao/functests/2_performance_update/kubelet.go +++ /dev/null @@ -1,196 +0,0 @@ -package __performance_update - -import ( - "context" - "encoding/json" - "fmt" - "strconv" - "strings" - - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" - - performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2" - testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils" - testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/discovery" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/mcps" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/nodes" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/profiles" - machineconfigv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" -) - -var _ = Describe("[ref_id: 45487][performance]additional kubelet arguments", func() { - var profile *performancev2.PerformanceProfile - var workerRTNodes []corev1.Node - var performanceMCP string - - testutils.BeforeAll(func() { - workerRTNodes, err := nodes.GetByLabels(testutils.NodeSelectorLabels) - Expect(err).ToNot(HaveOccurred()) - - workerRTNodes, err = nodes.MatchingOptionalSelector(workerRTNodes) - Expect(err).ToNot(HaveOccurred()) - - profile, err = profiles.GetByNodeLabels(testutils.NodeSelectorLabels) - Expect(err).ToNot(HaveOccurred()) - - performanceMCP, err = mcps.GetByProfile(profile) - Expect(err).ToNot(HaveOccurred()) - - // Verify that worker and performance MCP have updated state equals to true - for _, mcpName := range []string{testutils.RoleWorker, performanceMCP} { - mcps.WaitForCondition(mcpName, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue) - } - - }) - BeforeEach(func() { - if discovery.Enabled() && testutils.ProfileNotFound { - Skip("Discovery mode enabled, performance profile not found") - } - }) - Context("Additional kubelet arguments", func() { - It("[test_id:45488]Test performance profile annotation for changing multiple kubelet settings", func() { - profile.Annotations = map[string]string{ - "kubeletconfig.experimental": "{\"allowedUnsafeSysctls\":[\"net.core.somaxconn\",\"kernel.msg*\"],\"systemReserved\":{\"memory\":\"300Mi\"},\"kubeReserved\":{\"memory\":\"768Mi\"},\"imageMinimumGCAge\":\"3m\"}", - } - annotations, err := json.Marshal(profile.Annotations) - Expect(err).ToNot(HaveOccurred()) - - By("Applying changes in performance profile and waiting until mcp will start updating") - Expect(testclient.Client.Patch(context.TODO(), profile, - client.RawPatch( - types.JSONPatchType, - []byte(fmt.Sprintf(`[{ "op": "replace", "path": "/metadata/annotations", "value": %s }]`, annotations)), - ), - )).ToNot(HaveOccurred()) - mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdating, corev1.ConditionTrue) - By("Waiting when mcp finishes updates") - mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue) - for _, node := range workerRTNodes { - kubeletConfig, err := nodes.GetKubeletConfig(&node) - Expect(err).ToNot(HaveOccurred()) - sysctlsValue := kubeletConfig.AllowedUnsafeSysctls - Expect(sysctlsValue).Should(ContainElements("net.core.somaxconn", "kernel.msg*")) - Expect(kubeletConfig.KubeReserved["memory"]).To(Equal("768Mi")) - Expect(kubeletConfig.ImageMinimumGCAge.Seconds()).To(Equal(180)) - } - kubeletArguments := []string{"/bin/bash", "-c", "ps -ef | grep 
kubelet | grep config"} - for _, node := range workerRTNodes { - stdout, err := nodes.ExecCommandOnNode(kubeletArguments, &node) - Expect(err).ToNot(HaveOccurred()) - Expect(strings.Contains(stdout, "300Mi")).To(BeTrue()) - } - }) - Context("When setting cpu manager related parameters", func() { - It("[test_id:45493]Should not override performance-addon-operator values", func() { - cpuManagerAnnotation := map[string]string{ - "kubeletconfig.experimental": "{\"cpuManagerPolicy\":\"static\",\"cpuManagerReconcilePeriod\":\"5s\"}", - } - profile.SetAnnotations(cpuManagerAnnotation) - By("Applying changes in performance profile and waiting until mcp will start updating") - profiles.UpdateWithRetry(profile) - mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdating, corev1.ConditionTrue) - By("Waiting when mcp finishes updates") - mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue) - for _, node := range workerRTNodes { - kubeletConfig, err := nodes.GetKubeletConfig(&node) - Expect(err).ToNot(HaveOccurred()) - Expect(kubeletConfig.CPUManagerPolicy).Should(Equal("static")) - Expect(kubeletConfig.CPUManagerReconcilePeriod.Seconds()).To(Equal(5)) - } - }) - }) - It("[test_id:45490]Test memory reservation changes", func() { - // In this test case we are testing if after applying reserving memory for - // systemReserved and KubeReserved, the allocatable is reduced and Allocatable - // Verify that Allocatable = Node capacity - (kubereserved + systemReserved + EvictionMemory) - profile.Annotations = map[string]string{ - "kubeletconfig.experimental": "{\"systemReserved\":{\"memory\":\"300Mi\"},\"kubeReserved\":{\"memory\":\"768Mi\"}}", - } - annotations, err := json.Marshal(profile.Annotations) - Expect(err).ToNot(HaveOccurred()) - - By("Applying changes in performance profile and waiting until mcp will start updating") - Expect(testclient.Client.Patch(context.TODO(), profile, - client.RawPatch( - types.JSONPatchType, - []byte(fmt.Sprintf(`[{ "op": "replace", "path": "/metadata/annotations", "value": %s }]`, annotations)), - ), - )).ToNot(HaveOccurred()) - mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdating, corev1.ConditionTrue) - - By("Waiting when mcp finishes updates") - mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue) - for _, node := range workerRTNodes { - kubeletConfig, err := nodes.GetKubeletConfig(&node) - Expect(err).ToNot(HaveOccurred()) - totalCapactity := node.Status.Capacity.Memory().MilliValue() - evictionMemory := kubeletConfig.EvictionHard["memory.available"] - kubeReserved := kubeletConfig.KubeReserved["memory"] - evictionMemoryInt, err := strconv.ParseInt(strings.TrimSuffix(evictionMemory, "Mi"), 10, 64) - kubeReservedMemoryInt, err := strconv.ParseInt(strings.TrimSuffix(kubeReserved, "Mi"), 10, 64) - systemReservedResource := resource.NewQuantity(300*1024*1024, resource.BinarySI) - kubeReservedMemoryResource := resource.NewQuantity(kubeReservedMemoryInt*1024*1024, resource.BinarySI) - evictionMemoryResource := resource.NewQuantity(evictionMemoryInt*1024*1024, resource.BinarySI) - totalKubeMemory := systemReservedResource.MilliValue() + kubeReservedMemoryResource.MilliValue() + evictionMemoryResource.MilliValue() - calculatedAllocatable := totalCapactity - totalKubeMemory - currentAllocatable := node.Status.Allocatable.Memory().MilliValue() - Expect(calculatedAllocatable).To(Equal(currentAllocatable)) - } - }) - It("[test_id:45495] 
Test setting PAO managed parameters", func() { - profile.Annotations = map[string]string{ - "kubeletconfig.experimental": "{\"topologyManagerPolicy\":\"single-numa-node\"}", - } - annotations, err := json.Marshal(profile.Annotations) - Expect(err).ToNot(HaveOccurred()) - - By("Applying changes in performance profile and waiting until mcp will start updating") - Expect(testclient.Client.Patch(context.TODO(), profile, - client.RawPatch( - types.JSONPatchType, - []byte(fmt.Sprintf(`[{ "op": "replace", "path": "/metadata/annotations", "value": %s }]`, annotations)), - ), - )).ToNot(HaveOccurred()) - mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdating, corev1.ConditionTrue) - By("Waiting when mcp finishes updates") - mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue) - for _, node := range workerRTNodes { - kubeletConfig, err := nodes.GetKubeletConfig(&node) - Expect(err).ToNot(HaveOccurred()) - Expect(kubeletConfig.TopologyManagerPolicy).To(Equal("single-numa-node")) - } - }) - It("[test_id:45489] Verify settings are reverted to default profile", func() { - By("Reverting the Profile") - Expect(testclient.Client.Patch(context.TODO(), profile, - client.RawPatch( - types.JSONPatchType, - []byte(fmt.Sprintf(`[{ "op": "remove", "path": "/metadata/annotations/kubeletconfig.experimental"}]`)), - ), - )).ToNot(HaveOccurred()) - mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue) - kubeletArguments := []string{"/bin/bash", "-c", "ps -ef | grep kubelet | grep config"} - for _, node := range workerRTNodes { - kubeletConfig, err := nodes.GetKubeletConfig(&node) - Expect(err).ToNot(HaveOccurred()) - Expect(kubeletConfig.AllowedUnsafeSysctls).To(Equal(nil)) - Expect(kubeletConfig.KubeReserved["memory"]).ToNot(Equal("768Mi")) - Expect(kubeletConfig.ImageMinimumGCAge.Seconds()).ToNot(Equal(180)) - } - for _, node := range workerRTNodes { - stdout, err := nodes.ExecCommandOnNode(kubeletArguments, &node) - Expect(err).ToNot(HaveOccurred()) - Expect(strings.Contains(stdout, "300Mi")).To(BeTrue()) - } - - }) - - }) -}) diff --git a/test/e2e/pao/functests/2_performance_update/test_suite_performance_update_test.go b/test/e2e/pao/functests/2_performance_update/test_suite_performance_update_test.go deleted file mode 100644 index 0972ae252..000000000 --- a/test/e2e/pao/functests/2_performance_update/test_suite_performance_update_test.go +++ /dev/null @@ -1,51 +0,0 @@ -//go:build !unittests -// +build !unittests - -package __performance_update_test - -import ( - "context" - "testing" - "time" - - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - - "k8s.io/apimachinery/pkg/api/errors" - - ginkgo_reporters "kubevirt.io/qe-tools/pkg/ginkgo-reporters" - - testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils" - testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/junit" - testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/log" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/namespaces" -) - -var _ = BeforeSuite(func() { - Expect(testclient.ClientsEnabled).To(BeTrue()) - // create test namespace - err := testclient.Client.Create(context.TODO(), namespaces.TestingNamespace) - if errors.IsAlreadyExists(err) { - testlog.Warning("test namespace already exists, that is unexpected") - return - } - Expect(err).ToNot(HaveOccurred()) -}) - -var _ = AfterSuite(func() { - err := testclient.Client.Delete(context.TODO(), namespaces.TestingNamespace) - Expect(err).ToNot(HaveOccurred()) - err = namespaces.WaitForDeletion(testutils.NamespaceTesting, 5*time.Minute) -}) - -func TestPerformanceUpdate(t *testing.T) { - RegisterFailHandler(Fail) - - rr := []Reporter{} - if ginkgo_reporters.Polarion.Run { - rr = append(rr, &ginkgo_reporters.Polarion) - } - rr = append(rr, junit.NewJUnitReporter("performance_update")) - RunSpecsWithDefaultAndCustomReporters(t, "Performance Addon Operator Update e2e tests", rr) -} diff --git a/test/e2e/pao/functests/2_performance_update/updating_profile.go b/test/e2e/pao/functests/2_performance_update/updating_profile.go deleted file mode 100644 index f3a2b08a2..000000000 --- a/test/e2e/pao/functests/2_performance_update/updating_profile.go +++ /dev/null @@ -1,589 +0,0 @@ -package __performance_update - -import ( - "context" - "encoding/json" - "fmt" - "strconv" - "strings" - "time" - - . "github.com/onsi/ginkgo" - "github.com/onsi/ginkgo/extensions/table" - . 
"github.com/onsi/gomega" - machineconfigv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" - "k8s.io/utils/pointer" - "sigs.k8s.io/controller-runtime/pkg/client" - - performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2" - "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components" - testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils" - testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/discovery" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/mcps" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/nodes" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/profiles" -) - -type checkFunction func(*corev1.Node) (string, error) - -var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance profile", func() { - var workerRTNodes []corev1.Node - var profile, initialProfile *performancev2.PerformanceProfile - var performanceMCP string - var err error - - chkCmdLine := []string{"cat", "/proc/cmdline"} - chkKubeletConfig := []string{"cat", "/rootfs/etc/kubernetes/kubelet.conf"} - chkIrqbalance := []string{"cat", "/rootfs/etc/sysconfig/irqbalance"} - - chkCmdLineFn := func(node *corev1.Node) (string, error) { - return nodes.ExecCommandOnNode(chkCmdLine, node) - } - chkKubeletConfigFn := func(node *corev1.Node) (string, error) { - return nodes.ExecCommandOnNode(chkKubeletConfig, node) - } - - chkHugepages2MFn := func(node *corev1.Node) (string, error) { - count, err := countHugepagesOnNode(node, 2) - if err != nil { - return "", err - } - return strconv.Itoa(count), nil - } - - chkHugepages1GFn := func(node *corev1.Node) (string, error) { - count, err := countHugepagesOnNode(node, 1024) - if err != nil { - return "", err - } - return strconv.Itoa(count), nil - } - - nodeLabel := testutils.NodeSelectorLabels - - BeforeEach(func() { - if discovery.Enabled() && testutils.ProfileNotFound { - Skip("Discovery mode enabled, performance profile not found") - } - - workerRTNodes, err = nodes.GetByLabels(testutils.NodeSelectorLabels) - Expect(err).ToNot(HaveOccurred()) - workerRTNodes, err = nodes.MatchingOptionalSelector(workerRTNodes) - Expect(err).ToNot(HaveOccurred(), fmt.Sprintf("error looking for the optional selector: %v", err)) - Expect(workerRTNodes).ToNot(BeEmpty(), "cannot find RT enabled worker nodes") - profile, err = profiles.GetByNodeLabels(nodeLabel) - Expect(err).ToNot(HaveOccurred()) - performanceMCP, err = mcps.GetByProfile(profile) - Expect(err).ToNot(HaveOccurred()) - - // Verify that worker and performance MCP have updated state equals to true - for _, mcpName := range []string{testutils.RoleWorker, performanceMCP} { - mcps.WaitForCondition(mcpName, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue) - } - }) - - Context("Verify GloballyDisableIrqLoadBalancing Spec field", func() { - It("[test_id:36150] Verify that IRQ load balancing is enabled/disabled correctly", func() { - irqLoadBalancingDisabled := profile.Spec.GloballyDisableIrqLoadBalancing != nil && *profile.Spec.GloballyDisableIrqLoadBalancing - - Expect(profile.Spec.CPU.Isolated).NotTo(BeNil(), "expected isolated 
CPUs, found none") - isolatedCPUSet, err := cpuset.Parse(string(*profile.Spec.CPU.Isolated)) - Expect(err).ToNot(HaveOccurred()) - - verifyNodes := func() error { - var expectedBannedCPUs cpuset.CPUSet - if irqLoadBalancingDisabled { - expectedBannedCPUs = isolatedCPUSet - } else { - expectedBannedCPUs = cpuset.NewCPUSet() - } - - for _, node := range workerRTNodes { - By(fmt.Sprintf("verifying worker node %q", node.Name)) - - bannedCPUs, err := nodes.BannedCPUs(node) - Expect(err).ToNot(HaveOccurred(), "failed to extract the banned CPUs from node %s", node.Name) - - if !bannedCPUs.Equals(expectedBannedCPUs) { - return fmt.Errorf("banned CPUs %v do not match the expected mask %v on node %s", - bannedCPUs, expectedBannedCPUs, node.Name) - } - - smpAffinitySet, err := nodes.GetDefaultSmpAffinitySet(&node) - Expect(err).ToNot(HaveOccurred(), "failed to get default smp affinity") - - onlineCPUsSet, err := nodes.GetOnlineCPUsSet(&node) - Expect(err).ToNot(HaveOccurred(), "failed to get Online CPUs list") - - if irqLoadBalancingDisabled { - if !smpAffinitySet.Equals(onlineCPUsSet.Difference(isolatedCPUSet)) { - return fmt.Errorf("found default_smp_affinity %v, expected %v", - smpAffinitySet, onlineCPUsSet.Difference(isolatedCPUSet)) - } - } else { - if !smpAffinitySet.Equals(onlineCPUsSet) { - return fmt.Errorf("found default_smp_affinity %v, expected %v", - smpAffinitySet, onlineCPUsSet) - } - } - } - return nil - } - - err = verifyNodes() - Expect(err).ToNot(HaveOccurred()) - - By("Modifying profile") - initialProfile = profile.DeepCopy() - - irqLoadBalancingDisabled = !irqLoadBalancingDisabled - profile.Spec.GloballyDisableIrqLoadBalancing = &irqLoadBalancingDisabled - - spec, err := json.Marshal(profile.Spec) - Expect(err).ToNot(HaveOccurred()) - - By("Applying changes in performance profile and waiting until mcp will start updating") - Expect(testclient.Client.Patch(context.TODO(), profile, - client.RawPatch( - types.JSONPatchType, - []byte(fmt.Sprintf(`[{ "op": "replace", "path": "/spec", "value": %s }]`, spec)), - ), - )).ToNot(HaveOccurred()) - - defer func() { // return initial configuration - spec, err := json.Marshal(initialProfile.Spec) - Expect(err).ToNot(HaveOccurred()) - Expect(testclient.Client.Patch(context.TODO(), profile, - client.RawPatch( - types.JSONPatchType, - []byte(fmt.Sprintf(`[{ "op": "replace", "path": "/spec", "value": %s }]`, spec)), - ), - )).ToNot(HaveOccurred()) - }() - - Eventually(verifyNodes, 1*time.Minute, 10*time.Second).ShouldNot(HaveOccurred()) - }) - }) - - Context("Verify hugepages count split on two NUMA nodes", func() { - hpSize2M := performancev2.HugePageSize("2M") - - table.DescribeTable("Verify that profile parameters were updated", func(hpCntOnNuma0 int32, hpCntOnNuma1 int32) { - By("Verifying cluster configuration matches the requirement") - for _, node := range workerRTNodes { - numaInfo, err := nodes.GetNumaNodes(&node) - Expect(err).ToNot(HaveOccurred()) - if len(numaInfo) < 2 { - Skip(fmt.Sprintf("This test need 2 NUMA nodes.The number of NUMA nodes on node %s < 2", node.Name)) - } - } - //have total of 4 cpus so VMs can handle running the configuration - numaInfo, _ := nodes.GetNumaNodes(&workerRTNodes[0]) - cpuSlice := numaInfo[0][0:4] - isolated := performancev2.CPUSet(fmt.Sprintf("%d-%d", cpuSlice[2], cpuSlice[3])) - reserved := performancev2.CPUSet(fmt.Sprintf("%d-%d", cpuSlice[0], cpuSlice[1])) - - By("Modifying profile") - initialProfile = profile.DeepCopy() - profile.Spec.CPU = &performancev2.CPU{ - BalanceIsolated: 
pointer.BoolPtr(false), - Reserved: &reserved, - Isolated: &isolated, - } - profile.Spec.HugePages = &performancev2.HugePages{ - DefaultHugePagesSize: &hpSize2M, - Pages: []performancev2.HugePage{ - { - Count: hpCntOnNuma0, - Size: hpSize2M, - Node: pointer.Int32Ptr(0), - }, - { - Count: hpCntOnNuma1, - Size: hpSize2M, - Node: pointer.Int32Ptr(1), - }, - }, - } - profile.Spec.RealTimeKernel = &performancev2.RealTimeKernel{ - Enabled: pointer.BoolPtr(true), - } - - By("Verifying that mcp is ready for update") - mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue) - - spec, err := json.Marshal(profile.Spec) - Expect(err).ToNot(HaveOccurred()) - - By("Applying changes in performance profile and waiting until mcp will start updating") - Expect(testclient.Client.Patch(context.TODO(), profile, - client.RawPatch( - types.JSONPatchType, - []byte(fmt.Sprintf(`[{ "op": "replace", "path": "/spec", "value": %s }]`, spec)), - ), - )).ToNot(HaveOccurred()) - mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdating, corev1.ConditionTrue) - - By("Waiting when mcp finishes updates") - mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue) - - for _, node := range workerRTNodes { - for i := 0; i < 2; i++ { - nodeCmd := []string{"cat", hugepagesPathForNode(i, 2)} - result, err := nodes.ExecCommandOnNode(nodeCmd, &node) - Expect(err).ToNot(HaveOccurred()) - - t, err := strconv.Atoi(result) - Expect(err).ToNot(HaveOccurred()) - - if i == 0 { - Expect(int32(t)).To(Equal(hpCntOnNuma0)) - } else { - Expect(int32(t)).To(Equal(hpCntOnNuma1)) - } - } - } - }, - table.Entry("[test_id:45023] verify uneven split of hugepages between 2 numa nodes", int32(2), int32(1)), - table.Entry("[test_id:45024] verify even split between 2 numa nodes", int32(1), int32(1)), - ) - }) - - Context("Verify that all performance profile parameters can be updated", func() { - var removedKernelArgs string - - hpSize2M := performancev2.HugePageSize("2M") - hpSize1G := performancev2.HugePageSize("1G") - isolated := performancev2.CPUSet("1-2") - reserved := performancev2.CPUSet("0,3") - policy := "best-effort" - - // Modify profile and verify that MCO successfully updated the node - testutils.BeforeAll(func() { - By("Modifying profile") - initialProfile = profile.DeepCopy() - - profile.Spec.HugePages = &performancev2.HugePages{ - DefaultHugePagesSize: &hpSize2M, - Pages: []performancev2.HugePage{ - { - Count: 256, - Size: hpSize2M, - }, - { - Count: 3, - Size: hpSize1G, - }, - }, - } - profile.Spec.CPU = &performancev2.CPU{ - BalanceIsolated: pointer.BoolPtr(false), - Reserved: &reserved, - Isolated: &isolated, - } - profile.Spec.NUMA = &performancev2.NUMA{ - TopologyPolicy: &policy, - } - profile.Spec.RealTimeKernel = &performancev2.RealTimeKernel{ - Enabled: pointer.BoolPtr(false), - } - - if profile.Spec.AdditionalKernelArgs == nil { - By("AdditionalKernelArgs is empty. 
Checking only adding new arguments") - profile.Spec.AdditionalKernelArgs = append(profile.Spec.AdditionalKernelArgs, "new-argument=test") - } else { - removedKernelArgs = profile.Spec.AdditionalKernelArgs[0] - profile.Spec.AdditionalKernelArgs = append(profile.Spec.AdditionalKernelArgs[1:], "new-argument=test") - } - - By("Verifying that mcp is ready for update") - mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue) - - spec, err := json.Marshal(profile.Spec) - Expect(err).ToNot(HaveOccurred()) - - By("Applying changes in performance profile and waiting until mcp will start updating") - Expect(testclient.Client.Patch(context.TODO(), profile, - client.RawPatch( - types.JSONPatchType, - []byte(fmt.Sprintf(`[{ "op": "replace", "path": "/spec", "value": %s }]`, spec)), - ), - )).ToNot(HaveOccurred()) - mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdating, corev1.ConditionTrue) - - By("Waiting when mcp finishes updates") - mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue) - }) - - table.DescribeTable("Verify that profile parameters were updated", func(cmdFn checkFunction, parameter []string, shouldContain bool, useRegex bool) { - for _, node := range workerRTNodes { - for _, param := range parameter { - result, err := cmdFn(&node) - Expect(err).ToNot(HaveOccurred()) - matcher := ContainSubstring(param) - if useRegex { - matcher = MatchRegexp(param) - } - - if shouldContain { - Expect(result).To(matcher) - } else { - Expect(result).NotTo(matcher) - } - } - } - }, - table.Entry("[test_id:34081] verify that hugepages size and count updated", chkCmdLineFn, []string{"default_hugepagesz=2M", "hugepagesz=1G", "hugepages=3"}, true, false), - table.Entry("[test_id:28070] verify that hugepages updated (NUMA node unspecified)", chkCmdLineFn, []string{"hugepagesz=2M"}, true, false), - table.Entry("verify that the right number of hugepages 1G is available on the system", chkHugepages1GFn, []string{"3"}, true, false), - table.Entry("verify that the right number of hugepages 2M is available on the system", chkHugepages2MFn, []string{"256"}, true, false), - table.Entry("[test_id:28025] verify that cpu affinity mask was updated", chkCmdLineFn, []string{"tuned.non_isolcpus=.*9"}, true, true), - table.Entry("[test_id:28071] verify that cpu balancer disabled", chkCmdLineFn, []string{"isolcpus=domain,managed_irq,1-2"}, true, false), - table.Entry("[test_id:28071] verify that cpu balancer disabled", chkCmdLineFn, []string{"systemd.cpu_affinity=0,3"}, true, false), - // kubelet.conf changed formatting, there is a space after colons atm. 
Let's deal with both cases with a regex - table.Entry("[test_id:28935] verify that reservedSystemCPUs was updated", chkKubeletConfigFn, []string{`"reservedSystemCPUs": ?"0,3"`}, true, true), - table.Entry("[test_id:28760] verify that topologyManager was updated", chkKubeletConfigFn, []string{`"topologyManagerPolicy": ?"best-effort"`}, true, true), - ) - - It("[test_id:27738] should succeed to disable the RT kernel", func() { - for _, node := range workerRTNodes { - err := nodes.HasPreemptRTKernel(&node) - Expect(err).To(HaveOccurred()) - } - }) - - It("[test_id:28612]Verify that Kernel arguments can me updated (added, removed) thru performance profile", func() { - for _, node := range workerRTNodes { - cmdline, err := nodes.ExecCommandOnNode(chkCmdLine, &node) - Expect(err).ToNot(HaveOccurred(), "failed to execute %s", chkCmdLine) - - // Verifying that new argument was added - Expect(cmdline).To(ContainSubstring("new-argument=test")) - - // Verifying that one of old arguments was removed - if removedKernelArgs != "" { - Expect(cmdline).NotTo(ContainSubstring(removedKernelArgs), "%s should be removed from /proc/cmdline", removedKernelArgs) - } - } - }) - - It("[test_id:22764] verify that by default RT kernel is disabled", func() { - conditionUpdating := machineconfigv1.MachineConfigPoolUpdating - - if profile.Spec.RealTimeKernel == nil || *profile.Spec.RealTimeKernel.Enabled == true { - Skip("Skipping test - This test expects RT Kernel to be disabled. Found it to be enabled or nil.") - } - - By("Applying changes in performance profile") - profile.Spec.RealTimeKernel = nil - spec, err := json.Marshal(profile.Spec) - Expect(err).ToNot(HaveOccurred()) - - By("Applying changes in performance profile and waiting until mcp will start updating") - Expect(testclient.Client.Patch(context.TODO(), profile, - client.RawPatch( - types.JSONPatchType, - []byte(fmt.Sprintf(`[{ "op": "replace", "path": "/spec", "value": %s }]`, spec)), - ), - )).ToNot(HaveOccurred()) - - Expect(profile.Spec.RealTimeKernel).To(BeNil(), "real time kernel setting expected in profile spec but missing") - By("Checking that the updating MCP status will consistently stay false") - Consistently(func() corev1.ConditionStatus { - return mcps.GetConditionStatus(performanceMCP, conditionUpdating) - }, 30, 5).Should(Equal(corev1.ConditionFalse)) - - for _, node := range workerRTNodes { - err := nodes.HasPreemptRTKernel(&node) - Expect(err).To(HaveOccurred()) - } - }) - - It("Reverts back all profile configuration", func() { - // return initial configuration - spec, err := json.Marshal(initialProfile.Spec) - Expect(err).ToNot(HaveOccurred()) - Expect(testclient.Client.Patch(context.TODO(), profile, - client.RawPatch( - types.JSONPatchType, - []byte(fmt.Sprintf(`[{ "op": "replace", "path": "/spec", "value": %s }]`, spec)), - ), - )).ToNot(HaveOccurred()) - mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdating, corev1.ConditionTrue) - mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue) - }) - }) - - // TODO: we have a dependency between tests(that in general bad practice, but saves us some tests run time), - // once we will want to run tests in the random order or without failFast we will need to refactor tests - Context("Updating of nodeSelector parameter and node labels", func() { - var mcp *machineconfigv1.MachineConfigPool - var newCnfNode *corev1.Node - - newRole := "worker-test" - newLabel := fmt.Sprintf("%s/%s", testutils.LabelRole, newRole) - 
newNodeSelector := map[string]string{newLabel: ""} - - testutils.BeforeAll(func() { - nonPerformancesWorkers, err := nodes.GetNonPerformancesWorkers(profile.Spec.NodeSelector) - Expect(err).ToNot(HaveOccurred()) - if len(nonPerformancesWorkers) != 0 { - newCnfNode = &nonPerformancesWorkers[0] - } - }) - - JustBeforeEach(func() { - if newCnfNode == nil { - Skip("Skipping the test - cluster does not have another available worker node ") - } - }) - - It("[test_id:28440]Verifies that nodeSelector can be updated in performance profile", func() { - nodeLabel = newNodeSelector - newCnfNode.Labels[newLabel] = "" - Expect(testclient.Client.Update(context.TODO(), newCnfNode)).ToNot(HaveOccurred()) - - By("Creating new MachineConfigPool") - mcp = mcps.New(newRole, newNodeSelector) - err = testclient.Client.Create(context.TODO(), mcp) - Expect(err).ToNot(HaveOccurred()) - - By("Updating Node Selector performance profile") - profile.Spec.NodeSelector = newNodeSelector - spec, err := json.Marshal(profile.Spec) - Expect(err).ToNot(HaveOccurred()) - - By("Applying changes in performance profile and waiting until mcp will start updating") - Expect(testclient.Client.Patch(context.TODO(), profile, - client.RawPatch( - types.JSONPatchType, - []byte(fmt.Sprintf(`[{ "op": "replace", "path": "/spec", "value": %s }]`, spec)), - ), - )).ToNot(HaveOccurred()) - mcps.WaitForCondition(newRole, machineconfigv1.MachineConfigPoolUpdating, corev1.ConditionTrue) - - By("Waiting when MCP finishes updates and verifying new node has updated configuration") - mcps.WaitForCondition(newRole, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue) - - kblcfg, err := nodes.ExecCommandOnNode(chkKubeletConfig, newCnfNode) - Expect(err).ToNot(HaveOccurred(), "failed to execute %s", chkKubeletConfig) - Expect(kblcfg).To(ContainSubstring("topologyManagerPolicy")) - - cmdline, err := nodes.ExecCommandOnNode(chkCmdLine, newCnfNode) - Expect(err).ToNot(HaveOccurred(), "failed to execute %s", chkCmdLine) - Expect(cmdline).To(ContainSubstring("tuned.non_isolcpus")) - }) - - It("[test_id:27484]Verifies that node is reverted to plain worker when the extra labels are removed", func() { - By("Deleting cnf labels from the node") - for l := range profile.Spec.NodeSelector { - delete(newCnfNode.Labels, l) - } - label, err := json.Marshal(newCnfNode.Labels) - Expect(err).ToNot(HaveOccurred()) - Expect(testclient.Client.Patch(context.TODO(), newCnfNode, - client.RawPatch( - types.JSONPatchType, - []byte(fmt.Sprintf(`[{ "op": "replace", "path": "/metadata/labels", "value": %s }]`, label)), - ), - )).ToNot(HaveOccurred()) - mcps.WaitForCondition(testutils.RoleWorker, machineconfigv1.MachineConfigPoolUpdating, corev1.ConditionTrue) - - By("Waiting when MCP Worker complete updates and verifying that node reverted back configuration") - mcps.WaitForCondition(testutils.RoleWorker, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue) - - // Check if node is Ready - for i := range newCnfNode.Status.Conditions { - if newCnfNode.Status.Conditions[i].Type == corev1.NodeReady { - Expect(newCnfNode.Status.Conditions[i].Status).To(Equal(corev1.ConditionTrue)) - } - } - - // check that the configs reverted - err = nodes.HasPreemptRTKernel(newCnfNode) - Expect(err).To(HaveOccurred()) - - cmdline, err := nodes.ExecCommandOnNode(chkCmdLine, newCnfNode) - Expect(err).ToNot(HaveOccurred(), "failed to execute %s", chkCmdLine) - Expect(cmdline).NotTo(ContainSubstring("tuned.non_isolcpus")) - - kblcfg, err := nodes.ExecCommandOnNode(chkKubeletConfig, 
newCnfNode) - Expect(err).ToNot(HaveOccurred(), "failed to execute %s", chkKubeletConfig) - Expect(kblcfg).NotTo(ContainSubstring("reservedSystemCPUs")) - - Expect(profile.Spec.CPU.Reserved).NotTo(BeNil()) - reservedCPU := string(*profile.Spec.CPU.Reserved) - cpuMask, err := components.CPUListToHexMask(reservedCPU) - Expect(err).ToNot(HaveOccurred(), "failed to list in Hex %s", reservedCPU) - irqBal, err := nodes.ExecCommandOnNode(chkIrqbalance, newCnfNode) - Expect(err).ToNot(HaveOccurred(), "failed to execute %s", chkIrqbalance) - Expect(irqBal).NotTo(ContainSubstring(cpuMask)) - }) - - It("Reverts back nodeSelector and cleaning up leftovers", func() { - var selectorLabels []string - for k, v := range testutils.NodeSelectorLabels { - selectorLabels = append(selectorLabels, fmt.Sprintf(`"%s":"%s"`, k, v)) - } - nodeSelector := strings.Join(selectorLabels, ",") - Expect(testclient.Client.Patch(context.TODO(), profile, - client.RawPatch( - types.JSONPatchType, - []byte(fmt.Sprintf(`[{ "op": "replace", "path": "/spec/nodeSelector", "value": {%s} }]`, nodeSelector)), - ), - )).ToNot(HaveOccurred()) - - updatedProfile := &performancev2.PerformanceProfile{} - Eventually(func() string { - key := types.NamespacedName{ - Name: profile.Name, - Namespace: profile.Namespace, - } - Expect(testclient.Client.Get(context.TODO(), key, updatedProfile)).ToNot(HaveOccurred()) - var updatedSelectorLabels []string - for k, v := range updatedProfile.Spec.NodeSelector { - updatedSelectorLabels = append(updatedSelectorLabels, fmt.Sprintf(`"%s":"%s"`, k, v)) - } - updatedNodeSelector := strings.Join(updatedSelectorLabels, ",") - return updatedNodeSelector - }, 2*time.Minute, 15*time.Second).Should(Equal(nodeSelector)) - - performanceMCP, err = mcps.GetByProfile(updatedProfile) - Expect(err).ToNot(HaveOccurred()) - Expect(testclient.Client.Delete(context.TODO(), mcp)).ToNot(HaveOccurred()) - mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue) - - // revert node label to have the expected value - nodeLabel = testutils.NodeSelectorLabels - }) - }) -}) - -func hugepagesPathForNode(nodeID, sizeINMb int) string { - return fmt.Sprintf("/sys/devices/system/node/node%d/hugepages/hugepages-%dkB/nr_hugepages", nodeID, sizeINMb*1024) -} - -func countHugepagesOnNode(node *corev1.Node, sizeInMb int) (int, error) { - numaInfo, err := nodes.GetNumaNodes(node) - if err != nil { - return 0, err - } - count := 0 - for i := 0; i < len(numaInfo); i++ { - nodeCmd := []string{"cat", hugepagesPathForNode(i, sizeInMb)} - result, err := nodes.ExecCommandOnNode(nodeCmd, node) - if err != nil { - return 0, err - } - t, err := strconv.Atoi(result) - if err != nil { - return 0, err - } - count += t - } - return count, nil -} diff --git a/test/e2e/pao/functests/3_performance_status/status.go b/test/e2e/pao/functests/3_performance_status/status.go deleted file mode 100644 index bc6ba8b9b..000000000 --- a/test/e2e/pao/functests/3_performance_status/status.go +++ /dev/null @@ -1,224 +0,0 @@ -package __performance_status - -import ( - "context" - "encoding/json" - - ign2types "github.com/coreos/ignition/config/v2_2/types" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - tunedv1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/tuned/v1" - v1 "github.com/openshift/custom-resource-status/conditions/v1" - machineconfigv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" - "k8s.io/apimachinery/pkg/runtime" - - "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components" - testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils" - testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/discovery" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/mcps" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/nodes" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/profiles" - - corev1 "k8s.io/api/core/v1" - nodev1beta1 "k8s.io/api/node/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - utilrand "k8s.io/apimachinery/pkg/util/rand" -) - -var _ = Describe("Status testing of performance profile", func() { - var ( - workerCNFNodes []corev1.Node - err error - clean func() error - ) - - BeforeEach(func() { - if discovery.Enabled() && testutils.ProfileNotFound { - Skip("Discovery mode enabled, performance profile not found") - } - workerCNFNodes, err = nodes.GetByLabels(testutils.NodeSelectorLabels) - Expect(err).ToNot(HaveOccurred()) - workerCNFNodes, err = nodes.MatchingOptionalSelector(workerCNFNodes) - Expect(err).ToNot(HaveOccurred(), "error looking for the optional selector: %v", err) - Expect(workerCNFNodes).ToNot(BeEmpty()) - // initialized clean function handler to be nil on every It execution - clean = nil - }) - - AfterEach(func() { - if clean != nil { - clean() - } - - }) - - Context("[rfe_id:28881][performance] Performance Addons detailed status", func() { - - It("[test_id:30894] Tuned status name tied to Performance Profile", func() { - profile, err := profiles.GetByNodeLabels(testutils.NodeSelectorLabels) - Expect(err).ToNot(HaveOccurred()) - key := types.NamespacedName{ - Name: components.GetComponentName(profile.Name, components.ProfileNamePerformance), - Namespace: components.NamespaceNodeTuningOperator, - } - tuned := &tunedv1.Tuned{} - err = testclient.GetWithRetry(context.TODO(), key, tuned) - Expect(err).ToNot(HaveOccurred(), "cannot find the Cluster Node Tuning Operator Tuned object "+key.String()) - tunedNamespacedname := types.NamespacedName{ - Name: components.GetComponentName(profile.Name, components.ProfileNamePerformance), - Namespace: components.NamespaceNodeTuningOperator, - } - tunedStatus := tunedNamespacedname.String() - Expect(profile.Status.Tuned).NotTo(BeNil()) - Expect(*profile.Status.Tuned).To(Equal(tunedStatus)) - }) - - It("[test_id:33791] Should include the generated runtime class name", func() { - profile, err := profiles.GetByNodeLabels(testutils.NodeSelectorLabels) - Expect(err).ToNot(HaveOccurred()) - - key := types.NamespacedName{ - Name: components.GetComponentName(profile.Name, components.ComponentNamePrefix), - Namespace: metav1.NamespaceAll, - } - runtimeClass := &nodev1beta1.RuntimeClass{} - err = testclient.GetWithRetry(context.TODO(), key, runtimeClass) - Expect(err).ToNot(HaveOccurred(), "cannot find the RuntimeClass object "+key.String()) - - Expect(profile.Status.RuntimeClass).NotTo(BeNil()) - 
Expect(*profile.Status.RuntimeClass).To(Equal(runtimeClass.Name)) - }) - - It("[test_id:29673] Machine config pools status tied to Performance Profile", func() { - // Creating bad MC that leads to degraded state - By("Creating bad MachineConfig") - badMC := createBadMachineConfig("bad-mc") - err = testclient.Client.Create(context.TODO(), badMC) - Expect(err).ToNot(HaveOccurred()) - - By("Wait for MCP condition to be Degraded") - profile, err := profiles.GetByNodeLabels(testutils.NodeSelectorLabels) - Expect(err).ToNot(HaveOccurred()) - performanceMCP, err := mcps.GetByProfile(profile) - Expect(err).ToNot(HaveOccurred()) - mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolDegraded, corev1.ConditionTrue) - mcpConditionReason := mcps.GetConditionReason(performanceMCP, machineconfigv1.MachineConfigPoolDegraded) - profileConditionMessage := profiles.GetConditionMessage(testutils.NodeSelectorLabels, v1.ConditionDegraded) - // Verify the status reason of performance profile - Expect(profileConditionMessage).To(ContainSubstring(mcpConditionReason)) - - By("Deleting bad MachineConfig and waiting when Degraded state is removed") - err = testclient.Client.Delete(context.TODO(), badMC) - Expect(err).ToNot(HaveOccurred()) - - mcps.WaitForCondition(performanceMCP, machineconfigv1.MachineConfigPoolUpdated, corev1.ConditionTrue) - }) - - It("[test_id:40402] Tuned profile status tied to Performance Profile", func() { - // During this test we're creating additional synthetic tuned CR by invoking the createrBadTuned function. - // This synthetic tuned will look for a tuned profile which doesn't exist. - // This tuned CR will be applied on the profiles.tuned.openshift.io CR (there is such profile per node) - // which is associate to the node object with the same name. - // The connection between the node object and the tuned object is via the MachineConfigLables, worker-cnf in our case. 
- ns := "openshift-cluster-node-tuning-operator" - tunedName := "openshift-cause-tuned-failure" - - // Make sure to clean badTuned object even if the It threw an error - clean = func() error { - key := types.NamespacedName{ - Name: tunedName, - Namespace: ns, - } - runtimeClass := &tunedv1.Tuned{} - err := testclient.Client.Get(context.TODO(), key, runtimeClass) - // if err != nil probably the resource were already deleted - if err == nil { - testclient.Client.Delete(context.TODO(), runtimeClass) - } - return err - } - - // Creating bad Tuned object that leads to degraded state - badTuned := createBadTuned(tunedName, ns) - err = testclient.Client.Create(context.TODO(), badTuned) - Expect(err).ToNot(HaveOccurred()) - - By("Waiting for performance profile condition to be Degraded") - profiles.WaitForCondition(testutils.NodeSelectorLabels, v1.ConditionDegraded, corev1.ConditionTrue) - - By("Deleting bad Tuned and waiting when Degraded state is removed") - err = testclient.Client.Delete(context.TODO(), badTuned) - profiles.WaitForCondition(testutils.NodeSelectorLabels, v1.ConditionAvailable, corev1.ConditionTrue) - }) - }) -}) - -func createBadMachineConfig(name string) *machineconfigv1.MachineConfig { - rawIgnition, _ := json.Marshal( - &ign2types.Config{ - Ignition: ign2types.Ignition{ - Version: ign2types.MaxVersion.String(), - }, - Storage: ign2types.Storage{ - Disks: []ign2types.Disk{ - { - Device: "/one", - }, - }, - }, - }, - ) - - return &machineconfigv1.MachineConfig{ - TypeMeta: metav1.TypeMeta{ - APIVersion: machineconfigv1.SchemeGroupVersion.String(), - }, - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: map[string]string{"machineconfiguration.openshift.io/role": testutils.RoleWorkerCNF}, - UID: types.UID(utilrand.String(5)), - }, - Spec: machineconfigv1.MachineConfigSpec{ - OSImageURL: "", - Config: runtime.RawExtension{ - Raw: rawIgnition, - }, - }, - } -} - -func createBadTuned(name, ns string) *tunedv1.Tuned { - priority := uint64(20) - // include=profile-does-not-exist - // points to tuned profile which doesn't exist - data := "[main]\nsummary=A Tuned daemon profile that does not exist\ninclude=profile-does-not-exist" - - return &tunedv1.Tuned{ - TypeMeta: metav1.TypeMeta{ - APIVersion: tunedv1.SchemeGroupVersion.String(), - }, - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: ns, - UID: types.UID(utilrand.String(5)), - }, - Spec: tunedv1.TunedSpec{ - Profile: []tunedv1.TunedProfile{ - { - Name: &name, - Data: &data, - }, - }, - Recommend: []tunedv1.TunedRecommend{ - { - MachineConfigLabels: map[string]string{"machineconfiguration.openshift.io/role": testutils.RoleWorkerCNF}, - Priority: &priority, - Profile: &name, - }, - }, - }, - } - -} diff --git a/test/e2e/pao/functests/3_performance_status/test_suite_performance_status_test.go b/test/e2e/pao/functests/3_performance_status/test_suite_performance_status_test.go deleted file mode 100644 index a6ab9058c..000000000 --- a/test/e2e/pao/functests/3_performance_status/test_suite_performance_status_test.go +++ /dev/null @@ -1,50 +0,0 @@ -//go:build !unittests -// +build !unittests - -package __performance_status_test - -import ( - "context" - "testing" - "time" - - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - - "k8s.io/apimachinery/pkg/api/errors" - - ginkgo_reporters "kubevirt.io/qe-tools/pkg/ginkgo-reporters" - - testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils" - testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/junit" - testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/log" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/namespaces" -) - -var _ = BeforeSuite(func() { - // create test namespace - err := testclient.Client.Create(context.TODO(), namespaces.TestingNamespace) - if errors.IsAlreadyExists(err) { - testlog.Warning("test namespace already exists, that is unexpected") - return - } - Expect(err).ToNot(HaveOccurred()) -}) - -var _ = AfterSuite(func() { - err := testclient.Client.Delete(context.TODO(), namespaces.TestingNamespace) - Expect(err).ToNot(HaveOccurred()) - err = namespaces.WaitForDeletion(testutils.NamespaceTesting, 5*time.Minute) -}) - -func TestPerformanceUpdate(t *testing.T) { - RegisterFailHandler(Fail) - - rr := []Reporter{} - if ginkgo_reporters.Polarion.Run { - rr = append(rr, &ginkgo_reporters.Polarion) - } - rr = append(rr, junit.NewJUnitReporter("performance_status")) - RunSpecsWithDefaultAndCustomReporters(t, "Performance Addon Operator Status e2e tests", rr) -} diff --git a/test/e2e/pao/functests/4_latency/latency.go b/test/e2e/pao/functests/4_latency/latency.go deleted file mode 100644 index 307ed08e9..000000000 --- a/test/e2e/pao/functests/4_latency/latency.go +++ /dev/null @@ -1,511 +0,0 @@ -package __latency - -import ( - "context" - "fmt" - "math" - "os" - "path" - "regexp" - "strconv" - "strings" - "time" - - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - - performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2" - "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components" - testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils" - testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/discovery" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/events" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/images" - testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/log" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/nodes" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/pods" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/profiles" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" - "k8s.io/utils/pointer" -) - -const ( - oslatTestName = "oslat" - cyclictestTestName = "cyclictest" - hwlatdetectTestName = "hwlatdetect" - defaultTestDelay = 0 - defaultTestRun = false - defaultTestRuntime = "300" - defaultMaxLatency = -1 - defaultTestCpus = -1 - minCpuAmountForOslat = 2 -) - -var ( - latencyTestDelay = defaultTestDelay - latencyTestRun = defaultTestRun - latencyTestRuntime = defaultTestRuntime - maximumLatency = defaultMaxLatency - latencyTestCpus = defaultTestCpus -) - -// LATENCY_TEST_DELAY delay the run of the binary, can be useful to give time to the CPU manager reconcile loop -// to update the default CPU pool -// LATENCY_TEST_RUN: indicates if the latency test should run -// LATENCY_TEST_RUNTIME: the amount of time in seconds that the latency test should run -// LATENCY_TEST_CPUS: the amount of CPUs the pod which run the latency test should request - -var _ = Describe("[performance] Latency Test", func() { - var workerRTNode *corev1.Node - var profile *performancev2.PerformanceProfile - var latencyTestPod *corev1.Pod - var err error - var logName string - - BeforeEach(func() { - logName = time.Now().Format("20060102150405") - - latencyTestRun, err = getLatencyTestRun() - Expect(err).ToNot(HaveOccurred()) - - latencyTestDelay, err = getLatencyTestDelay() - Expect(err).ToNot(HaveOccurred()) - - latencyTestCpus, err = getLatencyTestCpus() - Expect(err).ToNot(HaveOccurred()) - - latencyTestRuntime, err = getLatencyTestRuntime() - Expect(err).ToNot(HaveOccurred()) - - if !latencyTestRun { - Skip("Skip the latency test, the LATENCY_TEST_RUN set to false") - } - - if discovery.Enabled() && testutils.ProfileNotFound { - Skip("Discovery mode enabled, performance profile not found") - } - - profile, err = profiles.GetByNodeLabels(testutils.NodeSelectorLabels) - Expect(err).ToNot(HaveOccurred()) - - workerRTNodes, err := nodes.GetByLabels(testutils.NodeSelectorLabels) - Expect(err).ToNot(HaveOccurred()) - - workerRTNodes, err = nodes.MatchingOptionalSelector(workerRTNodes) - Expect(err).ToNot(HaveOccurred(), "error looking for the optional selector: %v", err) - - Expect(workerRTNodes).ToNot(BeEmpty()) - - //At least one worker node should have cpu.Allocatable greater than the quantity requested by each test, else skip the test - workerRTNodesWithSufficientCpu := 
nodes.GetByCpuAllocatable(workerRTNodes, latencyTestCpus) - if len(workerRTNodesWithSufficientCpu) == 0 { - Skip("Insufficient cpu to run the test") - - } - workerRTNode = &workerRTNodesWithSufficientCpu[0] - - }) - - AfterEach(func() { - removeLogfile(workerRTNode, logName) - err = testclient.Client.Delete(context.TODO(), latencyTestPod) - if err != nil { - testlog.Error(err) - } - - err = pods.WaitForDeletion(latencyTestPod, pods.DefaultDeletionTimeout*time.Second) - if err != nil { - testlog.Error(err) - } - - maximumLatency = -1 - }) - - Context("with the oslat image", func() { - testName := oslatTestName - - BeforeEach(func() { - maximumLatency, err = getMaximumLatency(testName) - Expect(err).ToNot(HaveOccurred()) - - if profile.Spec.CPU.Isolated == nil { - Skip(fmt.Sprintf("Skip the oslat test, the profile %q does not have isolated CPUs", profile.Name)) - } - - isolatedCpus := cpuset.MustParse(string(*profile.Spec.CPU.Isolated)) - // we require at least two CPUs to run oslat test, because one CPU should be used to run the main oslat thread - // we can not use all isolated CPUs, because if reserved and isolated include all node CPUs, and reserved CPUs - // do not calculated into the Allocated, at least part of time of one of isolated CPUs will be used to run - // other node containers - // at least two isolated CPUs to run oslat + one isolated CPU used by other containers on the node = at least 3 isolated CPUs - if isolatedCpus.Size() < (minCpuAmountForOslat + 1) { - Skip(fmt.Sprintf("Skip the oslat test, the profile %q has less than %d isolated CPUs", profile.Name, minCpuAmountForOslat)) - } - if latencyTestCpus < minCpuAmountForOslat && latencyTestCpus != defaultTestCpus { - Skip(fmt.Sprintf("Skip the oslat test, LATENCY_TEST_CPUS is less than the minimum CPUs amount %d", minCpuAmountForOslat)) - } - }) - - It("should succeed", func() { - oslatArgs := []string{ - fmt.Sprintf("-runtime=%s", latencyTestRuntime), - } - latencyTestPod = getLatencyTestPod(profile, workerRTNode, testName, oslatArgs, logName) - createLatencyTestPod(latencyTestPod, workerRTNode, logName) - logFileContent := getLogFile(workerRTNode, logName) - - // verify the maximum latency only when it requested, because this value can be very different - // on different systems - if maximumLatency == -1 { - testlog.Info(logFileContent) - Skip("no maximum latency value provided, skip buckets latency check") - } - - latencies := extractLatencyValues(logName, `Maximum:\t*([\s\d]*)\(us\)`, workerRTNode) - latenciesList := strings.Split(latencies, " ") - for _, lat := range latenciesList { - if lat == "" { - continue - } - curr, err := strconv.Atoi(lat) - Expect(err).ToNot(HaveOccurred()) - - Expect(curr < maximumLatency).To(BeTrue(), "The current latency %d is bigger than the expected one %d : \n %s", curr, maximumLatency, logFileContent) - - } - //Use Println here so that this output will be displayed upon executing the test binary - fmt.Println(logFileContent) - }) - }) - - Context("with the cyclictest image", func() { - testName := cyclictestTestName - - BeforeEach(func() { - maximumLatency, err = getMaximumLatency(testName) - Expect(err).ToNot(HaveOccurred()) - - if profile.Spec.CPU.Isolated == nil { - Skip(fmt.Sprintf("Skip the cyclictest test, the profile %q does not have isolated CPUs", profile.Name)) - } - }) - - It("should succeed", func() { - cyclictestArgs := []string{ - fmt.Sprintf("-duration=%s", latencyTestRuntime), - } - latencyTestPod = getLatencyTestPod(profile, workerRTNode, testName, cyclictestArgs, 
logName) - createLatencyTestPod(latencyTestPod, workerRTNode, logName) - logFileContent := getLogFile(workerRTNode, logName) - - // verify the maximum latency only when it requested, because this value can be very different - // on different systems - if maximumLatency == -1 { - testlog.Info(logFileContent) - Skip("no maximum latency value provided, skip buckets latency check") - } - latencies := extractLatencyValues(logName, `# Max Latencies:\t*\s*(.*)\s*\t*`, workerRTNode) - for _, lat := range strings.Split(latencies, " ") { - if lat == "" { - continue - } - - curr, err := strconv.Atoi(lat) - Expect(err).ToNot(HaveOccurred()) - - Expect(curr < maximumLatency).To(BeTrue(), "The current latency %d is bigger than the expected one %d : \n %s", curr, maximumLatency, logFileContent) - - } - //Use Println here so that this output will be displayed upon executing the test binary - fmt.Println(logFileContent) - }) - }) - - Context("with the hwlatdetect image", func() { - testName := hwlatdetectTestName - - BeforeEach(func() { - maximumLatency, err = getMaximumLatency(testName) - Expect(err).ToNot(HaveOccurred()) - }) - - It("should succeed", func() { - hardLimit := maximumLatency - if hardLimit == -1 { - // This value should be > than max latency, - // in order to prevent the hwlatdetect return with error 1 in case latency value is bigger than expected. - // in case latency value is bigger than expected, it will be handled on different flow. - hardLimit = 1000 - } - - hwlatdetectArgs := []string{ - fmt.Sprintf("-hardlimit=%d", hardLimit), - fmt.Sprintf("-duration=%s", latencyTestRuntime), - } - - // set the maximum latency for the test if needed - if maximumLatency != -1 { - hwlatdetectArgs = append(hwlatdetectArgs, fmt.Sprintf("-threshold=%d", maximumLatency)) - } - - latencyTestPod = getLatencyTestPod(profile, workerRTNode, testName, hwlatdetectArgs, logName) - createLatencyTestPod(latencyTestPod, workerRTNode, logName) - logFileContent := getLogFile(workerRTNode, logName) - - // here we don't need to parse the latency values. - // hwlatdetect will do that for us and exit with error if needed. 
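// Illustrative sketch only, not part of the original test: if one did want to
// double-check the hwlatdetect report from Go rather than rely on its exit
// status, the same "Samples exceeding threshold" line that the latency-testing
// suite greps for elsewhere in this patch could be asserted on here.
overThreshold := regexp.MustCompile(`Samples exceeding threshold: [^0]`)
Expect(overThreshold.MatchString(logFileContent)).To(BeFalse(),
    "hwlatdetect reported samples above the threshold:\n%s", logFileContent)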
- //Use Println here so that this output will be displayed upon executing the test binary - fmt.Println(logFileContent) - }) - }) -}) - -func getLatencyTestRun() (bool, error) { - if latencyTestRunEnv, ok := os.LookupEnv("LATENCY_TEST_RUN"); ok { - val, err := strconv.ParseBool(latencyTestRunEnv) - if err != nil { - return val, fmt.Errorf("the environment variable LATENCY_TEST_RUN has incorrect value %q: %w", latencyTestRunEnv, err) - } - return val, nil - } - return defaultTestRun, nil -} - -func getLatencyTestRuntime() (string, error) { - if latencyTestRuntimeEnv, ok := os.LookupEnv("LATENCY_TEST_RUNTIME"); ok { - val, err := strconv.Atoi(latencyTestRuntimeEnv) - if err != nil { - return latencyTestRuntimeEnv, fmt.Errorf("the environment variable LATENCY_TEST_RUNTIME has incorrect value %q, it must be a positive integer with maximum value of %d", latencyTestRuntimeEnv, math.MaxInt32) - } - if val < 1 || val > math.MaxInt32 { - return "", fmt.Errorf("the environment variable LATENCY_TEST_RUNTIME has an invalid number %q, it must be a positive integer with maximum value of %d", latencyTestRuntimeEnv, math.MaxInt32) - } - return latencyTestRuntimeEnv, nil - } - return defaultTestRuntime, nil -} - -func getLatencyTestDelay() (int, error) { - if latencyTestDelayEnv, ok := os.LookupEnv("LATENCY_TEST_DELAY"); ok { - val, err := strconv.Atoi(latencyTestDelayEnv) - if err != nil { - return val, fmt.Errorf("the environment variable LATENCY_TEST_DELAY has incorrect value %q, it must be a non-negative integer with maximum value of %d: %w", latencyTestDelayEnv, math.MaxInt32, err) - } - if val < 0 || val > math.MaxInt32 { - return val, fmt.Errorf("the environment variable LATENCY_TEST_DELAY has an invalid number %q, it must be a non-negative integer with maximum value of %d", latencyTestDelayEnv, math.MaxInt32) - } - return val, nil - } - return defaultTestDelay, nil -} - -func getLatencyTestCpus() (int, error) { - if latencyTestCpusEnv, ok := os.LookupEnv("LATENCY_TEST_CPUS"); ok { - val, err := strconv.Atoi(latencyTestCpusEnv) - if err != nil { - return val, fmt.Errorf("the environment variable LATENCY_TEST_CPUS has incorrect value %q, it must be a positive integer with maximum value of %d: %w", latencyTestCpusEnv, math.MaxInt32, err) - } - if val < 0 || val > math.MaxInt32 { - return val, fmt.Errorf("the environment variable LATENCY_TEST_CPUS has an invalid number %q, it must be a positive integer with maximum value of %d", latencyTestCpusEnv, math.MaxInt32) - } - return val, nil - } - return defaultTestCpus, nil -} - -// getMaximumLatency should look for one of the following environment variables: -// OSLAT_MAXIMUM_LATENCY: the expected maximum latency for all buckets in us -// CYCLICTEST_MAXIMUM_LATENCY: the expected maximum latency for all buckets in us -// HWLATDETECT_MAXIMUM_LATENCY: the expected maximum latency for all buckets in us -// MAXIMUM_LATENCY: unified expected maximum latency for all tests -func getMaximumLatency(testName string) (int, error) { - var err error - val := defaultMaxLatency - if unifiedMaxLatencyEnv, ok := os.LookupEnv("MAXIMUM_LATENCY"); ok { - val, err = strconv.Atoi(unifiedMaxLatencyEnv) - if err != nil { - return val, fmt.Errorf("the environment variable MAXIMUM_LATENCY has incorrect value %q, it must be a non-negative integer with maximum value of %d: %w", unifiedMaxLatencyEnv, math.MaxInt32, err) - } - if val < 0 || val > math.MaxInt32 { - return val, fmt.Errorf("the environment variable MAXIMUM_LATENCY has an invalid number %q, it must be a non-negative integer 
with maximum value of %d", unifiedMaxLatencyEnv, math.MaxInt32) - } - } - - // specific values will have precedence over the general one - envVariableName := fmt.Sprintf("%s_MAXIMUM_LATENCY", strings.ToUpper(testName)) - if maximumLatencyEnv, ok := os.LookupEnv(envVariableName); ok { - val, err = strconv.Atoi(maximumLatencyEnv) - if err != nil { - err = fmt.Errorf("the environment variable %q has incorrect value %q, it must be a non-negative integer with maximum value of %d: %w", envVariableName, maximumLatencyEnv, math.MaxInt32, err) - } - if val < 0 || val > math.MaxInt32 { - err = fmt.Errorf("the environment variable %q has an invalid number %q, it must be a non-negative integer with maximum value of %d", envVariableName, maximumLatencyEnv, math.MaxInt32) - } - } - return val, err -} - -func getLatencyTestPod(profile *performancev2.PerformanceProfile, node *corev1.Node, testName string, testSpecificArgs []string, logName string) *corev1.Pod { - runtimeClass := components.GetComponentName(profile.Name, components.ComponentNamePrefix) - testNamePrefix := fmt.Sprintf("%s-", testName) - runnerName := fmt.Sprintf("%srunner", testNamePrefix) - runnerPath := path.Join("usr", "bin", runnerName) - - if latencyTestCpus == defaultTestCpus { - // we can not use all isolated CPUs, because if reserved and isolated include all node CPUs, and reserved CPUs - // do not calculated into the Allocated, at least part of time of one of isolated CPUs will be used to run - // other node containers - cpus := cpuset.MustParse(string(*profile.Spec.CPU.Isolated)) - latencyTestCpus = cpus.Size() - 1 - } - - latencyTestRunnerArgs := []string{ - "-logtostderr=false", - "-alsologtostderr=true", - fmt.Sprintf("-log_file=/host/%s.log", logName), - } - - latencyTestRunnerArgs = append(latencyTestRunnerArgs, testSpecificArgs...) 
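// For orientation (illustrative values, not taken from an actual run): with
// testName "oslat", the default LATENCY_TEST_RUNTIME of 300 and a timestamped
// logName, the runner ends up invoked roughly as
//   usr/bin/oslat-runner -logtostderr=false -alsologtostderr=true \
//       -log_file=/host/<logName>.log -runtime=300
// plus "-oslat-start-delay=<n>" when LATENCY_TEST_DELAY is set (appended just
// below).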
- - if latencyTestDelay > 0 { - latencyTestRunnerArgs = append(latencyTestRunnerArgs, fmt.Sprintf("-%s-start-delay=%d", testName, latencyTestDelay)) - } - - volumeTypeDirectory := corev1.HostPathDirectory - return &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: testNamePrefix, - Annotations: map[string]string{ - "irq-load-balancing.crio.io": "disable", - "cpu-load-balancing.crio.io": "disable", - "cpu-quota.crio.io": "disable", - }, - Namespace: testutils.NamespaceTesting, - }, - Spec: corev1.PodSpec{ - RestartPolicy: corev1.RestartPolicyNever, - RuntimeClassName: &runtimeClass, - Containers: []corev1.Container{ - { - Name: runnerName, - Image: images.Test(), - Command: []string{ - runnerPath, - }, - Args: latencyTestRunnerArgs, - Resources: corev1.ResourceRequirements{ - Limits: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse(strconv.Itoa(latencyTestCpus)), - corev1.ResourceMemory: resource.MustParse("1Gi"), - }, - }, - SecurityContext: &corev1.SecurityContext{ - Privileged: pointer.BoolPtr(true), - }, - VolumeMounts: []corev1.VolumeMount{ - { - Name: "logs", - MountPath: "/host", - }, - }, - }, - }, - NodeSelector: map[string]string{ - "kubernetes.io/hostname": node.Labels["kubernetes.io/hostname"], - }, - Volumes: []corev1.Volume{ - { - Name: "logs", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/var/log", - Type: &volumeTypeDirectory, - }, - }, - }, - }, - }, - } -} - -func logEventsForPod(testPod *corev1.Pod) { - events, err := events.GetEventsForObject(testclient.Client, testPod.Namespace, testPod.Name, string(testPod.UID)) - if err != nil { - testlog.Error(err) - } - for _, event := range events.Items { - testlog.Warningf("-> %s %s %s", event.Action, event.Reason, event.Message) - } -} - -func createLatencyTestPod(testPod *corev1.Pod, node *corev1.Node, logName string) { - err := testclient.Client.Create(context.TODO(), testPod) - Expect(err).ToNot(HaveOccurred()) - - timeout, err := strconv.Atoi(latencyTestRuntime) - Expect(err).ToNot(HaveOccurred()) - - By("Waiting two minutes to download the latencyTest image") - err = pods.WaitForPhase(testPod, corev1.PodRunning, 2*time.Minute) - if err != nil { - testlog.Error(err) - logEventsForPod(testPod) - } - Expect(err).ToNot(HaveOccurred()) - - if runtime, _ := strconv.Atoi(latencyTestRuntime); runtime > 1 { - By("Checking actual CPUs number for the running pod") - limitsCpusQuantity := testPod.Spec.Containers[0].Resources.Limits.Cpu() - RequestsCpusQuantity := testPod.Spec.Containers[0].Resources.Requests.Cpu() - //latency pod is guaranteed - Expect(isEqual(limitsCpusQuantity, latencyTestCpus)).To(BeTrue(), fmt.Sprintf("actual limits of cpus number used for the latency pod is not as set in LATENCY_TEST_CPUS, actual number is: %s", limitsCpusQuantity)) - Expect(isEqual(RequestsCpusQuantity, latencyTestCpus)).To(BeTrue(), fmt.Sprintf("actual requests of cpus number used for the latency pod is not as set in LATENCY_TEST_CPUS, actual number is: %s", RequestsCpusQuantity)) - } - - By("Waiting another two minutes to give enough time for the cluster to move the pod to Succeeded phase") - podTimeout := time.Duration(timeout + 120) - err = pods.WaitForPhase(testPod, corev1.PodSucceeded, podTimeout*time.Second) - if err != nil { - testlog.Error(err) - logEventsForPod(testPod) - } - Expect(err).ToNot(HaveOccurred(), getLogFile(node, logName)) -} - -func extractLatencyValues(logName string, exp string, node *corev1.Node) string { - out := getLogFile(node, logName) - - 
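// The caller passes a pattern with a single capture group holding the
// space-separated per-CPU maximums, e.g. `Maximum:\t*([\s\d]*)\(us\)` for
// oslat or `# Max Latencies:\t*\s*(.*)\s*\t*` for cyclictest; group 1 is
// returned and split on spaces by the test bodies above.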
maximumRegex, err := regexp.Compile(exp) - Expect(err).ToNot(HaveOccurred()) - - latencies := maximumRegex.FindStringSubmatch(out) - Expect(len(latencies)).To(Equal(2)) - - return latencies[1] -} - -func getLogFile(node *corev1.Node, logName string) string { - cmd := []string{"cat", fmt.Sprintf("/rootfs/var/log/%s.log", logName)} - out, err := nodes.ExecCommandOnNode(cmd, node) - if err != nil { - testlog.Error(err) - } - return out -} - -func removeLogfile(node *corev1.Node, logName string) { - cmd := []string{"rm", "-f", fmt.Sprintf("/rootfs/var/log/%s.log", logName)} - _, err := nodes.ExecCommandOnNode(cmd, node) - if err != nil { - testlog.Error(err) - } - -} - -func isEqual(qty *resource.Quantity, amount int) bool { - return qty.CmpInt64(int64(amount)) == 0 -} diff --git a/test/e2e/pao/functests/4_latency/test_suite_latency_test.go b/test/e2e/pao/functests/4_latency/test_suite_latency_test.go deleted file mode 100644 index dfdb3c235..000000000 --- a/test/e2e/pao/functests/4_latency/test_suite_latency_test.go +++ /dev/null @@ -1,53 +0,0 @@ -//go:build !unittests -// +build !unittests - -package __latency_test - -import ( - "context" - "os" - "testing" - "time" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils" - testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/junit" - testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/log" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/namespaces" - - "k8s.io/apimachinery/pkg/api/errors" - - ginkgo_reporters "kubevirt.io/qe-tools/pkg/ginkgo-reporters" -) - -var _ = BeforeSuite(func() { - Expect(testclient.ClientsEnabled).To(BeTrue()) - // create test namespace - err := testclient.Client.Create(context.TODO(), namespaces.TestingNamespace) - if errors.IsAlreadyExists(err) { - testlog.Warning("test namespace already exists, that is unexpected") - return - } - Expect(err).ToNot(HaveOccurred()) -}) - -var _ = AfterSuite(func() { - err := testclient.Client.Delete(context.TODO(), namespaces.TestingNamespace) - Expect(err).ToNot(HaveOccurred()) - err = namespaces.WaitForDeletion(testutils.NamespaceTesting, 5*time.Minute) -}) - -func TestLatency(t *testing.T) { - RegisterFailHandler(Fail) - - testlog.Infof("KUBECONFIG=%q", os.Getenv("KUBECONFIG")) - - rr := []Reporter{} - if ginkgo_reporters.Polarion.Run { - rr = append(rr, &ginkgo_reporters.Polarion) - } - rr = append(rr, junit.NewJUnitReporter("latency")) - RunSpecsWithDefaultAndCustomReporters(t, "Performance Addon Operator latency e2e tests", rr) -} diff --git a/test/e2e/pao/functests/5_latency_testing/5_latency_testing_suite_test.go b/test/e2e/pao/functests/5_latency_testing/5_latency_testing_suite_test.go deleted file mode 100644 index 5ae99685d..000000000 --- a/test/e2e/pao/functests/5_latency_testing/5_latency_testing_suite_test.go +++ /dev/null @@ -1,72 +0,0 @@ -package __latency_testing_test - -import ( - "context" - "encoding/json" - "testing" - "time" - - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/images" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/junit" - testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/log" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/namespaces" - ginkgo_reporters "kubevirt.io/qe-tools/pkg/ginkgo-reporters" -) - -var prePullNamespace = &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: "testing-prepull", - }, -} - -var _ = AfterSuite(func() { - prePullNamespaceName := prePullNamespace.Name - err := testclient.Client.Delete(context.TODO(), prePullNamespace) - testlog.Infof("deleted namespace %q err=%v", prePullNamespace.Name, err) - Expect(err).ToNot(HaveOccurred()) - err = namespaces.WaitForDeletion(prePullNamespaceName, 5*time.Minute) -}) - -func Test5LatencyTesting(t *testing.T) { - RegisterFailHandler(Fail) - - if !testclient.ClientsEnabled { - t.Fatalf("client not enabled") - } - - if err := createNamespace(); err != nil { - t.Fatalf("cannot create the namespace: %v", err) - } - - ds, err := images.PrePull(testclient.Client, images.Test(), prePullNamespace.Name, "cnf-tests") - if err != nil { - data, _ := json.Marshal(ds) // we can safely skip errors - testlog.Infof("DaemonSet %s/%s image=%q status:\n%s", ds.Namespace, ds.Name, images.Test(), string(data)) - t.Fatalf("cannot prepull image %q: %v", images.Test(), err) - } - - rr := []Reporter{} - if ginkgo_reporters.Polarion.Run { - rr = append(rr, &ginkgo_reporters.Polarion) - } - rr = append(rr, junit.NewJUnitReporter("latency_testing")) - RunSpecsWithDefaultAndCustomReporters(t, "Performance Addon Operator latency tools testing", rr) -} - -func createNamespace() error { - err := testclient.Client.Create(context.TODO(), prePullNamespace) - if errors.IsAlreadyExists(err) { - testlog.Warningf("%q namespace already exists, that is unexpected", prePullNamespace.Name) - return nil - } - testlog.Infof("created namespace %q err=%v", prePullNamespace.Name, err) - return err -} diff --git a/test/e2e/pao/functests/5_latency_testing/latency_testing.go b/test/e2e/pao/functests/5_latency_testing/latency_testing.go deleted file mode 100644 index 076f89345..000000000 --- a/test/e2e/pao/functests/5_latency_testing/latency_testing.go +++ /dev/null @@ -1,276 +0,0 @@ -package __latency_testing - -import ( - "bytes" - "fmt" - "math" - "os" - "os/exec" - "regexp" - - . "github.com/onsi/ginkgo" - "github.com/onsi/ginkgo/extensions/table" - . 
"github.com/onsi/gomega" - testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/log" -) - -const ( - //tool to test - oslat = "oslat" - cyclictest = "cyclictest" - hwlatdetect = "hwlatdetect" - //Environment variables names - latencyTestDelay = "LATENCY_TEST_DELAY" - latencyTestRun = "LATENCY_TEST_RUN" - latencyTestRuntime = "LATENCY_TEST_RUNTIME" - maximumLatency = "MAXIMUM_LATENCY" - oslatMaxLatency = "OSLAT_MAXIMUM_LATENCY" - hwlatdetecMaxLatency = "HWLATDETECT_MAXIMUM_LATENCY" - cyclictestMaxLatency = "CYCLICTEST_MAXIMUM_LATENCY" - latencyTestCpus = "LATENCY_TEST_CPUS" - //invalid values error messages - unexpectedError = "Unexpected error" - //incorrect values error messages - incorrectMsgPart1 = "the environment variable " - incorrectMsgPart2 = " has incorrect value" - invalidNumber = " has an invalid number" - maxInt = "2147483647" - minimumCpuForOslat = "2" - mustBePositiveInt = ".*it must be a positive integer with maximum value of " + maxInt - mustBeNonNegativeInt = ".*it must be a non-negative integer with maximum value of " + maxInt - incorrectCpuNumber = incorrectMsgPart1 + latencyTestCpus + incorrectMsgPart2 + mustBePositiveInt - invalidCpuNumber = incorrectMsgPart1 + latencyTestCpus + invalidNumber + mustBePositiveInt - incorrectDelay = incorrectMsgPart1 + latencyTestDelay + incorrectMsgPart2 + mustBeNonNegativeInt - invalidNumberDelay = incorrectMsgPart1 + latencyTestDelay + invalidNumber + mustBeNonNegativeInt - incorrectMaxLatency = incorrectMsgPart1 + maximumLatency + incorrectMsgPart2 + mustBeNonNegativeInt - invalidNumberMaxLatency = incorrectMsgPart1 + maximumLatency + invalidNumber + mustBeNonNegativeInt - incorrectOslatMaxLatency = incorrectMsgPart1 + "\"" + oslatMaxLatency + "\"" + incorrectMsgPart2 + mustBeNonNegativeInt - invalidNumberOslatMaxLatency = incorrectMsgPart1 + "\"" + oslatMaxLatency + "\"" + invalidNumber + mustBeNonNegativeInt - incorrectCyclictestMaxLatency = incorrectMsgPart1 + "\"" + cyclictestMaxLatency + "\"" + incorrectMsgPart2 + mustBeNonNegativeInt - invalidNumberCyclictestMaxLatency = incorrectMsgPart1 + "\"" + cyclictestMaxLatency + "\"" + invalidNumber + mustBeNonNegativeInt - incorrectHwlatdetectMaxLatency = incorrectMsgPart1 + "\"" + hwlatdetecMaxLatency + "\"" + incorrectMsgPart2 + mustBeNonNegativeInt - invalidNumberHwlatdetectMaxLatency = incorrectMsgPart1 + "\"" + hwlatdetecMaxLatency + "\"" + invalidNumber + mustBeNonNegativeInt - incorrectTestRun = incorrectMsgPart1 + latencyTestRun + incorrectMsgPart2 - incorrectRuntime = incorrectMsgPart1 + latencyTestRuntime + incorrectMsgPart2 + mustBePositiveInt - invalidNumberRuntime = incorrectMsgPart1 + latencyTestRuntime + invalidNumber + mustBePositiveInt - //success messages regex - success = `SUCCESS.*1 Passed.*0 Failed.*2 Skipped` - //failure messages regex - latencyFail = `The current latency .* is bigger than the expected one` - fail = `FAIL.*0 Passed.*1 Failed.*2 Skipped` - //hwlatdetect fail message regex - hwlatdetectFail = `Samples exceeding threshold: [^0]` - //skip messages regex - skipTestRun = `Skip the latency test, the LATENCY_TEST_RUN set to false` - skipMaxLatency = `no maximum latency value provided, skip buckets latency check` - skipOslatCpuNumber = `Skip the oslat test, LATENCY_TEST_CPUS is less than the minimum CPUs amount ` + minimumCpuForOslat - skip = `SUCCESS.*0 Passed.*0 Failed.*3 Skipped` - skipInsufficientCpu = `Insufficient cpu to run the test` - - //used values parameters - guaranteedLatency = "20000" - negativeTesting = 
false - positiveTesting = true -) - -//Struct to hold each test parameters -type latencyTest struct { - testDelay string - testRun string - testRuntime string - testMaxLatency string - oslatMaxLatency string - cyclictestMaxLatency string - hwlatdetectMaxLatency string - testCpus string - outputMsgs []string - toolToTest string -} - -var _ = table.DescribeTable("Test latency measurement tools tests", func(testGroup []latencyTest, isPositiveTest bool) { - for _, test := range testGroup { - clearEnv() - testDescription := setEnvAndGetDescription(test) - By(testDescription) - if _, err := os.Stat("../../build/_output/bin/latency-e2e.test"); os.IsNotExist(err) { - Skip("The executable test file does not exist , skipping the test.") - } - output, err := exec.Command("../../build/_output/bin/latency-e2e.test", "-ginkgo.focus", test.toolToTest).Output() - if err != nil { - //we don't log Error level here because the test might be a negative check - testlog.Info(err.Error()) - } - - ok, matchErr := regexp.MatchString(skipInsufficientCpu, string(output)) - if matchErr != nil { - testlog.Error(matchErr.Error()) - } - if ok { - testlog.Info(skipInsufficientCpu) - continue - } - - if isPositiveTest { - if err != nil { - testlog.Error(err.Error()) - } - Expect(string(output)).NotTo(MatchRegexp(unexpectedError), "Unexpected error was detected in a positve test") - //Check runtime argument in the pod's log only if the tool is expected to be executed - ok, matchErr := regexp.MatchString(success, string(output)) - if matchErr != nil { - testlog.Error(matchErr.Error()) - } - if ok { - var commandRegex string - if test.toolToTest == oslat { - commandRegex = fmt.Sprintf("Running the oslat command with arguments .*--duration %s", test.testRuntime) - } - if test.toolToTest == cyclictest { - commandRegex = fmt.Sprintf("running the cyclictest command with arguments .*-D %s", test.testRuntime) - } - if test.toolToTest == hwlatdetect { - commandRegex = fmt.Sprintf("running the hwlatdetect command with arguments .*--duration %s", test.testRuntime) - } - Expect(string(output)).To(MatchRegexp(commandRegex), "The output of the executed tool is not as expected") - } - } - for _, msg := range test.outputMsgs { - Expect(string(output)).To(MatchRegexp(msg), "The output of the executed tool is not as expected") - } - } -}, - table.Entry("[test_id:42851] Latency tools shouldn't run with default environment variables values", []latencyTest{{outputMsgs: []string{skip, skipTestRun}}}, positiveTesting), - table.Entry("[test_id:42850] Oslat - Verify that the tool is working properly with valid environment variables values", getValidValuesTests(oslat), positiveTesting), - table.Entry("[test_id:42853] Oslat - Verify that the latency tool test should print an expected error message when passing invalid environment variables values", getNegativeTests(oslat), negativeTesting), - table.Entry("[test_id:42115] Cyclictest - Verify that the tool is working properly with valid environment variables values", getValidValuesTests(cyclictest), positiveTesting), - table.Entry("[test_id:42852] Cyclictest - Verify that the latency tool test should print an expected error message when passing invalid environment variables values", getNegativeTests(cyclictest), negativeTesting), - table.Entry("[test_id:42849] Hwlatdetect - Verify that the tool is working properly with valid environment variables values", getValidValuesTests(hwlatdetect), positiveTesting), - table.Entry("[test_id:42856] Hwlatdetect - Verify that the latency tool test should print an 
expected error message when passing invalid environment variables values", getNegativeTests(hwlatdetect), negativeTesting), -) - -func setEnvAndGetDescription(tst latencyTest) string { - sb := bytes.NewBufferString("") - testName := tst.toolToTest - if tst.toolToTest == "" { - testName = "latency tools" - } - fmt.Fprintf(sb, "Run %s test : \n", testName) - nonDefaultValues := false - if tst.testDelay != "" { - setEnvWriteDescription(latencyTestDelay, tst.testDelay, sb, &nonDefaultValues) - } - if tst.testRun != "" { - setEnvWriteDescription(latencyTestRun, tst.testRun, sb, &nonDefaultValues) - } - if tst.testRuntime != "" { - setEnvWriteDescription(latencyTestRuntime, tst.testRuntime, sb, &nonDefaultValues) - } - if tst.testMaxLatency != "" { - setEnvWriteDescription(maximumLatency, tst.testMaxLatency, sb, &nonDefaultValues) - } - if tst.oslatMaxLatency != "" { - setEnvWriteDescription(oslatMaxLatency, tst.oslatMaxLatency, sb, &nonDefaultValues) - } - if tst.cyclictestMaxLatency != "" { - setEnvWriteDescription(cyclictestMaxLatency, tst.cyclictestMaxLatency, sb, &nonDefaultValues) - } - if tst.hwlatdetectMaxLatency != "" { - setEnvWriteDescription(hwlatdetecMaxLatency, tst.hwlatdetectMaxLatency, sb, &nonDefaultValues) - } - if tst.testCpus != "" { - setEnvWriteDescription(latencyTestCpus, tst.testCpus, sb, &nonDefaultValues) - } - if !nonDefaultValues { - fmt.Fprint(sb, "With default values of the environment variables") - } - - return sb.String() -} - -func setEnvWriteDescription(envVar string, val string, sb *bytes.Buffer, flag *bool) { - os.Setenv(envVar, val) - fmt.Fprintf(sb, "%s = %s \n", envVar, val) - *flag = true -} - -func clearEnv() { - os.Unsetenv(latencyTestDelay) - os.Unsetenv(latencyTestRun) - os.Unsetenv(latencyTestRuntime) - os.Unsetenv(maximumLatency) - os.Unsetenv(oslatMaxLatency) - os.Unsetenv(cyclictestMaxLatency) - os.Unsetenv(hwlatdetecMaxLatency) - os.Unsetenv(latencyTestCpus) -} - -func getValidValuesTests(toolToTest string) []latencyTest { - var testSet []latencyTest - - //testRuntime: let runtime be 10 seconds for most of the tests and not less, that is to let the tools - //have their time to measure latency properly hence stabilizing the tests - //testCpus: for tests that expect a success output message, note that an even CPU number is needed, otherwise the test would fail with SMTAlignmentError - testSet = append(testSet, latencyTest{testDelay: "0", testRun: "true", testRuntime: "10", testMaxLatency: guaranteedLatency, testCpus: "2", outputMsgs: []string{success}, toolToTest: toolToTest}) - testSet = append(testSet, latencyTest{testDelay: "0", testRun: "true", testRuntime: "10", testMaxLatency: guaranteedLatency, testCpus: "6", outputMsgs: []string{success}, toolToTest: toolToTest}) - testSet = append(testSet, latencyTest{testDelay: "1", testRun: "true", testRuntime: "10", testMaxLatency: guaranteedLatency, outputMsgs: []string{success}, toolToTest: toolToTest}) - testSet = append(testSet, latencyTest{testDelay: "60", testRun: "true", testRuntime: "2", testMaxLatency: guaranteedLatency, outputMsgs: []string{success}, toolToTest: toolToTest}) - - if toolToTest != hwlatdetect { - testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "1", outputMsgs: []string{skip, skipMaxLatency}, toolToTest: toolToTest}) - } - if toolToTest == oslat { - testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "10", testMaxLatency: "1", oslatMaxLatency: guaranteedLatency, outputMsgs: []string{success}, toolToTest: toolToTest}) - testSet = append(testSet, 
latencyTest{testRun: "true", testRuntime: "10", oslatMaxLatency: guaranteedLatency, outputMsgs: []string{success}, toolToTest: toolToTest}) - testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "10", testMaxLatency: guaranteedLatency, testCpus: "1", outputMsgs: []string{skip, skipOslatCpuNumber}, toolToTest: toolToTest}) - } - if toolToTest == cyclictest { - testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "10", testMaxLatency: "1", cyclictestMaxLatency: guaranteedLatency, outputMsgs: []string{success}, toolToTest: toolToTest}) - testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "10", cyclictestMaxLatency: guaranteedLatency, outputMsgs: []string{success}, toolToTest: toolToTest}) - - } - if toolToTest == hwlatdetect { - testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "10", testMaxLatency: "1", hwlatdetectMaxLatency: guaranteedLatency, outputMsgs: []string{success}, toolToTest: toolToTest}) - testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "10", hwlatdetectMaxLatency: guaranteedLatency, outputMsgs: []string{success}, toolToTest: toolToTest}) - testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "10", outputMsgs: []string{success}, toolToTest: toolToTest}) - } - return testSet -} - -func getNegativeTests(toolToTest string) []latencyTest { - var testSet []latencyTest - latencyFailureMsg := latencyFail - if toolToTest == hwlatdetect { - latencyFailureMsg = hwlatdetectFail - } - //TODO: add test to check odd CPU request. - testSet = append(testSet, latencyTest{testDelay: "0", testRun: "true", testRuntime: "5", testMaxLatency: "1", outputMsgs: []string{latencyFailureMsg, fail}, toolToTest: toolToTest}) - testSet = append(testSet, latencyTest{testRun: "yes", testRuntime: "5", testMaxLatency: "1", outputMsgs: []string{incorrectTestRun, fail}, toolToTest: toolToTest}) - testSet = append(testSet, latencyTest{testRun: "true", testRuntime: fmt.Sprint(math.MaxInt32 + 1), outputMsgs: []string{invalidNumberRuntime, fail}, toolToTest: toolToTest}) - testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "-1", testMaxLatency: "1", outputMsgs: []string{invalidNumberRuntime, fail}, toolToTest: toolToTest}) - testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "5", testMaxLatency: "-2", outputMsgs: []string{invalidNumberMaxLatency, fail}, toolToTest: toolToTest}) - testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "1H", outputMsgs: []string{incorrectRuntime, fail}, toolToTest: toolToTest}) - testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "2", testMaxLatency: "&", outputMsgs: []string{incorrectMaxLatency, fail}, toolToTest: toolToTest}) - testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "2", testMaxLatency: fmt.Sprint(math.MaxInt32 + 1), outputMsgs: []string{invalidNumberMaxLatency, fail}, toolToTest: toolToTest}) - testSet = append(testSet, latencyTest{testDelay: "J", testRun: "true", outputMsgs: []string{incorrectDelay, fail}, toolToTest: toolToTest}) - testSet = append(testSet, latencyTest{testDelay: fmt.Sprint(math.MaxInt32 + 1), testRun: "true", outputMsgs: []string{invalidNumberDelay, fail}, toolToTest: toolToTest}) - testSet = append(testSet, latencyTest{testDelay: "-5", testRun: "true", outputMsgs: []string{invalidNumberDelay, fail}, toolToTest: toolToTest}) - testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "2", testMaxLatency: "1", testCpus: "p", outputMsgs: []string{incorrectCpuNumber, 
fail}, toolToTest: toolToTest}) - testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "2", testMaxLatency: "1", testCpus: fmt.Sprint(math.MaxInt32 + 1), outputMsgs: []string{invalidCpuNumber, fail}, toolToTest: toolToTest}) - testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "2", testCpus: "-1", outputMsgs: []string{invalidCpuNumber, fail}, toolToTest: toolToTest}) - if toolToTest == oslat { - testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "2", oslatMaxLatency: "&", outputMsgs: []string{incorrectOslatMaxLatency, fail}, toolToTest: toolToTest}) - testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "2", oslatMaxLatency: fmt.Sprint(math.MaxInt32 + 1), outputMsgs: []string{invalidNumberOslatMaxLatency, fail}, toolToTest: toolToTest}) - testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "2", oslatMaxLatency: "-3", outputMsgs: []string{invalidNumberOslatMaxLatency, fail}, toolToTest: toolToTest}) - } - if toolToTest == cyclictest { - testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "2", cyclictestMaxLatency: "&", outputMsgs: []string{incorrectCyclictestMaxLatency, fail}, toolToTest: toolToTest}) - testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "2", cyclictestMaxLatency: fmt.Sprint(math.MaxInt32 + 1), outputMsgs: []string{invalidNumberCyclictestMaxLatency, fail}, toolToTest: toolToTest}) - testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "2", cyclictestMaxLatency: "-3", outputMsgs: []string{invalidNumberCyclictestMaxLatency, fail}, toolToTest: toolToTest}) - } - if toolToTest == hwlatdetect { - testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "2", hwlatdetectMaxLatency: "&", outputMsgs: []string{incorrectHwlatdetectMaxLatency, fail}, toolToTest: toolToTest}) - testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "2", hwlatdetectMaxLatency: fmt.Sprint(math.MaxInt32 + 1), outputMsgs: []string{invalidNumberHwlatdetectMaxLatency, fail}, toolToTest: toolToTest}) - testSet = append(testSet, latencyTest{testRun: "true", testRuntime: "2", hwlatdetectMaxLatency: "-3", outputMsgs: []string{invalidNumberHwlatdetectMaxLatency, fail}, toolToTest: toolToTest}) - } - return testSet -} diff --git a/test/e2e/pao/functests/README.txt b/test/e2e/pao/functests/README.txt deleted file mode 100644 index ec620be8a..000000000 --- a/test/e2e/pao/functests/README.txt +++ /dev/null @@ -1,5 +0,0 @@ -HEADS UP! - -We have 2 test suites here, one running the "normal" performance tests, the other one performance profile update tests. -The latter should run AFTER the former, and tests are executed in order of filenames. -So be careful with renaming existing or adding new suites. \ No newline at end of file diff --git a/test/e2e/pao/functests/test.go b/test/e2e/pao/functests/test.go deleted file mode 100644 index 56e540407..000000000 --- a/test/e2e/pao/functests/test.go +++ /dev/null @@ -1 +0,0 @@ -package test diff --git a/test/e2e/pao/functests/utils/clean/clean.go b/test/e2e/pao/functests/utils/clean/clean.go deleted file mode 100644 index c4270ab9e..000000000 --- a/test/e2e/pao/functests/utils/clean/clean.go +++ /dev/null @@ -1,70 +0,0 @@ -package clean - -import ( - "context" - "fmt" - "os" - "time" - - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - - performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2" - "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components" - "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components/profile" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils" - testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client" - testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/log" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/mcps" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/profiles" - mcv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/types" -) - -var cleanPerformance bool - -func init() { - clean, found := os.LookupEnv("CLEAN_PERFORMANCE_PROFILE") - if !found || clean != "false" { - cleanPerformance = true - } -} - -// All deletes any leftovers created when running the performance tests. -func All() { - if !cleanPerformance { - testlog.Info("Performance cleaning disabled, skipping") - return - } - - perfProfile := performancev2.PerformanceProfile{} - err := testclient.Client.Get(context.TODO(), types.NamespacedName{Name: utils.PerformanceProfileName}, &perfProfile) - if errors.IsNotFound(err) { - return - } - Expect(err).ToNot(HaveOccurred(), "Failed to find perf profile") - mcpLabel := profile.GetMachineConfigLabel(&perfProfile) - key, value := components.GetFirstKeyAndValue(mcpLabel) - mcpsByLabel, err := mcps.GetByLabel(key, value) - Expect(err).ToNot(HaveOccurred(), "Failed getting MCP") - Expect(len(mcpsByLabel)).To(Equal(1), fmt.Sprintf("Unexpected number of MCPs found: %v", len(mcpsByLabel))) - - performanceMCP := &mcpsByLabel[0] - - err = testclient.Client.Delete(context.TODO(), &perfProfile) - Expect(err).ToNot(HaveOccurred(), "Failed to delete perf profile") - - By("Waiting for MCP starting to update") - mcps.WaitForCondition(performanceMCP.Name, mcv1.MachineConfigPoolUpdating, corev1.ConditionTrue) - - By("Waiting for MCP being updated") - mcps.WaitForCondition(performanceMCP.Name, mcv1.MachineConfigPoolUpdated, corev1.ConditionTrue) - profileKey := types.NamespacedName{ - Name: perfProfile.Name, - Namespace: perfProfile.Namespace, - } - err = profiles.WaitForDeletion(profileKey, 60*time.Second) - Expect(err).ToNot(HaveOccurred(), "Failed to wait for perf profile deletion") -} diff --git a/test/e2e/pao/functests/utils/client/clients.go b/test/e2e/pao/functests/utils/client/clients.go deleted file mode 100644 index 87343fbbb..000000000 --- a/test/e2e/pao/functests/utils/client/clients.go +++ /dev/null @@ -1,122 +0,0 @@ -package client - -import ( - "context" - "time" - - . 
"github.com/onsi/gomega" - - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/klog" - - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/config" - - configv1 "github.com/openshift/api/config/v1" - tunedv1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/tuned/v1" - mcov1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" - operatorsv1alpha1 "github.com/operator-framework/operator-lifecycle-manager/pkg/api/apis/operators/v1alpha1" - apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" - - performancev1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v1" - performancev1alpha1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v1alpha1" - performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2" - testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/log" -) - -var ( - // Client defines the API client to run CRUD operations, that will be used for testing - Client client.Client - // K8sClient defines k8s client to run subresource operations, for example you should use it to get pod logs - K8sClient *kubernetes.Clientset - // ClientsEnabled tells if the client from the package can be used - ClientsEnabled bool -) - -func init() { - // Setup Scheme for all resources - if err := performancev2.AddToScheme(scheme.Scheme); err != nil { - klog.Exit(err.Error()) - } - - if err := performancev1.AddToScheme(scheme.Scheme); err != nil { - klog.Exit(err.Error()) - } - - if err := performancev1alpha1.AddToScheme(scheme.Scheme); err != nil { - klog.Exit(err.Error()) - } - - if err := configv1.AddToScheme(scheme.Scheme); err != nil { - klog.Exit(err.Error()) - } - - if err := mcov1.AddToScheme(scheme.Scheme); err != nil { - klog.Exit(err.Error()) - } - - if err := tunedv1.AddToScheme(scheme.Scheme); err != nil { - klog.Exit(err.Error()) - } - - if err := apiextensionsv1beta1.AddToScheme(scheme.Scheme); err != nil { - klog.Exit(err.Error()) - } - - if err := operatorsv1alpha1.AddToScheme(scheme.Scheme); err != nil { - klog.Exit(err.Error()) - } - - var err error - Client, err = New() - if err != nil { - testlog.Info("Failed to initialize client, check the KUBECONFIG env variable", err.Error()) - ClientsEnabled = false - return - } - K8sClient, err = NewK8s() - if err != nil { - testlog.Info("Failed to initialize k8s client, check the KUBECONFIG env variable", err.Error()) - ClientsEnabled = false - return - } - ClientsEnabled = true -} - -// New returns a controller-runtime client. 
-func New() (client.Client, error) { - cfg, err := config.GetConfig() - if err != nil { - return nil, err - } - - c, err := client.New(cfg, client.Options{}) - return c, err -} - -// NewK8s returns a kubernetes clientset -func NewK8s() (*kubernetes.Clientset, error) { - cfg, err := config.GetConfig() - if err != nil { - return nil, err - } - - clientset, err := kubernetes.NewForConfig(cfg) - if err != nil { - klog.Exit(err.Error()) - } - return clientset, nil -} - -func GetWithRetry(ctx context.Context, key client.ObjectKey, obj client.Object) error { - var err error - EventuallyWithOffset(1, func() error { - err = Client.Get(ctx, key, obj) - if err != nil { - testlog.Infof("Getting %s failed, retrying: %v", key.Name, err) - } - return err - }, 1*time.Minute, 10*time.Second).ShouldNot(HaveOccurred(), "Max numbers of retries getting %v reached", key) - return err -} diff --git a/test/e2e/pao/functests/utils/cluster/cluster.go b/test/e2e/pao/functests/utils/cluster/cluster.go deleted file mode 100644 index 9e0b091f0..000000000 --- a/test/e2e/pao/functests/utils/cluster/cluster.go +++ /dev/null @@ -1,31 +0,0 @@ -package cluster - -import ( - "context" - "time" - - corev1 "k8s.io/api/core/v1" - "sigs.k8s.io/controller-runtime/pkg/client" - - testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client" -) - -// IsSingleNode validates if the environment is single node cluster -func IsSingleNode() (bool, error) { - nodes := &corev1.NodeList{} - if err := testclient.Client.List(context.TODO(), nodes, &client.ListOptions{}); err != nil { - return false, err - } - return len(nodes.Items) == 1, nil -} - -// ComputeTestTimeout returns the desired timeout for a test based on a given base timeout. -// If the tested cluster is Single-Node it needs more time to react (due to being highly loaded) so we double the given timeout. -func ComputeTestTimeout(baseTimeout time.Duration, isSno bool) time.Duration { - testTimeout := baseTimeout - if isSno { - testTimeout += baseTimeout - } - - return testTimeout -} diff --git a/test/e2e/pao/functests/utils/consts.go b/test/e2e/pao/functests/utils/consts.go deleted file mode 100644 index ffeef99a9..000000000 --- a/test/e2e/pao/functests/utils/consts.go +++ /dev/null @@ -1,99 +0,0 @@ -package utils - -import ( - "fmt" - "os" - "strings" - - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/discovery" -) - -// RoleWorkerCNF contains role name of cnf worker nodes -var RoleWorkerCNF string - -// NodeSelectorLabels contains the node labels the perfomance profile should match -var NodeSelectorLabels map[string]string - -// PerformanceProfileName contains the name of the PerformanceProfile created for tests -// or an existing profile when discover mode is enabled -var PerformanceProfileName string - -// NodesSelector represents the label selector used to filter impacted nodes. 
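For cluster.ComputeTestTimeout above, the effect is simply that single-node clusters get twice the base timeout. A standalone sketch of the same arithmetic; the 2-minute base is an arbitrary example value:

package main

import (
	"fmt"
	"time"
)

// Mirrors ComputeTestTimeout: a single-node cluster is assumed to be more loaded, so the base timeout is doubled.
func computeTestTimeout(base time.Duration, isSNO bool) time.Duration {
	if isSNO {
		return base + base
	}
	return base
}

func main() {
	fmt.Println(computeTestTimeout(2*time.Minute, false)) // 2m0s
	fmt.Println(computeTestTimeout(2*time.Minute, true))  // 4m0s
}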
-var NodesSelector string - -// ProfileNotFound is true when discovery mode is enabled and no valid profile was found -var ProfileNotFound bool - -func init() { - RoleWorkerCNF = os.Getenv("ROLE_WORKER_CNF") - if RoleWorkerCNF == "" { - RoleWorkerCNF = "worker-cnf" - } - - PerformanceProfileName = os.Getenv("PERF_TEST_PROFILE") - if PerformanceProfileName == "" { - PerformanceProfileName = "performance" - } - - NodesSelector = os.Getenv("NODES_SELECTOR") - - NodeSelectorLabels = map[string]string{ - fmt.Sprintf("%s/%s", LabelRole, RoleWorkerCNF): "", - } - - if discovery.Enabled() { - profile, err := discovery.GetDiscoveryPerformanceProfile(NodesSelector) - if err == discovery.ErrProfileNotFound { - ProfileNotFound = true - return - } - - if err != nil { - fmt.Println("Failed to find profile in discovery mode", err) - ProfileNotFound = true - return - } - - PerformanceProfileName = profile.Name - - NodeSelectorLabels = profile.Spec.NodeSelector - if NodesSelector != "" { - keyValue := strings.Split(NodesSelector, "=") - if len(keyValue) == 1 { - keyValue = append(keyValue, "") - } - NodeSelectorLabels[keyValue[0]] = keyValue[1] - } - } -} - -const ( - // RoleWorker contains the worker role - RoleWorker = "worker" - // RoleMaster contains the master role - RoleMaster = "master" -) - -const ( - // LabelRole contains the key for the role label - LabelRole = "node-role.kubernetes.io" - // LabelHostname contains the key for the hostname label - LabelHostname = "kubernetes.io/hostname" -) - -const ( - // NamespaceMachineConfigOperator contains the namespace of the machine-config-opereator - NamespaceMachineConfigOperator = "openshift-machine-config-operator" - // NamespaceTesting contains the name of the testing namespace - NamespaceTesting = "performance-addon-operators-testing" -) - -const ( - // FilePathKubeletConfig contains the kubelet.conf file path - FilePathKubeletConfig = "/etc/kubernetes/kubelet.conf" -) - -const ( - // ContainerMachineConfigDaemon contains the name of the machine-config-daemon container - ContainerMachineConfigDaemon = "machine-config-daemon" -) diff --git a/test/e2e/pao/functests/utils/daemonset/daemonset.go b/test/e2e/pao/functests/utils/daemonset/daemonset.go deleted file mode 100644 index f0e83c803..000000000 --- a/test/e2e/pao/functests/utils/daemonset/daemonset.go +++ /dev/null @@ -1,48 +0,0 @@ -package daemonset - -import ( - "context" - "time" - - appsv1 "k8s.io/api/apps/v1" - k8serrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/util/wait" - - "sigs.k8s.io/controller-runtime/pkg/client" - - testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/log" -) - -func WaitToBeRunning(cli client.Client, namespace, name string) error { - return WaitToBeRunningWithTimeout(cli, namespace, name, 5*time.Minute) -} - -func WaitToBeRunningWithTimeout(cli client.Client, namespace, name string, timeout time.Duration) error { - testlog.Infof("wait for the daemonset %q %q to be running", namespace, name) - return wait.PollImmediate(10*time.Second, timeout, func() (bool, error) { - return IsRunning(cli, namespace, name) - }) -} - -func GetByName(cli client.Client, namespace, name string) (*appsv1.DaemonSet, error) { - key := client.ObjectKey{ - Namespace: namespace, - Name: name, - } - var ds appsv1.DaemonSet - err := cli.Get(context.TODO(), key, &ds) - return &ds, err -} - -func IsRunning(cli client.Client, namespace, name string) (bool, error) { - ds, err := GetByName(cli, namespace, name) - if err != nil { - if 
k8serrors.IsNotFound(err) { - testlog.Warningf("daemonset %q %q not found - retrying", namespace, name) - return false, nil - } - return false, err - } - testlog.Infof("daemonset %q %q desired %d scheduled %d ready %d", namespace, name, ds.Status.DesiredNumberScheduled, ds.Status.CurrentNumberScheduled, ds.Status.NumberReady) - return (ds.Status.DesiredNumberScheduled > 0 && ds.Status.DesiredNumberScheduled == ds.Status.NumberReady), nil -} diff --git a/test/e2e/pao/functests/utils/discovery/discovery.go b/test/e2e/pao/functests/utils/discovery/discovery.go deleted file mode 100644 index bbbd9f8e7..000000000 --- a/test/e2e/pao/functests/utils/discovery/discovery.go +++ /dev/null @@ -1,87 +0,0 @@ -package discovery - -import ( - "context" - "fmt" - "os" - "strconv" - - performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2" - testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/profiles" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/labels" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -var ErrProfileNotFound = fmt.Errorf("profile not found in discovery mode") - -// ConditionIterator is the function that accepts element of a PerformanceProfile and returns boolean -type ConditionIterator func(performancev2.PerformanceProfile) bool - -// Enabled indicates whether test discovery mode is enabled. -func Enabled() bool { - discoveryMode, _ := strconv.ParseBool(os.Getenv("DISCOVERY_MODE")) - return discoveryMode -} - -// GetDiscoveryPerformanceProfile returns an existing profile matching nodesSelector, if nodesSelector is set. -// Otherwise, it returns an existing profile with the most nodes using it. -// In case no profile exists - return nil -func GetDiscoveryPerformanceProfile(nodesSelector string) (*performancev2.PerformanceProfile, error) { - performanceProfiles, err := profiles.All() - if err != nil { - return nil, err - } - return getDiscoveryPerformanceProfile(performanceProfiles.Items, nodesSelector) -} - -// GetFilteredDiscoveryPerformanceProfile returns an existing profile in the cluster with the most nodes using it -// from a a filtered profiles list by the filter function passed as an argument. 
-// In case no profile exists - return nil -func GetFilteredDiscoveryPerformanceProfile(iterator ConditionIterator) (*performancev2.PerformanceProfile, error) { - performanceProfiles, err := profiles.All() - if err != nil { - return nil, err - } - return getDiscoveryPerformanceProfile(filter(performanceProfiles.Items, iterator), "") -} - -func getDiscoveryPerformanceProfile(performanceProfiles []performancev2.PerformanceProfile, nodesSelector string) (*performancev2.PerformanceProfile, error) { - var currentProfile *performancev2.PerformanceProfile = nil - maxNodesNumber := 0 - for _, profile := range performanceProfiles { - selector := labels.SelectorFromSet(profile.Spec.NodeSelector) - - profileNodes := &corev1.NodeList{} - if err := testclient.Client.List(context.TODO(), profileNodes, &client.ListOptions{LabelSelector: selector}); err != nil { - return nil, err - } - - if nodesSelector != "" { - if selector.String() == nodesSelector { - return &profile, nil - } - } - - if len(profileNodes.Items) > maxNodesNumber { - currentProfile = &profile - maxNodesNumber = len(profileNodes.Items) - } - } - - if currentProfile == nil { - return nil, ErrProfileNotFound - } - return currentProfile, nil -} - -func filter(performanceProfiles []performancev2.PerformanceProfile, iterator ConditionIterator) []performancev2.PerformanceProfile { - var result = make([]performancev2.PerformanceProfile, 0) - for _, profile := range performanceProfiles { - if iterator(profile) { - result = append(result, profile) - } - } - return result -} diff --git a/test/e2e/pao/functests/utils/events/events.go b/test/e2e/pao/functests/utils/events/events.go deleted file mode 100644 index f3aee6a46..000000000 --- a/test/e2e/pao/functests/utils/events/events.go +++ /dev/null @@ -1,19 +0,0 @@ -package events - -import ( - "context" - - corev1 "k8s.io/api/core/v1" - - "sigs.k8s.io/controller-runtime/pkg/client" -) - -func GetEventsForObject(cli client.Client, namespace, name, uid string) (corev1.EventList, error) { - eventList := corev1.EventList{} - match := client.MatchingFields{ - "involvedObject.name": name, - "involvedObject.uid": uid, - } - err := cli.List(context.TODO(), &eventList, &client.ListOptions{Namespace: namespace}, match) - return eventList, err -} diff --git a/test/e2e/pao/functests/utils/images/images.go b/test/e2e/pao/functests/utils/images/images.go deleted file mode 100644 index eb3241d3e..000000000 --- a/test/e2e/pao/functests/utils/images/images.go +++ /dev/null @@ -1,27 +0,0 @@ -package images - -import ( - "fmt" - "os" -) - -var registry string -var cnfTestsImage string - -func init() { - registry = os.Getenv("IMAGE_REGISTRY") - cnfTestsImage = os.Getenv("CNF_TESTS_IMAGE") - - if cnfTestsImage == "" { - cnfTestsImage = "cnf-tests:4.9" - } - - if registry == "" { - registry = "quay.io/openshift-kni/" - } -} - -// Test returns the image to be used for tests -func Test() string { - return fmt.Sprintf("%s%s", registry, cnfTestsImage) -} diff --git a/test/e2e/pao/functests/utils/images/prepull.go b/test/e2e/pao/functests/utils/images/prepull.go deleted file mode 100644 index fa9f2db4c..000000000 --- a/test/e2e/pao/functests/utils/images/prepull.go +++ /dev/null @@ -1,94 +0,0 @@ -package images - -import ( - "context" - "encoding/json" - "os" - "strconv" - "time" - - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "sigs.k8s.io/controller-runtime/pkg/client" - - testclient 
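The images helpers above resolve the test image from IMAGE_REGISTRY and CNF_TESTS_IMAGE, falling back to built-in defaults. A standalone sketch of that resolution, using the same default values shown in images.go:

package main

import (
	"fmt"
	"os"
)

// Same resolution as images.Test(): defaults apply when the environment variables are unset.
func testImage() string {
	registry := os.Getenv("IMAGE_REGISTRY")
	image := os.Getenv("CNF_TESTS_IMAGE")
	if image == "" {
		image = "cnf-tests:4.9"
	}
	if registry == "" {
		registry = "quay.io/openshift-kni/"
	}
	return fmt.Sprintf("%s%s", registry, image)
}

func main() {
	fmt.Println(testImage()) // quay.io/openshift-kni/cnf-tests:4.9 with both variables unset
}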
"github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client" - testds "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/daemonset" - testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/log" -) - -const ( - PrePullPrefix = "prepull" - PrePullDefaultTimeoutMinutes = "5" -) - -// GetPullTimeout returns the pull timeout -func GetPullTimeout() (time.Duration, error) { - prePullTimeoutMins, ok := os.LookupEnv("PREPULL_IMAGE_TIMEOUT_MINUTES") - if !ok { - prePullTimeoutMins = PrePullDefaultTimeoutMinutes - } - timeout, err := strconv.Atoi(prePullTimeoutMins) - return time.Duration(timeout) * time.Minute, err -} - -// PrePull makes sure the image is pre-pulled on the relevant nodes. -func PrePull(cli client.Client, pullSpec, namespace, tag string) (*appsv1.DaemonSet, error) { - name := PrePullPrefix + tag - ds := appsv1.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - Spec: appsv1.DaemonSetSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{ - "name": "prepull-daemonset-" + tag, - }, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{ - "name": "prepull-daemonset-" + tag, - }, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "prepullcontainer", - Image: pullSpec, - Command: []string{"/bin/sleep"}, - Args: []string{"inf"}, - ImagePullPolicy: corev1.PullAlways, - }, - }, - }, - }, - }, - } - - prePullTimeout, err := GetPullTimeout() - if err != nil { - return &ds, err - } - testlog.Infof("pull timeout: %v", prePullTimeout) - - testlog.Infof("creating daemonset %s/%s to prepull %q", namespace, name, pullSpec) - ts := time.Now() - err = cli.Create(context.TODO(), &ds) - if err != nil { - return &ds, err - } - data, _ := json.Marshal(ds) - testlog.Infof("created daemonset %s/%s to prepull %q:\n%s", namespace, name, pullSpec, string(data)) - - err = testds.WaitToBeRunningWithTimeout(testclient.Client, ds.Namespace, ds.Name, prePullTimeout) - if err != nil { - // if this fails, no big deal, we are just trying to make the troubleshooting easier - updatedDs, _ := testds.GetByName(testclient.Client, ds.Namespace, ds.Name) - return updatedDs, err - } - testlog.Infof("prepulled %q in %v", pullSpec, time.Since(ts)) - return nil, nil -} diff --git a/test/e2e/pao/functests/utils/junit/reporter.go b/test/e2e/pao/functests/utils/junit/reporter.go deleted file mode 100644 index 85b62af18..000000000 --- a/test/e2e/pao/functests/utils/junit/reporter.go +++ /dev/null @@ -1,18 +0,0 @@ -package junit - -import ( - "flag" - "fmt" - "github.com/onsi/ginkgo/reporters" -) - -var junitDir *string - -func init() { - junitDir = flag.String("junitDir", ".", "the directory for the junit format report") -} - -// NewJUnitReporter with the given name. 
testSuiteName must be a valid filename part -func NewJUnitReporter(testSuiteName string) *reporters.JUnitReporter { - return reporters.NewJUnitReporter(fmt.Sprintf("%s/%s_%s.xml", *junitDir, "unit_report", testSuiteName)) -} diff --git a/test/e2e/pao/functests/utils/log/log.go b/test/e2e/pao/functests/utils/log/log.go deleted file mode 100644 index 2df2061db..000000000 --- a/test/e2e/pao/functests/utils/log/log.go +++ /dev/null @@ -1,52 +0,0 @@ -package log - -import ( - "fmt" - "time" - - "github.com/onsi/ginkgo" -) - -func nowStamp() string { - return time.Now().Format(time.StampMilli) -} - -func logf(level string, format string, args ...interface{}) { - fmt.Fprintf(ginkgo.GinkgoWriter, nowStamp()+": "+level+": "+format+"\n", args...) -} - -func log(level string, args ...interface{}) { - fmt.Fprint(ginkgo.GinkgoWriter, nowStamp()+": "+level+": ") - fmt.Fprint(ginkgo.GinkgoWriter, args...) - fmt.Fprint(ginkgo.GinkgoWriter, "\n") -} - -// Info logs the info -func Info(args ...interface{}) { - log("[INFO]", args...) -} - -// Infof logs the info with arguments -func Infof(format string, args ...interface{}) { - logf("[INFO]", format, args...) -} - -// Warning logs the warning -func Warning(args ...interface{}) { - log("[WARNING]", args...) -} - -// Warningf logs the warning with arguments -func Warningf(format string, args ...interface{}) { - logf("[WARNING]", format, args...) -} - -// Error logs the warning -func Error(args ...interface{}) { - log("[ERROR]", args...) -} - -// Errorf logs the warning with arguments -func Errorf(format string, args ...interface{}) { - logf("[ERROR]", format, args...) -} diff --git a/test/e2e/pao/functests/utils/mcps/mcps.go b/test/e2e/pao/functests/utils/mcps/mcps.go deleted file mode 100644 index b42f1ab0e..000000000 --- a/test/e2e/pao/functests/utils/mcps/mcps.go +++ /dev/null @@ -1,235 +0,0 @@ -package mcps - -import ( - "context" - "time" - - . 
"github.com/onsi/gomega" - "github.com/pkg/errors" - - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/selection" - "k8s.io/apimachinery/pkg/types" - - "sigs.k8s.io/controller-runtime/pkg/client" - - performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2" - "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components" - "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components/machineconfig" - "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components/profile" - testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/cluster" - testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/log" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/nodes" - machineconfigv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" -) - -const ( - mcpUpdateTimeoutPerNode = 30 -) - -// GetByLabel returns all MCPs with the specified label -func GetByLabel(key, value string) ([]machineconfigv1.MachineConfigPool, error) { - selector := labels.NewSelector() - req, err := labels.NewRequirement(key, selection.Equals, []string{value}) - if err != nil { - return nil, err - } - selector = selector.Add(*req) - mcps := &machineconfigv1.MachineConfigPoolList{} - if err := testclient.Client.List(context.TODO(), mcps, &client.ListOptions{LabelSelector: selector}); err != nil { - return nil, err - } - if len(mcps.Items) > 0 { - return mcps.Items, nil - } - // fallback to look for a mcp with the same nodeselector. 
- // key value may come from a node selector, so looking for a mcp - // that targets the same nodes is legit - if err := testclient.Client.List(context.TODO(), mcps); err != nil { - return nil, err - } - res := []machineconfigv1.MachineConfigPool{} - for _, item := range mcps.Items { - if item.Spec.NodeSelector.MatchLabels[key] == value { - res = append(res, item) - } - nodeRoleKey := components.NodeRoleLabelPrefix + value - - if _, ok := item.Spec.NodeSelector.MatchLabels[nodeRoleKey]; ok { - res = append(res, item) - } - } - return res, nil -} - -// GetByName returns the MCP with the specified name -func GetByName(name string) (*machineconfigv1.MachineConfigPool, error) { - mcp := &machineconfigv1.MachineConfigPool{} - key := types.NamespacedName{ - Name: name, - Namespace: metav1.NamespaceNone, - } - err := testclient.GetWithRetry(context.TODO(), key, mcp) - return mcp, err -} - -// GetByNameNoRetry returns the MCP with the specified name without retrying to poke -// the api server -func GetByNameNoRetry(name string) (*machineconfigv1.MachineConfigPool, error) { - mcp := &machineconfigv1.MachineConfigPool{} - key := types.NamespacedName{ - Name: name, - Namespace: metav1.NamespaceNone, - } - err := testclient.Client.Get(context.TODO(), key, mcp) - return mcp, err -} - -// GetByProfile returns the MCP by a given performance profile -func GetByProfile(performanceProfile *performancev2.PerformanceProfile) (string, error) { - mcpLabel := profile.GetMachineConfigLabel(performanceProfile) - key, value := components.GetFirstKeyAndValue(mcpLabel) - mcpsByLabel, err := GetByLabel(key, value) - if err != nil { - return "", err - } - performanceMCP := &mcpsByLabel[0] - return performanceMCP.Name, nil -} - -// New creates a new MCP with the given name and node selector -func New(mcpName string, nodeSelector map[string]string) *machineconfigv1.MachineConfigPool { - return &machineconfigv1.MachineConfigPool{ - ObjectMeta: metav1.ObjectMeta{ - Name: mcpName, - Namespace: metav1.NamespaceNone, - Labels: map[string]string{components.MachineConfigRoleLabelKey: mcpName}, - }, - Spec: machineconfigv1.MachineConfigPoolSpec{ - MachineConfigSelector: &metav1.LabelSelector{ - MatchExpressions: []metav1.LabelSelectorRequirement{ - { - Key: components.MachineConfigRoleLabelKey, - Operator: "In", - Values: []string{"worker", mcpName}, - }, - }, - }, - NodeSelector: &metav1.LabelSelector{ - MatchLabels: nodeSelector, - }, - }, - } -} - -// GetConditionStatus return the condition status of the given MCP and condition type -func GetConditionStatus(mcpName string, conditionType machineconfigv1.MachineConfigPoolConditionType) corev1.ConditionStatus { - mcp, err := GetByNameNoRetry(mcpName) - if err != nil { - // In case of any error we just retry, as in case of single node cluster - // the only node may be rebooting - return corev1.ConditionUnknown - } - for _, condition := range mcp.Status.Conditions { - if condition.Type == conditionType { - return condition.Status - } - } - return corev1.ConditionUnknown -} - -// GetConditionReason return the reason of the given MCP -func GetConditionReason(mcpName string, conditionType machineconfigv1.MachineConfigPoolConditionType) string { - mcp, err := GetByName(mcpName) - ExpectWithOffset(1, err).ToNot(HaveOccurred(), "Failed getting MCP %q by name", mcpName) - for _, condition := range mcp.Status.Conditions { - if condition.Type == conditionType { - return condition.Reason - } - } - return "" -} - -// WaitForCondition waits for the MCP with given name having a condition of 
given type with given status -func WaitForCondition(mcpName string, conditionType machineconfigv1.MachineConfigPoolConditionType, conditionStatus corev1.ConditionStatus) { - - var cnfNodes []corev1.Node - runningOnSingleNode, err := cluster.IsSingleNode() - ExpectWithOffset(1, err).ToNot(HaveOccurred()) - // checking in eventually as in case of single node cluster the only node may - // be rebooting - EventuallyWithOffset(1, func() error { - mcp, err := GetByName(mcpName) - if err != nil { - return errors.Wrap(err, "Failed getting MCP by name") - } - - nodeLabels := mcp.Spec.NodeSelector.MatchLabels - key, _ := components.GetFirstKeyAndValue(nodeLabels) - req, err := labels.NewRequirement(key, selection.Exists, []string{}) - if err != nil { - return errors.Wrap(err, "Failed creating node selector") - } - - selector := labels.NewSelector() - selector = selector.Add(*req) - cnfNodes, err = nodes.GetBySelector(selector) - if err != nil { - return errors.Wrap(err, "Failed getting nodes by selector") - } - - testlog.Infof("MCP %q is targeting %v node(s)", mcp.Name, len(cnfNodes)) - return nil - }, cluster.ComputeTestTimeout(10*time.Minute, runningOnSingleNode), 5*time.Second).ShouldNot(HaveOccurred(), "Failed to find CNF nodes by MCP %q", mcpName) - - // timeout should be based on the number of worker-cnf nodes - timeout := time.Duration(len(cnfNodes)*mcpUpdateTimeoutPerNode) * time.Minute - if len(cnfNodes) == 0 { - timeout = 2 * time.Minute - } - - EventuallyWithOffset(1, func() corev1.ConditionStatus { - return GetConditionStatus(mcpName, conditionType) - }, cluster.ComputeTestTimeout(timeout, runningOnSingleNode), 30*time.Second).Should(Equal(conditionStatus), "Failed to find condition status by MCP %q", mcpName) -} - -// WaitForProfilePickedUp waits for the MCP with given name containing the MC created for the PerformanceProfile with the given name -func WaitForProfilePickedUp(mcpName string, profile *performancev2.PerformanceProfile) { - runningOnSingleNode, err := cluster.IsSingleNode() - ExpectWithOffset(1, err).ToNot(HaveOccurred()) - testlog.Infof("Waiting for profile %s to be picked up by the %s machine config pool", profile.Name, mcpName) - defer testlog.Infof("Profile %s picked up by the %s machine config pool", profile.Name, mcpName) - EventuallyWithOffset(1, func() bool { - mcp, err := GetByName(mcpName) - // we ignore the error and just retry in case of single node cluster - if err != nil { - return false - } - for _, source := range mcp.Spec.Configuration.Source { - if source.Name == machineconfig.GetMachineConfigName(profile) { - return true - } - } - return false - }, cluster.ComputeTestTimeout(10*time.Minute, runningOnSingleNode), 30*time.Second).Should(BeTrue(), "PerformanceProfile's %q MC was not picked up by MCP %q in time", profile.Name, mcpName) -} - -func Delete(name string) error { - mcp := &machineconfigv1.MachineConfigPool{} - if err := testclient.Client.Get(context.TODO(), types.NamespacedName{Name: name}, mcp); err != nil { - if apierrors.IsNotFound(err) { - return nil - } - return err - } - - if err := testclient.Client.Delete(context.TODO(), mcp); err != nil { - return err - } - - return nil -} diff --git a/test/e2e/pao/functests/utils/namespaces/namespaces.go b/test/e2e/pao/functests/utils/namespaces/namespaces.go deleted file mode 100644 index 46995a85c..000000000 --- a/test/e2e/pao/functests/utils/namespaces/namespaces.go +++ /dev/null @@ -1,49 +0,0 @@ -package namespaces - -import ( - "context" - "os" - "time" - - corev1 "k8s.io/api/core/v1" - 
"k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - - testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils" - testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client" -) - -// PerformanceOperator contains the name of the performance operator namespace -// default as recommended in -// https://docs.openshift.com/container-platform/4.6/scalability_and_performance/cnf-performance-addon-operator-for-low-latency-nodes.html#install-operator-cli_cnf-master -var PerformanceOperator string = "openshift-cluster-node-tuning-operator" - -func init() { - if operatorNS, ok := os.LookupEnv("PERFORMANCE_OPERATOR_NAMESPACE"); ok { - PerformanceOperator = operatorNS - } -} - -// TestingNamespace is the namespace the tests will use for running test pods -var TestingNamespace = &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: testutils.NamespaceTesting, - }, -} - -// WaitForDeletion waits until the namespace will be removed from the cluster -func WaitForDeletion(name string, timeout time.Duration) error { - key := types.NamespacedName{ - Name: name, - Namespace: metav1.NamespaceNone, - } - return wait.PollImmediate(time.Second, timeout, func() (bool, error) { - ns := &corev1.Namespace{} - if err := testclient.Client.Get(context.TODO(), key, ns); errors.IsNotFound(err) { - return true, nil - } - return false, nil - }) -} diff --git a/test/e2e/pao/functests/utils/nodes/nodes.go b/test/e2e/pao/functests/utils/nodes/nodes.go deleted file mode 100644 index 8b140babc..000000000 --- a/test/e2e/pao/functests/utils/nodes/nodes.go +++ /dev/null @@ -1,342 +0,0 @@ -package nodes - -import ( - "context" - "encoding/json" - "fmt" - "path" - "strconv" - "strings" - "time" - - . 
"github.com/onsi/gomega" - - "github.com/ghodss/yaml" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/types" - kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1" - "k8s.io/kubernetes/pkg/kubelet/cm/cpuset" - - "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components" - testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils" - testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/cluster" - testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/log" - testpods "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/pods" -) - -const ( - testTimeout = 480 - testPollInterval = 2 -) - -const ( - sysDevicesOnlineCPUs = "/sys/devices/system/cpu/online" -) - -// NumaNodes defines cpus in each numa node -type NumaNodes struct { - Cpus []NodeCPU `json:"cpus"` -} - -// NodeCPU Structure -type NodeCPU struct { - CPU string `json:"cpu"` - Node string `json:"node"` -} - -// GetByRole returns all nodes with the specified role -func GetByRole(role string) ([]corev1.Node, error) { - selector, err := labels.Parse(fmt.Sprintf("%s/%s=", testutils.LabelRole, role)) - if err != nil { - return nil, err - } - return GetBySelector(selector) -} - -// GetBySelector returns all nodes with the specified selector -func GetBySelector(selector labels.Selector) ([]corev1.Node, error) { - nodes := &corev1.NodeList{} - if err := testclient.Client.List(context.TODO(), nodes, &client.ListOptions{LabelSelector: selector}); err != nil { - return nil, err - } - return nodes.Items, nil -} - -// GetByLabels returns all nodes with the specified labels -func GetByLabels(nodeLabels map[string]string) ([]corev1.Node, error) { - selector := labels.SelectorFromSet(nodeLabels) - return GetBySelector(selector) -} - -// GetByName returns a node object by for a node name -func GetByName(nodeName string) (*corev1.Node, error) { - node := &corev1.Node{} - key := types.NamespacedName{ - Name: nodeName, - } - if err := testclient.Client.Get(context.TODO(), key, node); err != nil { - return nil, fmt.Errorf("failed to get node for the node %q", node.Name) - } - return node, nil -} - -// GetNonPerformancesWorkers returns list of nodes with non matching perfomance profile labels -func GetNonPerformancesWorkers(nodeSelectorLabels map[string]string) ([]corev1.Node, error) { - nonPerformanceWorkerNodes := []corev1.Node{} - workerNodes, err := GetByRole(testutils.RoleWorker) - for _, node := range workerNodes { - for label := range nodeSelectorLabels { - if _, ok := node.Labels[label]; !ok { - nonPerformanceWorkerNodes = append(nonPerformanceWorkerNodes, node) - break - } - } - } - return nonPerformanceWorkerNodes, err -} - -// GetMachineConfigDaemonByNode returns the machine-config-daemon pod that runs on the specified node -func GetMachineConfigDaemonByNode(node *corev1.Node) (*corev1.Pod, error) { - listOptions := &client.ListOptions{ - Namespace: testutils.NamespaceMachineConfigOperator, - FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": node.Name}), - LabelSelector: labels.SelectorFromSet(labels.Set{"k8s-app": "machine-config-daemon"}), - } - - mcds := &corev1.PodList{} - if err := testclient.Client.List(context.TODO(), mcds, listOptions); err 
!= nil { - return nil, err - } - - if len(mcds.Items) < 1 { - return nil, fmt.Errorf("failed to get machine-config-daemon pod for the node %q", node.Name) - } - return &mcds.Items[0], nil -} - -// ExecCommandOnMachineConfigDaemon returns the output of the command execution on the machine-config-daemon pod that runs on the specified node -func ExecCommandOnMachineConfigDaemon(node *corev1.Node, command []string) ([]byte, error) { - mcd, err := GetMachineConfigDaemonByNode(node) - if err != nil { - return nil, err - } - testlog.Infof("found mcd %s for node %s", mcd.Name, node.Name) - - return testpods.WaitForPodOutput(testclient.K8sClient, mcd, command) -} - -// ExecCommandOnNode executes given command on given node and returns the result -func ExecCommandOnNode(cmd []string, node *corev1.Node) (string, error) { - out, err := ExecCommandOnMachineConfigDaemon(node, cmd) - if err != nil { - return "", err - } - - trimmedString := strings.Trim(string(out), "\n") - return strings.ReplaceAll(trimmedString, "\r", ""), nil -} - -// GetKubeletConfig returns KubeletConfiguration loaded from the node /etc/kubernetes/kubelet.conf -func GetKubeletConfig(node *corev1.Node) (*kubeletconfigv1beta1.KubeletConfiguration, error) { - command := []string{"cat", path.Join("/rootfs", testutils.FilePathKubeletConfig)} - kubeletBytes, err := ExecCommandOnMachineConfigDaemon(node, command) - if err != nil { - return nil, err - } - - testlog.Infof("command output: %s", string(kubeletBytes)) - kubeletConfig := &kubeletconfigv1beta1.KubeletConfiguration{} - if err := yaml.Unmarshal(kubeletBytes, kubeletConfig); err != nil { - return nil, err - } - return kubeletConfig, err -} - -// MatchingOptionalSelector filter the given slice with only the nodes matching the optional selector. -// If no selector is set, it returns the same list. -// The NODES_SELECTOR must be set with a labelselector expression. -// For example: NODES_SELECTOR="sctp=true" -// Inspired from: https://github.com/fedepaol/sriov-network-operator/blob/master/test/util/nodes/nodes.go -func MatchingOptionalSelector(toFilter []corev1.Node) ([]corev1.Node, error) { - if testutils.NodesSelector == "" { - return toFilter, nil - } - - selector, err := labels.Parse(testutils.NodesSelector) - if err != nil { - return nil, fmt.Errorf("Error parsing the %s label selector, %v", testutils.NodesSelector, err) - } - - toMatch, err := GetBySelector(selector) - if err != nil { - return nil, fmt.Errorf("Error in getting nodes matching the %s label selector, %v", testutils.NodesSelector, err) - } - if len(toMatch) == 0 { - return nil, fmt.Errorf("Failed to get nodes matching %s label selector", testutils.NodesSelector) - } - - res := make([]corev1.Node, 0) - for _, n := range toFilter { - for _, m := range toMatch { - if n.Name == m.Name { - res = append(res, n) - break - } - } - } - - return res, nil -} - -// HasPreemptRTKernel returns no error if the node booted with PREEMPT RT kernel -func HasPreemptRTKernel(node *corev1.Node) error { - // verify that the kernel-rt-core installed it also means the the machine booted with the RT kernel - // because the machine-config-daemon uninstalls regular kernel once you install the RT one and - // on traditional yum systems, rpm -q kernel can be completely different from what you're booted - // because yum keeps multiple kernels but only one userspace; - // with rpm-ostree rpm -q is telling you what you're booted into always, - // because ostree binds together (kernel, userspace) as a single commit. 
- cmd := []string{"chroot", "/rootfs", "rpm", "-q", "kernel-rt-core"} - if _, err := ExecCommandOnNode(cmd, node); err != nil { - return err - } - - cmd = []string{"/bin/bash", "-c", "cat /rootfs/sys/kernel/realtime"} - out, err := ExecCommandOnNode(cmd, node) - if err != nil { - return err - } - - if out != "1" { - return fmt.Errorf("RT kernel disabled") - } - - return nil -} - -func BannedCPUs(node corev1.Node) (banned cpuset.CPUSet, err error) { - cmd := []string{"sed", "-n", "s/^IRQBALANCE_BANNED_CPUS=\\(.*\\)/\\1/p", "/rootfs/etc/sysconfig/irqbalance"} - bannedCPUs, err := ExecCommandOnNode(cmd, &node) - if err != nil { - return cpuset.NewCPUSet(), fmt.Errorf("failed to execute %v: %v", cmd, err) - } - - if bannedCPUs == "" { - testlog.Infof("Banned CPUs on node %q returned empty set", node.Name) - return cpuset.NewCPUSet(), nil // TODO: should this be a error? - } - - banned, err = components.CPUMaskToCPUSet(bannedCPUs) - if err != nil { - return cpuset.NewCPUSet(), fmt.Errorf("failed to parse the banned CPUs: %v", err) - } - - return banned, nil -} - -// GetDefaultSmpAffinitySet returns the default smp affinity mask for the node -func GetDefaultSmpAffinitySet(node *corev1.Node) (cpuset.CPUSet, error) { - command := []string{"cat", "/proc/irq/default_smp_affinity"} - defaultSmpAffinity, err := ExecCommandOnNode(command, node) - if err != nil { - return cpuset.NewCPUSet(), err - } - return components.CPUMaskToCPUSet(defaultSmpAffinity) -} - -// GetOnlineCPUsSet returns the list of online (being scheduled) CPUs on the node -func GetOnlineCPUsSet(node *corev1.Node) (cpuset.CPUSet, error) { - command := []string{"cat", sysDevicesOnlineCPUs} - onlineCPUs, err := ExecCommandOnNode(command, node) - if err != nil { - return cpuset.NewCPUSet(), err - } - return cpuset.Parse(onlineCPUs) -} - -// GetSMTLevel returns the SMT level on the node using the given cpuID as target -// Use a random cpuID from the return value of GetOnlineCPUsSet if not sure -func GetSMTLevel(cpuID int, node *corev1.Node) int { - cmd := []string{"/bin/sh", "-c", fmt.Sprintf("cat /sys/devices/system/cpu/cpu%d/topology/thread_siblings_list | tr -d \"\n\r\"", cpuID)} - threadSiblingsList, err := ExecCommandOnNode(cmd, node) - ExpectWithOffset(1, err).ToNot(HaveOccurred()) - // how many thread sibling you have = SMT level - // example: 2-way SMT means 2 threads sibling for each thread - cpus, err := cpuset.Parse(strings.TrimSpace(string(threadSiblingsList))) - ExpectWithOffset(1, err).ToNot(HaveOccurred()) - return cpus.Size() -} - -// GetNumaNodes returns the number of numa nodes and the associated cpus as list on the node -func GetNumaNodes(node *corev1.Node) (map[int][]int, error) { - lscpuCmd := []string{"lscpu", "-e=cpu,node", "-J"} - cmdout, err := ExecCommandOnNode(lscpuCmd, node) - var numaNode, cpu int - if err != nil { - return nil, err - } - numaCpus := make(map[int][]int) - var result NumaNodes - err = json.Unmarshal([]byte(cmdout), &result) - if err != nil { - return nil, err - } - for _, value := range result.Cpus { - if numaNode, err = strconv.Atoi(value.Node); err != nil { - break - } - if cpu, err = strconv.Atoi(value.CPU); err != nil { - break - } - numaCpus[numaNode] = append(numaCpus[numaNode], cpu) - } - return numaCpus, err -} - -//TunedForNode find tuned pod for appropriate node -func TunedForNode(node *corev1.Node, sno bool) *corev1.Pod { - - listOptions := &client.ListOptions{ - Namespace: components.NamespaceNodeTuningOperator, - FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": 
node.Name}), - LabelSelector: labels.SelectorFromSet(labels.Set{"openshift-app": "tuned"}), - } - - tunedList := &corev1.PodList{} - Eventually(func() bool { - if err := testclient.Client.List(context.TODO(), tunedList, listOptions); err != nil { - return false - } - - if len(tunedList.Items) == 0 { - return false - } - for _, s := range tunedList.Items[0].Status.ContainerStatuses { - if s.Ready == false { - return false - } - } - return true - - }, cluster.ComputeTestTimeout(testTimeout*time.Second, sno), testPollInterval*time.Second).Should(BeTrue(), - "there should be one tuned daemon per node") - - return &tunedList.Items[0] -} - -func GetByCpuAllocatable(nodesList []corev1.Node, cpuQty int) []corev1.Node { - nodesWithSufficientCpu := []corev1.Node{} - for _, node := range nodesList { - allocatableCPU, _ := node.Status.Allocatable.Cpu().AsInt64() - if allocatableCPU >= int64(cpuQty) { - nodesWithSufficientCpu = append(nodesWithSufficientCpu, node) - } - } - return nodesWithSufficientCpu -} diff --git a/test/e2e/pao/functests/utils/pods/pods.go b/test/e2e/pao/functests/utils/pods/pods.go deleted file mode 100644 index a813c823f..000000000 --- a/test/e2e/pao/functests/utils/pods/pods.go +++ /dev/null @@ -1,219 +0,0 @@ -package pods - -import ( - "bytes" - "context" - "fmt" - "io" - "os" - "strings" - "time" - - "sigs.k8s.io/controller-runtime/pkg/client/config" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/tools/remotecommand" - "sigs.k8s.io/controller-runtime/pkg/client" - - testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client" - "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/images" -) - -// DefaultDeletionTimeout contains the default pod deletion timeout in seconds -const DefaultDeletionTimeout = 120 - -// GetTestPod returns pod with the busybox image -func GetTestPod() *corev1.Pod { - return &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: "test-", - Labels: map[string]string{ - "test": "", - }, - }, - Spec: corev1.PodSpec{ - Containers: []corev1.Container{ - { - Name: "test", - Image: images.Test(), - Command: []string{"sleep", "10h"}, - }, - }, - }, - } -} - -// WaitForDeletion waits until the pod will be removed from the cluster -func WaitForDeletion(pod *corev1.Pod, timeout time.Duration) error { - key := types.NamespacedName{ - Name: pod.Name, - Namespace: pod.Namespace, - } - return wait.PollImmediate(time.Second, timeout, func() (bool, error) { - pod := &corev1.Pod{} - if err := testclient.Client.Get(context.TODO(), key, pod); errors.IsNotFound(err) { - return true, nil - } - return false, nil - }) -} - -// WaitForCondition waits until the pod will have specified condition type with the expected status -func WaitForCondition(pod *corev1.Pod, conditionType corev1.PodConditionType, conditionStatus corev1.ConditionStatus, timeout time.Duration) error { - key := types.NamespacedName{ - Name: pod.Name, - Namespace: pod.Namespace, - } - return wait.PollImmediate(time.Second, timeout, func() (bool, error) { - updatedPod := &corev1.Pod{} - if err := testclient.Client.Get(context.TODO(), key, updatedPod); err != nil { - return false, nil - } - - for _, c := range updatedPod.Status.Conditions { - if c.Type == conditionType && c.Status == conditionStatus { - return 
true, nil - } - } - return false, nil - }) -} - -// WaitForPredicate waits until the given predicate against the pod returns true or error. -func WaitForPredicate(pod *corev1.Pod, timeout time.Duration, pred func(pod *corev1.Pod) (bool, error)) error { - return wait.PollImmediate(time.Second, timeout, func() (bool, error) { - updatedPod := &corev1.Pod{} - if err := testclient.Client.Get(context.TODO(), client.ObjectKeyFromObject(pod), updatedPod); err != nil { - return false, nil - } - - ret, err := pred(updatedPod) - if err != nil { - return false, err - } - return ret, nil - }) -} - -// WaitForPhase waits until the pod will have specified phase -func WaitForPhase(pod *corev1.Pod, phase corev1.PodPhase, timeout time.Duration) error { - key := types.NamespacedName{ - Name: pod.Name, - Namespace: pod.Namespace, - } - return wait.PollImmediate(time.Second, timeout, func() (bool, error) { - updatedPod := &corev1.Pod{} - if err := testclient.Client.Get(context.TODO(), key, updatedPod); err != nil { - return false, nil - } - - if updatedPod.Status.Phase == phase { - return true, nil - } - - return false, nil - }) -} - -// GetLogs returns logs of the specified pod -func GetLogs(c *kubernetes.Clientset, pod *corev1.Pod) (string, error) { - logStream, err := c.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &corev1.PodLogOptions{}).Stream(context.TODO()) - if err != nil { - return "", err - } - defer logStream.Close() - - buf := new(bytes.Buffer) - if _, err := io.Copy(buf, logStream); err != nil { - return "", err - } - - return buf.String(), nil -} - -// ExecCommandOnPod runs command in the pod and returns buffer output -func ExecCommandOnPod(c *kubernetes.Clientset, pod *corev1.Pod, command []string) ([]byte, error) { - var outputBuf bytes.Buffer - var errorBuf bytes.Buffer - - req := c.CoreV1().RESTClient(). - Post(). - Namespace(pod.Namespace). - Resource("pods"). - Name(pod.Name). - SubResource("exec"). 
- VersionedParams(&corev1.PodExecOptions{ - Container: pod.Spec.Containers[0].Name, - Command: command, - Stdin: true, - Stdout: true, - Stderr: true, - TTY: true, - }, scheme.ParameterCodec) - - cfg, err := config.GetConfig() - if err != nil { - return nil, err - } - - exec, err := remotecommand.NewSPDYExecutor(cfg, "POST", req.URL()) - if err != nil { - return nil, err - } - - err = exec.Stream(remotecommand.StreamOptions{ - Stdin: os.Stdin, - Stdout: &outputBuf, - Stderr: &errorBuf, - Tty: true, - }) - if err != nil { - return nil, fmt.Errorf("failed to run command %v: output %s; error %s", command, outputBuf.String(), errorBuf.String()) - } - - if errorBuf.Len() != 0 { - return nil, fmt.Errorf("failed to run command %v: output %s; error %s", command, outputBuf.String(), errorBuf.String()) - } - - return outputBuf.Bytes(), nil -} - -func WaitForPodOutput(c *kubernetes.Clientset, pod *corev1.Pod, command []string) ([]byte, error) { - var out []byte - if err := wait.PollImmediate(15*time.Second, time.Minute, func() (done bool, err error) { - out, err = ExecCommandOnPod(c, pod, command) - if err != nil { - return false, err - } - - return len(out) != 0, nil - }); err != nil { - return nil, err - } - - return out, nil -} - -// GetContainerIDByName returns container ID under the pod by the container name -func GetContainerIDByName(pod *corev1.Pod, containerName string) (string, error) { - updatedPod := &corev1.Pod{} - key := types.NamespacedName{ - Name: pod.Name, - Namespace: pod.Namespace, - } - if err := testclient.Client.Get(context.TODO(), key, updatedPod); err != nil { - return "", err - } - for _, containerStatus := range updatedPod.Status.ContainerStatuses { - if containerStatus.Name == containerName { - return strings.Trim(containerStatus.ContainerID, "cri-o://"), nil - } - } - return "", fmt.Errorf("failed to find the container ID for the container %q under the pod %q", containerName, pod.Name) -} diff --git a/test/e2e/pao/functests/utils/profiles/profiles.go b/test/e2e/pao/functests/utils/profiles/profiles.go deleted file mode 100644 index 7915dee7e..000000000 --- a/test/e2e/pao/functests/utils/profiles/profiles.go +++ /dev/null @@ -1,147 +0,0 @@ -package profiles - -import ( - "context" - "fmt" - "reflect" - "time" - - . 
"github.com/onsi/gomega" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - - performancev2 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/pao/v2" - testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client" - testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/log" - v1 "github.com/openshift/custom-resource-status/conditions/v1" -) - -// GetByNodeLabels gets the performance profile that must have node selector equals to passed node labels -func GetByNodeLabels(nodeLabels map[string]string) (*performancev2.PerformanceProfile, error) { - profiles, err := All() - if err != nil { - return nil, err - } - - var result *performancev2.PerformanceProfile - for i := 0; i < len(profiles.Items); i++ { - if reflect.DeepEqual(profiles.Items[i].Spec.NodeSelector, nodeLabels) { - if result != nil { - return nil, fmt.Errorf("found more than one performance profile with specified node selector %v", nodeLabels) - } - result = &profiles.Items[i] - } - } - - if result == nil { - return nil, fmt.Errorf("failed to find performance profile with specified node selector %v", nodeLabels) - } - - return result, nil -} - -// WaitForDeletion waits until the pod will be removed from the cluster -func WaitForDeletion(profileKey types.NamespacedName, timeout time.Duration) error { - return wait.PollImmediate(time.Second, timeout, func() (bool, error) { - prof := &performancev2.PerformanceProfile{} - if err := testclient.Client.Get(context.TODO(), profileKey, prof); errors.IsNotFound(err) { - return true, nil - } - return false, nil - }) -} - -// GetCondition the performance profile condition for the given type -func GetCondition(nodeLabels map[string]string, conditionType v1.ConditionType) *v1.Condition { - profile, err := GetByNodeLabels(nodeLabels) - ExpectWithOffset(1, err).ToNot(HaveOccurred(), "Failed getting profile by nodelabels %v", nodeLabels) - for _, condition := range profile.Status.Conditions { - if condition.Type == conditionType { - return &condition - } - } - return nil -} - -// GetConditionMessage gets the performance profile message for the given type -func GetConditionMessage(nodeLabels map[string]string, conditionType v1.ConditionType) string { - cond := GetCondition(nodeLabels, conditionType) - if cond != nil { - return cond.Message - } - return "" -} - -func GetConditionWithStatus(nodeLabels map[string]string, conditionType v1.ConditionType) *v1.Condition { - var cond *v1.Condition - EventuallyWithOffset(1, func() bool { - cond = GetCondition(nodeLabels, conditionType) - if cond == nil { - return false - } - return cond.Status == corev1.ConditionTrue - }, 30, 5).Should(BeTrue(), "condition %q not matched: %#v", conditionType, cond) - return cond -} - -// All gets all the exiting profiles in the cluster -func All() (*performancev2.PerformanceProfileList, error) { - profiles := &performancev2.PerformanceProfileList{} - if err := testclient.Client.List(context.TODO(), profiles); err != nil { - return nil, err - } - return profiles, nil -} - -func UpdateWithRetry(profile *performancev2.PerformanceProfile) { - EventuallyWithOffset(1, func() error { - updatedProfile := &performancev2.PerformanceProfile{} - key := types.NamespacedName{ - Name: profile.Name, - Namespace: profile.Namespace, - } - // We should get the updated version of the performance profile. 
- // Otherwise, we will always try to update the profile with the old resource version - // and will always get the conflict error - if err := testclient.Client.Get(context.TODO(), key, updatedProfile); err != nil { - return err - } - - updatedProfile.Spec = *profile.Spec.DeepCopy() - if err := testclient.Client.Update(context.TODO(), profile); err != nil { - if !errors.IsConflict(err) { - testlog.Errorf("failed to update the profile %q: %v", profile.Name, err) - } - - return err - } - - return nil - }, time.Minute, 5*time.Second).Should(BeNil()) -} - -func WaitForCondition(nodeLabels map[string]string, conditionType v1.ConditionType, conditionStatus corev1.ConditionStatus) { - EventuallyWithOffset(1, func() corev1.ConditionStatus { - return (GetCondition(nodeLabels, conditionType)).Status - }, 15*time.Minute, 30*time.Second).Should(Equal(conditionStatus), "Failed to met performance profile condition %v", conditionType) -} - -// Delete delete the existing profile by name -func Delete(name string) error { - profile := &performancev2.PerformanceProfile{} - if err := testclient.Client.Get(context.TODO(), types.NamespacedName{Name: name}, profile); err != nil { - if errors.IsNotFound(err) { - return nil - } - return err - } - - if err := testclient.Client.Delete(context.TODO(), profile); err != nil { - return err - } - - return nil -} diff --git a/test/e2e/pao/functests/utils/tuned/tuned.go b/test/e2e/pao/functests/utils/tuned/tuned.go deleted file mode 100644 index b13dd887f..000000000 --- a/test/e2e/pao/functests/utils/tuned/tuned.go +++ /dev/null @@ -1,55 +0,0 @@ -package tuned - -import ( - "context" - "fmt" - "time" - - tunedv1 "github.com/openshift/cluster-node-tuning-operator/pkg/apis/tuned/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/klog" - - "github.com/openshift/cluster-node-tuning-operator/pkg/pao/controller/performanceprofile/components" - testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/client" -) - -func WaitForAppliedCondition(tunedProfileNames []string, conditionStatus corev1.ConditionStatus, timeout time.Duration) error { - return wait.PollImmediate(time.Second, timeout, func() (bool, error) { - for _, tunedProfileName := range tunedProfileNames { - profile := &tunedv1.Profile{} - key := types.NamespacedName{ - Name: tunedProfileName, - Namespace: components.NamespaceNodeTuningOperator, - } - - if err := testclient.Client.Get(context.TODO(), key, profile); err != nil { - klog.Errorf("failed to get tuned profile %q: %v", tunedProfileName, err) - return false, nil - } - - appliedCondition, err := GetConditionByType(profile.Status.Conditions, tunedv1.TunedProfileApplied) - if err != nil { - klog.Errorf("failed to get applied condition for profile %q: %v", tunedProfileName, err) - return false, nil - } - - if appliedCondition.Status != conditionStatus { - return false, nil - } - } - - return true, nil - }) -} - -func GetConditionByType(conditions []tunedv1.ProfileStatusCondition, conditionType tunedv1.ProfileConditionType) (*tunedv1.ProfileStatusCondition, error) { - for i := range conditions { - c := &conditions[i] - if c.Type == conditionType { - return c, nil - } - } - return nil, fmt.Errorf("failed to found applied condition under conditions %v", conditions) -} diff --git a/test/e2e/pao/functests/utils/utils.go b/test/e2e/pao/functests/utils/utils.go deleted file mode 100644 index ce6858e77..000000000 --- a/test/e2e/pao/functests/utils/utils.go +++ /dev/null @@ 
-1,59 +0,0 @@ -package utils - -import ( - "bytes" - "context" - "fmt" - "os/exec" - "time" - - . "github.com/onsi/ginkgo" - - testlog "github.com/openshift/cluster-node-tuning-operator/test/e2e/pao/functests/utils/log" -) - -const defaultExecTimeout = 2 * time.Minute - -func BeforeAll(fn func()) { - first := true - BeforeEach(func() { - if first { - fn() - first = false - } - }) -} - -func ExecAndLogCommand(name string, arg ...string) ([]byte, error) { - outData, _, err := ExecAndLogCommandWithStderr(name, arg...) - return outData, err -} - -func ExecAndLogCommandWithStderr(name string, arg ...string) ([]byte, []byte, error) { - // Create a new context and add a timeout to it - ctx, cancel := context.WithTimeout(context.Background(), defaultExecTimeout) - defer cancel() // The cancel should be deferred so resources are cleaned up - - var stdout bytes.Buffer - var stderr bytes.Buffer - cmd := exec.CommandContext(ctx, name, arg...) - cmd.Stdout = &stdout - cmd.Stderr = &stderr - - err := cmd.Run() - outData := stdout.Bytes() - errData := stderr.Bytes() - testlog.Infof("run command '%s %v' (err=%v):\n stdout=%q\n stderr=%q", name, arg, err, outData, errData) - - // We want to check the context error to see if the timeout was executed. - // The error returned by cmd.Output() will be OS specific based on what - // happens when a process is killed. - if ctx.Err() == context.DeadlineExceeded { - return nil, nil, fmt.Errorf("command '%s %v' failed because of the timeout", name, arg) - } - - if _, ok := err.(*exec.ExitError); ok { - testlog.Infof("run command '%s %v' (err=%v):\n stderr=%s", name, arg, err, string(errData)) - } - return outData, errData, err -} diff --git a/test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/cluster-scoped-resources/core/nodes/master1.yaml b/test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/cluster-scoped-resources/core/nodes/master1.yaml deleted file mode 100755 index 6b2569577..000000000 --- a/test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/cluster-scoped-resources/core/nodes/master1.yaml +++ /dev/null @@ -1,456 +0,0 @@ ---- -apiVersion: v1 -kind: Node -metadata: - annotations: - machineconfiguration.openshift.io/currentConfig: rendered-master-e92b311a0208749dbba5c4458afcc653 - machineconfiguration.openshift.io/desiredConfig: rendered-master-e92b311a0208749dbba5c4458afcc653 - machineconfiguration.openshift.io/reason: "" - machineconfiguration.openshift.io/state: Done - volumes.kubernetes.io/controller-managed-attach-detach: "true" - creationTimestamp: "2020-11-25T07:56:34Z" - finalizers: - - metal3.io/capbm - labels: - beta.kubernetes.io/arch: amd64 - beta.kubernetes.io/os: linux - kubernetes.io/arch: amd64 - kubernetes.io/hostname: master1 - kubernetes.io/os: linux - node-role.kubernetes.io/master: "" - node-role.kubernetes.io/virtual: "" - node.openshift.io/os_id: rhcos - managedFields: - - apiVersion: v1 - fieldsType: FieldsV1 - fieldsV1: - f:metadata: - f:labels: - f:beta.kubernetes.io/arch: {} - f:beta.kubernetes.io/os: {} - f:spec: - f:podCIDR: {} - f:podCIDRs: - .: {} - v:"10.132.2.0/24": {} - f:taints: {} - manager: kube-controller-manager - 
operation: Update - time: "2020-11-25T07:59:04Z" - - apiVersion: v1 - fieldsType: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - f:machineconfiguration.openshift.io/currentConfig: {} - f:machineconfiguration.openshift.io/desiredConfig: {} - f:machineconfiguration.openshift.io/reason: {} - f:machineconfiguration.openshift.io/state: {} - manager: machine-config-daemon - operation: Update - time: "2020-11-25T08:01:08Z" - - apiVersion: v1 - fieldsType: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - f:machine.openshift.io/machine: {} - manager: nodelink-controller - operation: Update - time: "2020-11-25T08:14:20Z" - - apiVersion: v1 - fieldsType: FieldsV1 - fieldsV1: - f:metadata: - f:finalizers: - .: {} - v:"metal3.io/capbm": {} - f:spec: - f:providerID: {} - manager: machine-controller-manager - operation: Update - time: "2020-11-25T08:14:22Z" - - apiVersion: v1 - fieldsType: FieldsV1 - fieldsV1: - f:metadata: - f:labels: - f:node-role.kubernetes.io/virtual: {} - manager: kubectl-label - operation: Update - time: "2020-11-25T08:35:46Z" - - apiVersion: v1 - fieldsType: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - f:k8s.ovn.org/l3-gateway-config: {} - f:k8s.ovn.org/node-chassis-id: {} - f:k8s.ovn.org/node-join-subnets: {} - f:k8s.ovn.org/node-local-nat-ip: {} - f:k8s.ovn.org/node-mgmt-port-mac-address: {} - f:k8s.ovn.org/node-primary-ifaddr: {} - f:k8s.ovn.org/node-subnets: {} - manager: ovnkube - operation: Update - time: "2020-12-23T11:34:05Z" - - apiVersion: v1 - fieldsType: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - .: {} - f:volumes.kubernetes.io/controller-managed-attach-detach: {} - f:labels: - .: {} - f:kubernetes.io/arch: {} - f:kubernetes.io/hostname: {} - f:kubernetes.io/os: {} - f:node-role.kubernetes.io/master: {} - f:node.openshift.io/os_id: {} - f:status: - f:addresses: - .: {} - k:{"type":"Hostname"}: - .: {} - f:address: {} - f:type: {} - k:{"type":"InternalIP"}: - .: {} - f:address: {} - f:type: {} - f:allocatable: - .: {} - f:cpu: {} - f:ephemeral-storage: {} - f:hugepages-1Gi: {} - f:hugepages-2Mi: {} - f:memory: {} - f:pods: {} - f:capacity: - .: {} - f:cpu: {} - f:ephemeral-storage: {} - f:hugepages-1Gi: {} - f:hugepages-2Mi: {} - f:memory: {} - f:pods: {} - f:conditions: - .: {} - k:{"type":"DiskPressure"}: - .: {} - f:lastHeartbeatTime: {} - f:lastTransitionTime: {} - f:message: {} - f:reason: {} - f:status: {} - f:type: {} - k:{"type":"MemoryPressure"}: - .: {} - f:lastHeartbeatTime: {} - f:lastTransitionTime: {} - f:message: {} - f:reason: {} - f:status: {} - f:type: {} - k:{"type":"PIDPressure"}: - .: {} - f:lastHeartbeatTime: {} - f:lastTransitionTime: {} - f:message: {} - f:reason: {} - f:status: {} - f:type: {} - k:{"type":"Ready"}: - .: {} - f:lastHeartbeatTime: {} - f:lastTransitionTime: {} - f:message: {} - f:reason: {} - f:status: {} - f:type: {} - f:daemonEndpoints: - f:kubeletEndpoint: - f:Port: {} - f:images: {} - f:nodeInfo: - f:architecture: {} - f:bootID: {} - f:containerRuntimeVersion: {} - f:kernelVersion: {} - f:kubeProxyVersion: {} - f:kubeletVersion: {} - f:machineID: {} - f:operatingSystem: {} - f:osImage: {} - f:systemUUID: {} - manager: kubelet - operation: Update - time: "2021-02-19T00:31:02Z" - name: master1 - resourceVersion: "38562668" - selfLink: /api/v1/nodes/master1 - uid: 0feb1cf4-d396-4568-a79e-0fd7e771e966 -spec: - providerID: baremetalhost:///openshift-machine-api/cnfd1-master-1 - taints: - - effect: NoSchedule - key: node-role.kubernetes.io/master -status: - addresses: - - address: master1 - type: 
Hostname - allocatable: - cpu: 7500m - ephemeral-storage: "94993016264" - hugepages-1Gi: "0" - hugepages-2Mi: "0" - memory: 31782368Ki - pods: "250" - capacity: - cpu: "8" - ephemeral-storage: 101796Mi - hugepages-1Gi: "0" - hugepages-2Mi: "0" - memory: 32933344Ki - pods: "250" - conditions: - - lastHeartbeatTime: "2021-02-19T00:31:02Z" - lastTransitionTime: "2020-11-25T07:56:34Z" - message: kubelet has sufficient memory available - reason: KubeletHasSufficientMemory - status: "False" - type: MemoryPressure - - lastHeartbeatTime: "2021-02-19T00:31:02Z" - lastTransitionTime: "2020-11-25T07:56:34Z" - message: kubelet has no disk pressure - reason: KubeletHasNoDiskPressure - status: "False" - type: DiskPressure - - lastHeartbeatTime: "2021-02-19T00:31:02Z" - lastTransitionTime: "2020-11-25T07:56:34Z" - message: kubelet has sufficient PID available - reason: KubeletHasSufficientPID - status: "False" - type: PIDPressure - - lastHeartbeatTime: "2021-02-19T00:31:02Z" - lastTransitionTime: "2020-11-25T07:59:04Z" - message: kubelet is posting ready status - reason: KubeletReady - status: "True" - type: Ready - daemonEndpoints: - kubeletEndpoint: - Port: 10250 - images: - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f786db417224d3b4a5456f0a545f2a53b31ee9cc0f559a5738a93154a6367d0 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 884336421 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f27a23cd9f23951711f8aa7d66d4a6a1fd68071fa98ac0d5077a160a5d05f922 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 774713580 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d849673f6cc38712f0add9d478a6326f1f6c2d3e739f6b81574a403dabba0bd3 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 687443805 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:29e3f55ba5be8cc2f8a431411fc75c8bf2f07a5b55f4ab9a81c603052c82c5dd - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 505930943 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4cffc88a97ba39c1a6a9ce45cf406bb29011a72766dc6f4deb0d76f7cd6eb02a - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 486536450 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e21960829179b702d31bb220f8b61b9715b8e0fd91d671b8615b0a8599cf1f0 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 478316539 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c8f0b9a2852b15b45487c08c158e10f3b803d7a77538d6dbc1d991994f58bcee - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 418066712 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:456dff747967eadbbfc6e9c53b180049bbba09e85cba7d77abe0e36bfc02817a - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 375119644 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3b5f75e0bb4f4e2e74aee6016030bfcce9cf71e52244f6fa689a673e619b35a4 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 372122608 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:326516b79a528dc627e5a5d84c986fd35e5f8ff5cbd74ff0ef802473efccd285 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 342541880 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a02341afe177329a39e49836e4c49d194affc8c4754fb360f0f760806f3bc2f - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 341937980 
- - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a95399379b48916216c49b1e4d4786b19081378ccd5821b27d97b496edd56a86 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 341611087 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b9d4f93a8f4c88792d7040fa3c0572150197cee01bbe97595068a778f99e5a1 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 341519660 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e626fa44d64c4b9bf6dc66fafa7fa4e640eaeb15359d2f40bb0772c351b4dab5 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 340736830 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bb2bd6e3755a5523c0ed2d27f159218501dac1c4978e9bf37de475caa7eb9279 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 340548837 - - names: - - quay.io/openshift/origin-sriov-dp-admission-controller@sha256:3a9e0e5bccf522e7e9537bf626dd01f9894228b7a16573d209bf4856798e8e57 - - quay.io/openshift/origin-sriov-dp-admission-controller@sha256: - sizeBytes: 339726486 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7ea1a439cf30c216e0c201ceb5e6b51baf200e4df8353d8274449d682f5c82bc - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 339650807 - - names: - - quay.io/openshift/origin-sriov-dp-admission-controller@sha256:63c162756ed6b5e67daafbd34f636ca461a18ea12f1352ae6172d27c9c95aff8 - - quay.io/openshift/origin-sriov-dp-admission-controller@sha256: - sizeBytes: 339116800 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c052581031d9cb44b7e5a571db1cea25854733a977584a67718100cac56e2160 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 338045804 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c02fd4013a52b3d3047ae566f4e7e50c82c1087cb3acc59945cd01d718235e94 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 337522552 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a501d66461b0f24d2d551ec141617577ed417fdc4bc69db39f919e4e668a2889 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 331223794 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:02656bc5f5d78919453f86fa63c6531c6e0f1bbb4f3abcc662b6726b5619acec - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 330508411 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:759062d6a339a6915d499fc875f365cc1d3e52ededb6249ac19047b98dac9771 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 326516257 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98e66fae1973761fe4e11262a548f57495cea9db5279fb74be19e7debce21ada - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 324987061 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a9e1f154dc3826cac4cffe8b6a0b5b7b3e4630f50e87cc93a8ff18d72917242e - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 323395352 - - names: - - registry.svc.ci.openshift.org/ocp/release@sha256:6681fc3f83dda0856b43cecd25f2d226c3f90e8a42c7144dbc499f6ee0a086fc - - registry.svc.ci.openshift.org/ocp/release@sha256: - sizeBytes: 322031372 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f6746db8ee59600c8c3936d035aa30ad81890de42814ec0fafd12809a0c8eb39 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 321353407 - - names: - - 
quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:15d31443dbc6830af67840c6a199e3b93b03168d4d993e453bbacde702d4c25e - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 320374187 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:12531b40785d46fde636bedbe93f549c7a9bd5eab146468927ae8347fb9e4aac - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 320369930 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8cecd313f1a3ae30002be126597012302b54f2ae7d89b96c8bccc3eca2a06422 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 320158358 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cad230fbee655fa6a021a06d0a7e0888f7fec60127e467b18ec6ba93bcfc1d98 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 319394632 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b93d895f9b0733c924651a7f2ab3d0bb3854f4202eb55cb2086f13a4ce7aae84 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 318520120 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6aece72b8448aaf5533f64674acbddf8b51d21355336807e85e59f3bac25d3e7 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 317658369 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dc542a6eba6c4f4660d1575b30c35bb567a1778cce74475e64ed433721774b10 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 317575019 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9a2d75eb606e8cbf2fa0d203bfbc92e3db822286357c46d039ba74080c2dc08f - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 317484263 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6004374d28dc981473c392e52ff4d9d8ea1a753d560c6a2876f0aa84522f310c - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 317310170 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:60ff0a413ba64ee38c13f13902071fc7306f24eb46edcacc8778507cf78f15ef - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 317018973 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fa21f4b6288e4665090901e4904f12b0eae1a23d24fefaa06ba951b2b4ce017f - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 316357508 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:04b8d9018fdffac86149bdc49dcf68bc4bbd58ab784dffef1d6d4bb33b901fb3 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 311142601 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8cc28a3ed35c28e3184f33e3a7e8f4755af57ea1c321b3874d18acba711a8104 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 309676114 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:54e61ec737612af1eb38c01fb7829dcba44f4de3cbcb52f029a95c73b9c2d7fb - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 305778268 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7cdb835bfea7798987ac6db71bdc8b9f771cc4bfff1e56fa51369667161b7e7c - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 305316771 - - names: - - quay.io/openshift/origin-sriov-network-device-plugin@sha256:cb260fd8bd6914e52c3f2899be14990cab15afdd3620194b68013bea7b0e2826 - - quay.io/openshift/origin-sriov-network-device-plugin@sha256: - sizeBytes: 305304768 - - names: - - 
quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b0bbfa15a10b4308f07490670fb94203a5f63d9ad6b40e55b3bb959e9bf731d2 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 305027413 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7211fbc109efa51e20b4be2f5f327e00127076423ef384bde250d909da95257f - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 304947009 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d8bdb0125b1287565b5464a6d6181fd9fe706641efd68063bdb317270bd11887 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 304593198 - - names: - - quay.io/openshift/origin-sriov-network-webhook@sha256:5981ec1e4592b082b0a3e20b95da65863505b602867b0550772bd8f28e1cfd10 - - quay.io/openshift/origin-sriov-network-webhook@sha256: - sizeBytes: 297518232 - - names: - - quay.io/openshift/origin-sriov-network-webhook@sha256:57a58e1b2d8d3bd34555375f8f06b805745010f77854fada89a8219ad0237635 - - quay.io/openshift/origin-sriov-network-webhook@sha256: - sizeBytes: 295059596 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1855db32ca2d846c9ad9af104d2e27ffa41b1054af031ac3d19e412c330fc66e - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 278561358 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f80375a7ad29fb23de302b0e82ae460580681d1805829c214bad13e84d94b784 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 275648050 - nodeInfo: - architecture: amd64 - bootID: 49325cb9-9cdd-49cf-94c4-ef3e9e44a6bc - containerRuntimeVersion: cri-o://1.19.0-22.rhaos4.6.gitc0306f1.el8 - kernelVersion: 4.18.0-193.29.1.el8_2.x86_64 - kubeProxyVersion: v1.19.0+9f84db3 - kubeletVersion: v1.19.0+9f84db3 - machineID: 0d99e882a90649948d3a34973a6a2a50 - operatingSystem: linux - osImage: Red Hat Enterprise Linux CoreOS 46.82.202011061621-0 (Ootpa) - systemUUID: 0d99e882-a906-4994-8d3a-34973a6a2a50 diff --git a/test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/cluster-scoped-resources/core/nodes/master2.yaml b/test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/cluster-scoped-resources/core/nodes/master2.yaml deleted file mode 100755 index 64ecda005..000000000 --- a/test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/cluster-scoped-resources/core/nodes/master2.yaml +++ /dev/null @@ -1,457 +0,0 @@ ---- -apiVersion: v1 -kind: Node -metadata: - annotations: - machine.openshift.io/machine: master2 - machineconfiguration.openshift.io/currentConfig: rendered-master-e92b311a0208749dbba5c4458afcc653 - machineconfiguration.openshift.io/desiredConfig: rendered-master-e92b311a0208749dbba5c4458afcc653 - machineconfiguration.openshift.io/reason: "" - machineconfiguration.openshift.io/state: Done - volumes.kubernetes.io/controller-managed-attach-detach: "true" - creationTimestamp: "2020-11-25T07:56:25Z" - finalizers: - - metal3.io/capbm - labels: - beta.kubernetes.io/arch: amd64 - beta.kubernetes.io/os: linux - kubernetes.io/arch: amd64 - kubernetes.io/hostname: master2 - kubernetes.io/os: linux - node-role.kubernetes.io/master: "" - node-role.kubernetes.io/virtual: "" - 
node.openshift.io/os_id: rhcos - managedFields: - - apiVersion: v1 - fieldsType: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - f:k8s.ovn.org/l3-gateway-config: {} - f:k8s.ovn.org/node-chassis-id: {} - f:k8s.ovn.org/node-join-subnets: {} - f:k8s.ovn.org/node-local-nat-ip: {} - f:k8s.ovn.org/node-mgmt-port-mac-address: {} - f:k8s.ovn.org/node-primary-ifaddr: {} - f:k8s.ovn.org/node-subnets: {} - manager: ovnkube - operation: Update - time: "2020-11-25T07:59:01Z" - - apiVersion: v1 - fieldsType: FieldsV1 - fieldsV1: - f:metadata: - f:labels: - f:beta.kubernetes.io/arch: {} - f:beta.kubernetes.io/os: {} - f:spec: - f:podCIDR: {} - f:podCIDRs: - .: {} - v:"10.132.0.0/24": {} - f:taints: {} - manager: kube-controller-manager - operation: Update - time: "2020-11-25T07:59:15Z" - - apiVersion: v1 - fieldsType: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - f:machineconfiguration.openshift.io/currentConfig: {} - f:machineconfiguration.openshift.io/desiredConfig: {} - f:machineconfiguration.openshift.io/reason: {} - f:machineconfiguration.openshift.io/state: {} - manager: machine-config-daemon - operation: Update - time: "2020-11-25T08:01:34Z" - - apiVersion: v1 - fieldsType: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - f:machine.openshift.io/machine: {} - manager: nodelink-controller - operation: Update - time: "2020-11-25T08:14:28Z" - - apiVersion: v1 - fieldsType: FieldsV1 - fieldsV1: - f:metadata: - f:finalizers: - .: {} - v:"metal3.io/capbm": {} - f:spec: - f:providerID: {} - manager: machine-controller-manager - operation: Update - time: "2020-11-25T08:14:29Z" - - apiVersion: v1 - fieldsType: FieldsV1 - fieldsV1: - f:metadata: - f:labels: - f:node-role.kubernetes.io/virtual: {} - manager: kubectl-label - operation: Update - time: "2020-11-25T08:35:46Z" - - apiVersion: v1 - fieldsType: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - .: {} - f:volumes.kubernetes.io/controller-managed-attach-detach: {} - f:labels: - .: {} - f:kubernetes.io/arch: {} - f:kubernetes.io/hostname: {} - f:kubernetes.io/os: {} - f:node-role.kubernetes.io/master: {} - f:node.openshift.io/os_id: {} - f:status: - f:addresses: - .: {} - k:{"type":"Hostname"}: - .: {} - f:address: {} - f:type: {} - k:{"type":"InternalIP"}: - .: {} - f:address: {} - f:type: {} - f:allocatable: - .: {} - f:cpu: {} - f:ephemeral-storage: {} - f:hugepages-1Gi: {} - f:hugepages-2Mi: {} - f:memory: {} - f:pods: {} - f:capacity: - .: {} - f:cpu: {} - f:ephemeral-storage: {} - f:hugepages-1Gi: {} - f:hugepages-2Mi: {} - f:memory: {} - f:pods: {} - f:conditions: - .: {} - k:{"type":"DiskPressure"}: - .: {} - f:lastHeartbeatTime: {} - f:lastTransitionTime: {} - f:message: {} - f:reason: {} - f:status: {} - f:type: {} - k:{"type":"MemoryPressure"}: - .: {} - f:lastHeartbeatTime: {} - f:lastTransitionTime: {} - f:message: {} - f:reason: {} - f:status: {} - f:type: {} - k:{"type":"PIDPressure"}: - .: {} - f:lastHeartbeatTime: {} - f:lastTransitionTime: {} - f:message: {} - f:reason: {} - f:status: {} - f:type: {} - k:{"type":"Ready"}: - .: {} - f:lastHeartbeatTime: {} - f:lastTransitionTime: {} - f:message: {} - f:reason: {} - f:status: {} - f:type: {} - f:daemonEndpoints: - f:kubeletEndpoint: - f:Port: {} - f:images: {} - f:nodeInfo: - f:architecture: {} - f:bootID: {} - f:containerRuntimeVersion: {} - f:kernelVersion: {} - f:kubeProxyVersion: {} - f:kubeletVersion: {} - f:machineID: {} - f:operatingSystem: {} - f:osImage: {} - f:systemUUID: {} - manager: kubelet - operation: Update - time: "2021-02-19T00:30:04Z" - name: 
master2 - resourceVersion: "38562354" - selfLink: /api/v1/nodes/master2 - uid: 10695ccf-ea74-42ae-aef8-f1056bd428ef -spec: - providerID: baremetalhost:///openshift-machine-api/cnfd1-master-2 - taints: - - effect: NoSchedule - key: node-role.kubernetes.io/master -status: - addresses: - - address: master2 - type: Hostname - allocatable: - cpu: 7500m - ephemeral-storage: "94993016264" - hugepages-1Gi: "0" - hugepages-2Mi: "0" - memory: 31782572Ki - pods: "250" - capacity: - cpu: "8" - ephemeral-storage: 101796Mi - hugepages-1Gi: "0" - hugepages-2Mi: "0" - memory: 32933548Ki - pods: "250" - conditions: - - lastHeartbeatTime: "2021-02-19T00:30:04Z" - lastTransitionTime: "2020-11-25T07:56:25Z" - message: kubelet has sufficient memory available - reason: KubeletHasSufficientMemory - status: "False" - type: MemoryPressure - - lastHeartbeatTime: "2021-02-19T00:30:04Z" - lastTransitionTime: "2020-11-25T07:56:25Z" - message: kubelet has no disk pressure - reason: KubeletHasNoDiskPressure - status: "False" - type: DiskPressure - - lastHeartbeatTime: "2021-02-19T00:30:04Z" - lastTransitionTime: "2020-11-25T07:56:25Z" - message: kubelet has sufficient PID available - reason: KubeletHasSufficientPID - status: "False" - type: PIDPressure - - lastHeartbeatTime: "2021-02-19T00:30:04Z" - lastTransitionTime: "2020-11-25T07:59:15Z" - message: kubelet is posting ready status - reason: KubeletReady - status: "True" - type: Ready - daemonEndpoints: - kubeletEndpoint: - Port: 10250 - images: - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f786db417224d3b4a5456f0a545f2a53b31ee9cc0f559a5738a93154a6367d0 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 884336421 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ad6d87a8e1eee9fac58a6c85e34d8186509075f8ed2f1fe5efc9c9dda5138e00 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 783126428 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d849673f6cc38712f0add9d478a6326f1f6c2d3e739f6b81574a403dabba0bd3 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 687443805 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:796617722a915d79b478c9623b1d152a397478a7f6ba7ec71d39b9df2668cc80 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 673038171 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:29e3f55ba5be8cc2f8a431411fc75c8bf2f07a5b55f4ab9a81c603052c82c5dd - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 505930943 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4cffc88a97ba39c1a6a9ce45cf406bb29011a72766dc6f4deb0d76f7cd6eb02a - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 486536450 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c8f0b9a2852b15b45487c08c158e10f3b803d7a77538d6dbc1d991994f58bcee - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 418066712 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8b90545c9921788719f6653263fd8ba124b4545a9c0c078fdb8534b9ba5fa4f3 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 410819926 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:456dff747967eadbbfc6e9c53b180049bbba09e85cba7d77abe0e36bfc02817a - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 375119644 - - names: - - 
quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3b5f75e0bb4f4e2e74aee6016030bfcce9cf71e52244f6fa689a673e619b35a4 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 372122608 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f08231b8e9948d4894ff6e9a0f1b4aee1534ddb11bea8a9d9b53b2473e83a880 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 363172829 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9df19f010a2d4369d31278a842488e11b3cd24d3134efe335cea5884f63c501e - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 344855408 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:326516b79a528dc627e5a5d84c986fd35e5f8ff5cbd74ff0ef802473efccd285 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 342541880 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a02341afe177329a39e49836e4c49d194affc8c4754fb360f0f760806f3bc2f - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 341937980 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a95399379b48916216c49b1e4d4786b19081378ccd5821b27d97b496edd56a86 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 341611087 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e626fa44d64c4b9bf6dc66fafa7fa4e640eaeb15359d2f40bb0772c351b4dab5 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 340736830 - - names: - - quay.io/openshift/origin-sriov-dp-admission-controller@sha256:3a9e0e5bccf522e7e9537bf626dd01f9894228b7a16573d209bf4856798e8e57 - - quay.io/openshift/origin-sriov-dp-admission-controller@sha256: - sizeBytes: 339726486 - - names: - - quay.io/openshift/origin-sriov-dp-admission-controller@sha256:63c162756ed6b5e67daafbd34f636ca461a18ea12f1352ae6172d27c9c95aff8 - - quay.io/openshift/origin-sriov-dp-admission-controller@sha256: - sizeBytes: 339116800 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c052581031d9cb44b7e5a571db1cea25854733a977584a67718100cac56e2160 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 338045804 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4aa2cfd65a6d1ae112f591eb59336a05df72b59f6e053c418bfd5424ed372608 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 337811610 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a501d66461b0f24d2d551ec141617577ed417fdc4bc69db39f919e4e668a2889 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 331223794 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:02656bc5f5d78919453f86fa63c6531c6e0f1bbb4f3abcc662b6726b5619acec - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 330508411 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:759062d6a339a6915d499fc875f365cc1d3e52ededb6249ac19047b98dac9771 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 326516257 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f6746db8ee59600c8c3936d035aa30ad81890de42814ec0fafd12809a0c8eb39 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 321353407 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:15d31443dbc6830af67840c6a199e3b93b03168d4d993e453bbacde702d4c25e - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 320374187 - - names: - - 
quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:12531b40785d46fde636bedbe93f549c7a9bd5eab146468927ae8347fb9e4aac - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 320369930 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cad230fbee655fa6a021a06d0a7e0888f7fec60127e467b18ec6ba93bcfc1d98 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 319394632 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b93d895f9b0733c924651a7f2ab3d0bb3854f4202eb55cb2086f13a4ce7aae84 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 318520120 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6aece72b8448aaf5533f64674acbddf8b51d21355336807e85e59f3bac25d3e7 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 317658369 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4e580c9df1a09ab8e0647fc0e378d792a4c3078b4a06120264ab00917e71e783 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 316367713 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fa21f4b6288e4665090901e4904f12b0eae1a23d24fefaa06ba951b2b4ce017f - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 316357508 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8cc28a3ed35c28e3184f33e3a7e8f4755af57ea1c321b3874d18acba711a8104 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 309676114 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:423e5b0624ed0bb736c5320c37611b72dcbb2094e785c2ab588f584f65157289 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 308423009 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:54e61ec737612af1eb38c01fb7829dcba44f4de3cbcb52f029a95c73b9c2d7fb - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 305778268 - - names: - - quay.io/openshift/origin-sriov-network-device-plugin@sha256:cb260fd8bd6914e52c3f2899be14990cab15afdd3620194b68013bea7b0e2826 - - quay.io/openshift/origin-sriov-network-device-plugin@sha256: - sizeBytes: 305304768 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b0bbfa15a10b4308f07490670fb94203a5f63d9ad6b40e55b3bb959e9bf731d2 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 305027413 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7211fbc109efa51e20b4be2f5f327e00127076423ef384bde250d909da95257f - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 304947009 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d8bdb0125b1287565b5464a6d6181fd9fe706641efd68063bdb317270bd11887 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 304593198 - - names: - - quay.io/openshift/origin-sriov-network-webhook@sha256:5981ec1e4592b082b0a3e20b95da65863505b602867b0550772bd8f28e1cfd10 - - quay.io/openshift/origin-sriov-network-webhook@sha256: - sizeBytes: 297518232 - - names: - - quay.io/openshift/origin-sriov-network-webhook@sha256:57a58e1b2d8d3bd34555375f8f06b805745010f77854fada89a8219ad0237635 - - quay.io/openshift/origin-sriov-network-webhook@sha256: - sizeBytes: 295059596 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9f2296b282ae835a2345ca15bb2aa36d0a0178283cf76ebb2f3d26b34ac493bf - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 291968312 - - names: - - 
quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1855db32ca2d846c9ad9af104d2e27ffa41b1054af031ac3d19e412c330fc66e - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 278561358 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f80375a7ad29fb23de302b0e82ae460580681d1805829c214bad13e84d94b784 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 275648050 - - names: - - quay.io/openshift/origin-sriov-infiniband-cni@sha256:1b2878bcf2834fc94311680c51be12b0035f843cfe17ce1a2cfeae6823e49d14 - - quay.io/openshift/origin-sriov-infiniband-cni@sha256: - sizeBytes: 271428718 - - names: - - quay.io/openshift/origin-sriov-cni@sha256:122413b37f91bfb890f50ad435f93b247ef3a8f6fabb441c634750567d1781b4 - - quay.io/openshift/origin-sriov-cni@sha256: - sizeBytes: 269939282 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:00900d48c5796ecb8c0599ab6a0946347947dbcd2acc883665240c2ec9b33fd5 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 269278836 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:931e5b5dd5e6e36ed70cd72a07574f74408dfd371e0b3f8d41f78b4d99790bc1 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 257341738 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:db5160ca401c5e0a59d5488f41ab78177bacb4d0369a8c9c96149ef196d95852 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 252463529 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:531dcce5496318f2f32e008bf6cd03e713a36e73ea6fa8bdbf560a9c6c7f5b14 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 249896230 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cb639ce34790f2eb0bfae7bbe13806028d0d75f55d3eea63fd1f62677082c17c - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 241441090 - nodeInfo: - architecture: amd64 - bootID: ff6ea3bb-8793-46f3-b95d-b2b631961ebb - containerRuntimeVersion: cri-o://1.19.0-22.rhaos4.6.gitc0306f1.el8 - kernelVersion: 4.18.0-193.29.1.el8_2.x86_64 - kubeProxyVersion: v1.19.0+9f84db3 - kubeletVersion: v1.19.0+9f84db3 - machineID: 3b1f2fe4d89b47789345c4f6bd8b0cf1 - operatingSystem: linux - osImage: Red Hat Enterprise Linux CoreOS 46.82.202011061621-0 (Ootpa) - systemUUID: 3b1f2fe4-d89b-4778-9345-c4f6bd8b0cf1 diff --git a/test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/cluster-scoped-resources/core/nodes/master3.yaml b/test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/cluster-scoped-resources/core/nodes/master3.yaml deleted file mode 100755 index 0251741ba..000000000 --- a/test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/cluster-scoped-resources/core/nodes/master3.yaml +++ /dev/null @@ -1,457 +0,0 @@ ---- -apiVersion: v1 -kind: Node -metadata: - annotations: - machine.openshift.io/machine: openshift-machine-api/master3 - machineconfiguration.openshift.io/currentConfig: rendered-master-e92b311a0208749dbba5c4458afcc653 - machineconfiguration.openshift.io/desiredConfig: rendered-master-e92b311a0208749dbba5c4458afcc653 - machineconfiguration.openshift.io/reason: "" - 
machineconfiguration.openshift.io/state: Done - volumes.kubernetes.io/controller-managed-attach-detach: "true" - creationTimestamp: "2020-11-25T07:56:27Z" - finalizers: - - metal3.io/capbm - labels: - beta.kubernetes.io/arch: amd64 - beta.kubernetes.io/os: linux - kubernetes.io/arch: amd64 - kubernetes.io/hostname: master3 - kubernetes.io/os: linux - node-role.kubernetes.io/master: "" - node-role.kubernetes.io/virtual: "" - node.openshift.io/os_id: rhcos - managedFields: - - apiVersion: v1 - fieldsType: FieldsV1 - fieldsV1: - f:metadata: - f:labels: - f:beta.kubernetes.io/arch: {} - f:beta.kubernetes.io/os: {} - f:spec: - f:podCIDR: {} - f:podCIDRs: - .: {} - v:"10.132.1.0/24": {} - f:taints: {} - manager: kube-controller-manager - operation: Update - time: "2020-11-25T07:59:08Z" - - apiVersion: v1 - fieldsType: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - f:machineconfiguration.openshift.io/currentConfig: {} - f:machineconfiguration.openshift.io/desiredConfig: {} - f:machineconfiguration.openshift.io/reason: {} - f:machineconfiguration.openshift.io/state: {} - manager: machine-config-daemon - operation: Update - time: "2020-11-25T08:01:36Z" - - apiVersion: v1 - fieldsType: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - f:machine.openshift.io/machine: {} - manager: nodelink-controller - operation: Update - time: "2020-11-25T08:14:20Z" - - apiVersion: v1 - fieldsType: FieldsV1 - fieldsV1: - f:metadata: - f:finalizers: - .: {} - v:"metal3.io/capbm": {} - f:spec: - f:providerID: {} - manager: machine-controller-manager - operation: Update - time: "2020-11-25T08:14:21Z" - - apiVersion: v1 - fieldsType: FieldsV1 - fieldsV1: - f:metadata: - f:labels: - f:node-role.kubernetes.io/virtual: {} - manager: kubectl-label - operation: Update - time: "2020-11-25T08:35:46Z" - - apiVersion: v1 - fieldsType: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - f:k8s.ovn.org/l3-gateway-config: {} - f:k8s.ovn.org/node-chassis-id: {} - f:k8s.ovn.org/node-join-subnets: {} - f:k8s.ovn.org/node-local-nat-ip: {} - f:k8s.ovn.org/node-mgmt-port-mac-address: {} - f:k8s.ovn.org/node-primary-ifaddr: {} - f:k8s.ovn.org/node-subnets: {} - manager: ovnkube - operation: Update - time: "2020-12-23T11:34:09Z" - - apiVersion: v1 - fieldsType: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - .: {} - f:volumes.kubernetes.io/controller-managed-attach-detach: {} - f:labels: - .: {} - f:kubernetes.io/arch: {} - f:kubernetes.io/hostname: {} - f:kubernetes.io/os: {} - f:node-role.kubernetes.io/master: {} - f:node.openshift.io/os_id: {} - f:status: - f:addresses: - .: {} - k:{"type":"Hostname"}: - .: {} - f:address: {} - f:type: {} - k:{"type":"InternalIP"}: - .: {} - f:address: {} - f:type: {} - f:allocatable: - .: {} - f:cpu: {} - f:ephemeral-storage: {} - f:hugepages-1Gi: {} - f:hugepages-2Mi: {} - f:memory: {} - f:pods: {} - f:capacity: - .: {} - f:cpu: {} - f:ephemeral-storage: {} - f:hugepages-1Gi: {} - f:hugepages-2Mi: {} - f:memory: {} - f:pods: {} - f:conditions: - .: {} - k:{"type":"DiskPressure"}: - .: {} - f:lastHeartbeatTime: {} - f:lastTransitionTime: {} - f:message: {} - f:reason: {} - f:status: {} - f:type: {} - k:{"type":"MemoryPressure"}: - .: {} - f:lastHeartbeatTime: {} - f:lastTransitionTime: {} - f:message: {} - f:reason: {} - f:status: {} - f:type: {} - k:{"type":"PIDPressure"}: - .: {} - f:lastHeartbeatTime: {} - f:lastTransitionTime: {} - f:message: {} - f:reason: {} - f:status: {} - f:type: {} - k:{"type":"Ready"}: - .: {} - f:lastHeartbeatTime: {} - f:lastTransitionTime: {} - 
f:message: {} - f:reason: {} - f:status: {} - f:type: {} - f:daemonEndpoints: - f:kubeletEndpoint: - f:Port: {} - f:images: {} - f:nodeInfo: - f:architecture: {} - f:bootID: {} - f:containerRuntimeVersion: {} - f:kernelVersion: {} - f:kubeProxyVersion: {} - f:kubeletVersion: {} - f:machineID: {} - f:operatingSystem: {} - f:osImage: {} - f:systemUUID: {} - manager: kubelet - operation: Update - time: "2021-02-19T00:31:12Z" - name: master3 - resourceVersion: "38562754" - selfLink: /api/v1/nodes/master3 - uid: 94aabdc5-8e6f-4965-896d-3d68c8043fbf -spec: - providerID: baremetalhost:///openshift-machine-api/cnfd1-master-0 - taints: - - effect: NoSchedule - key: node-role.kubernetes.io/master -status: - addresses: - - address: master3 - type: Hostname - allocatable: - cpu: 7500m - ephemeral-storage: "94993016264" - hugepages-1Gi: "0" - hugepages-2Mi: "0" - memory: 31782368Ki - pods: "250" - capacity: - cpu: "8" - ephemeral-storage: 101796Mi - hugepages-1Gi: "0" - hugepages-2Mi: "0" - memory: 32933344Ki - pods: "250" - conditions: - - lastHeartbeatTime: "2021-02-19T00:31:12Z" - lastTransitionTime: "2020-11-25T07:56:27Z" - message: kubelet has sufficient memory available - reason: KubeletHasSufficientMemory - status: "False" - type: MemoryPressure - - lastHeartbeatTime: "2021-02-19T00:31:12Z" - lastTransitionTime: "2020-11-25T07:56:27Z" - message: kubelet has no disk pressure - reason: KubeletHasNoDiskPressure - status: "False" - type: DiskPressure - - lastHeartbeatTime: "2021-02-19T00:31:12Z" - lastTransitionTime: "2020-11-25T07:56:27Z" - message: kubelet has sufficient PID available - reason: KubeletHasSufficientPID - status: "False" - type: PIDPressure - - lastHeartbeatTime: "2021-02-19T00:31:12Z" - lastTransitionTime: "2020-11-25T07:59:08Z" - message: kubelet is posting ready status - reason: KubeletReady - status: "True" - type: Ready - daemonEndpoints: - kubeletEndpoint: - Port: 10250 - images: - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f786db417224d3b4a5456f0a545f2a53b31ee9cc0f559a5738a93154a6367d0 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 884336421 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f27a23cd9f23951711f8aa7d66d4a6a1fd68071fa98ac0d5077a160a5d05f922 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 774713580 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d849673f6cc38712f0add9d478a6326f1f6c2d3e739f6b81574a403dabba0bd3 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 687443805 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:102e8cced32335144b5567ed3159d31aa267d0b1f2e8de8454d53b175e1df718 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 519118014 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:29e3f55ba5be8cc2f8a431411fc75c8bf2f07a5b55f4ab9a81c603052c82c5dd - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 505930943 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4cffc88a97ba39c1a6a9ce45cf406bb29011a72766dc6f4deb0d76f7cd6eb02a - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 486536450 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e21960829179b702d31bb220f8b61b9715b8e0fd91d671b8615b0a8599cf1f0 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 478316539 - - names: - - 
quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c8f0b9a2852b15b45487c08c158e10f3b803d7a77538d6dbc1d991994f58bcee - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 418066712 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:456dff747967eadbbfc6e9c53b180049bbba09e85cba7d77abe0e36bfc02817a - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 375119644 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3b5f75e0bb4f4e2e74aee6016030bfcce9cf71e52244f6fa689a673e619b35a4 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 372122608 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:248079028275bd57deb5c810fc91b5c4e9138f706084bfa953aa64c833652ef0 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 348879632 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:326516b79a528dc627e5a5d84c986fd35e5f8ff5cbd74ff0ef802473efccd285 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 342541880 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a02341afe177329a39e49836e4c49d194affc8c4754fb360f0f760806f3bc2f - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 341937980 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47c2f751ab0d5ee88e2826749f1372e6a24db3d0c0c942136ae84db17cb7f086 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 341659335 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a95399379b48916216c49b1e4d4786b19081378ccd5821b27d97b496edd56a86 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 341611087 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e626fa44d64c4b9bf6dc66fafa7fa4e640eaeb15359d2f40bb0772c351b4dab5 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 340736830 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:831f29043e6a2933169c6595281c58c3c7e31232866e1ffe1130845d7b7744af - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 340684520 - - names: - - quay.io/openshift/origin-sriov-dp-admission-controller@sha256:3a9e0e5bccf522e7e9537bf626dd01f9894228b7a16573d209bf4856798e8e57 - - quay.io/openshift/origin-sriov-dp-admission-controller@sha256: - sizeBytes: 339726486 - - names: - - quay.io/openshift/origin-sriov-dp-admission-controller@sha256:63c162756ed6b5e67daafbd34f636ca461a18ea12f1352ae6172d27c9c95aff8 - - quay.io/openshift/origin-sriov-dp-admission-controller@sha256: - sizeBytes: 339116800 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c052581031d9cb44b7e5a571db1cea25854733a977584a67718100cac56e2160 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 338045804 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4aa2cfd65a6d1ae112f591eb59336a05df72b59f6e053c418bfd5424ed372608 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 337811610 - - names: - - quay.io/openshift/origin-must-gather@sha256:c42733fdc4d028c582f745822cd5bc4cfb924ebba62e2a9fb410e7bc255fe1f9 - - quay.io/openshift/origin-must-gather:latest - sizeBytes: 335337156 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a501d66461b0f24d2d551ec141617577ed417fdc4bc69db39f919e4e668a2889 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 331223794 - - names: - - 
quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:02656bc5f5d78919453f86fa63c6531c6e0f1bbb4f3abcc662b6726b5619acec - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 330508411 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:759062d6a339a6915d499fc875f365cc1d3e52ededb6249ac19047b98dac9771 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 326516257 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f6746db8ee59600c8c3936d035aa30ad81890de42814ec0fafd12809a0c8eb39 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 321353407 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:15d31443dbc6830af67840c6a199e3b93b03168d4d993e453bbacde702d4c25e - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 320374187 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:12531b40785d46fde636bedbe93f549c7a9bd5eab146468927ae8347fb9e4aac - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 320369930 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cad230fbee655fa6a021a06d0a7e0888f7fec60127e467b18ec6ba93bcfc1d98 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 319394632 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b93d895f9b0733c924651a7f2ab3d0bb3854f4202eb55cb2086f13a4ce7aae84 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 318520120 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6aece72b8448aaf5533f64674acbddf8b51d21355336807e85e59f3bac25d3e7 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 317658369 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4e580c9df1a09ab8e0647fc0e378d792a4c3078b4a06120264ab00917e71e783 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 316367713 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fa21f4b6288e4665090901e4904f12b0eae1a23d24fefaa06ba951b2b4ce017f - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 316357508 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:34fbc764dec54739ee9d466ef5f56cfdbd6d6e5784534e28da9eb0d1f011ef72 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 314684850 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:73f54514498459ae65d33619ab9208248bf217b67c115b74aa6688662e9e111a - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 311647548 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8cc28a3ed35c28e3184f33e3a7e8f4755af57ea1c321b3874d18acba711a8104 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 309676114 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:904e22f8c8970422b10208295ce05cacd9dc15fa0433806cb1b2035c74db193e - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 308789532 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:54e61ec737612af1eb38c01fb7829dcba44f4de3cbcb52f029a95c73b9c2d7fb - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 305778268 - - names: - - quay.io/openshift/origin-sriov-network-device-plugin@sha256:cb260fd8bd6914e52c3f2899be14990cab15afdd3620194b68013bea7b0e2826 - - quay.io/openshift/origin-sriov-network-device-plugin@sha256: - sizeBytes: 305304768 - - names: - - 
quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b0bbfa15a10b4308f07490670fb94203a5f63d9ad6b40e55b3bb959e9bf731d2 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 305027413 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7211fbc109efa51e20b4be2f5f327e00127076423ef384bde250d909da95257f - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 304947009 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d8bdb0125b1287565b5464a6d6181fd9fe706641efd68063bdb317270bd11887 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 304593198 - - names: - - quay.io/openshift/origin-sriov-network-operator@sha256:b125f8f9990b7341c704d365ad65b4ef17d3903c6be223a3301c7492b4e16b02 - - quay.io/openshift/origin-sriov-network-operator@sha256: - sizeBytes: 299461624 - - names: - - quay.io/openshift/origin-sriov-network-webhook@sha256:5981ec1e4592b082b0a3e20b95da65863505b602867b0550772bd8f28e1cfd10 - - quay.io/openshift/origin-sriov-network-webhook@sha256: - sizeBytes: 297518232 - - names: - - quay.io/openshift/origin-sriov-network-webhook@sha256:57a58e1b2d8d3bd34555375f8f06b805745010f77854fada89a8219ad0237635 - - quay.io/openshift/origin-sriov-network-webhook@sha256: - sizeBytes: 295059596 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1855db32ca2d846c9ad9af104d2e27ffa41b1054af031ac3d19e412c330fc66e - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 278561358 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f80375a7ad29fb23de302b0e82ae460580681d1805829c214bad13e84d94b784 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 275648050 - - names: - - quay.io/openshift/origin-sriov-infiniband-cni@sha256:1b2878bcf2834fc94311680c51be12b0035f843cfe17ce1a2cfeae6823e49d14 - - quay.io/openshift/origin-sriov-infiniband-cni@sha256: - sizeBytes: 271428718 - - names: - - quay.io/openshift/origin-sriov-cni@sha256:122413b37f91bfb890f50ad435f93b247ef3a8f6fabb441c634750567d1781b4 - - quay.io/openshift/origin-sriov-cni@sha256: - sizeBytes: 269939282 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:00900d48c5796ecb8c0599ab6a0946347947dbcd2acc883665240c2ec9b33fd5 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 269278836 - nodeInfo: - architecture: amd64 - bootID: bd5c30ee-8db5-49fa-a85a-59f1c78d217e - containerRuntimeVersion: cri-o://1.19.0-22.rhaos4.6.gitc0306f1.el8 - kernelVersion: 4.18.0-193.29.1.el8_2.x86_64 - kubeProxyVersion: v1.19.0+9f84db3 - kubeletVersion: v1.19.0+9f84db3 - machineID: ec4898e8241e4438aabada39dcbb6568 - operatingSystem: linux - osImage: Red Hat Enterprise Linux CoreOS 46.82.202011061621-0 (Ootpa) - systemUUID: ec4898e8-241e-4438-aaba-da39dcbb6568 diff --git a/test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/cluster-scoped-resources/core/nodes/worker1.yaml b/test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/cluster-scoped-resources/core/nodes/worker1.yaml deleted file mode 100755 index dcbb8b563..000000000 --- 
a/test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/cluster-scoped-resources/core/nodes/worker1.yaml +++ /dev/null @@ -1,490 +0,0 @@ -apiVersion: v1 -kind: Node -metadata: - annotations: - machine.openshift.io/machine: worker1 - machineconfiguration.openshift.io/currentConfig: rendered-worker-cnf-7c30be8313249d4d85afa8cc3f538b3a - machineconfiguration.openshift.io/desiredConfig: rendered-worker-cnf-7c30be8313249d4d85afa8cc3f538b3a - machineconfiguration.openshift.io/reason: "" - machineconfiguration.openshift.io/state: Done - sriovnetwork.openshift.io/state: Idle - volumes.kubernetes.io/controller-managed-attach-detach: "true" - creationTimestamp: "2020-11-25T09:31:11Z" - finalizers: - - metal3.io/capbm - labels: - beta.kubernetes.io/arch: amd64 - beta.kubernetes.io/os: linux - kubernetes.io/arch: amd64 - kubernetes.io/hostname: worker1 - kubernetes.io/os: linux - node-role.kubernetes.io/worker: "" - node-role.kubernetes.io/worker-cnf: "" - node.openshift.io/os_id: rhcos - sriov: "true" - sriov1: "true" - managedFields: - - apiVersion: v1 - fieldsType: FieldsV1 - fieldsV1: - f:metadata: - f:finalizers: - .: {} - v:"metal3.io/capbm": {} - f:spec: - f:providerID: {} - manager: machine-controller-manager - operation: Update - time: "2020-11-25T09:31:12Z" - - apiVersion: v1 - fieldsType: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - f:machine.openshift.io/machine: {} - manager: nodelink-controller - operation: Update - time: "2020-11-25T09:31:12Z" - - apiVersion: v1 - fieldsType: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - f:k8s.ovn.org/l3-gateway-config: {} - f:k8s.ovn.org/node-chassis-id: {} - f:k8s.ovn.org/node-join-subnets: {} - f:k8s.ovn.org/node-local-nat-ip: {} - f:k8s.ovn.org/node-mgmt-port-mac-address: {} - f:k8s.ovn.org/node-primary-ifaddr: {} - f:k8s.ovn.org/node-subnets: {} - manager: ovnkube - operation: Update - time: "2020-11-25T09:31:54Z" - - apiVersion: v1 - fieldsType: FieldsV1 - fieldsV1: - f:metadata: - f:labels: - f:sriov: {} - f:sriov1: {} - manager: oc - operation: Update - time: "2020-11-26T09:51:05Z" - - apiVersion: v1 - fieldsType: FieldsV1 - fieldsV1: - f:metadata: - f:labels: - f:node-role.kubernetes.io/worker-cnf: {} - manager: kubectl-label - operation: Update - time: "2021-01-14T13:44:04Z" - - apiVersion: v1 - fieldsType: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - f:machineconfiguration.openshift.io/desiredConfig: {} - manager: machine-config-controller - operation: Update - time: "2021-02-09T08:46:19Z" - - apiVersion: v1 - fieldsType: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - f:machineconfiguration.openshift.io/currentConfig: {} - f:machineconfiguration.openshift.io/reason: {} - f:machineconfiguration.openshift.io/state: {} - manager: machine-config-daemon - operation: Update - time: "2021-02-09T08:54:28Z" - - apiVersion: v1 - fieldsType: FieldsV1 - fieldsV1: - f:metadata: - f:labels: - f:beta.kubernetes.io/arch: {} - f:beta.kubernetes.io/os: {} - f:spec: - f:podCIDR: {} - f:podCIDRs: - .: {} - v:"10.132.4.0/24": {} - manager: kube-controller-manager - operation: Update - time: "2021-02-09T08:54:37Z" - - apiVersion: v1 - fieldsType: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - f:sriovnetwork.openshift.io/state: {} - manager: sriov-network-config-daemon - operation: Update - time: "2021-02-09T08:58:16Z" - - apiVersion: v1 - fieldsType: FieldsV1 - fieldsV1: - f:metadata: - 
f:annotations: - .: {} - f:volumes.kubernetes.io/controller-managed-attach-detach: {} - f:labels: - .: {} - f:kubernetes.io/arch: {} - f:kubernetes.io/hostname: {} - f:kubernetes.io/os: {} - f:node-role.kubernetes.io/worker: {} - f:node.openshift.io/os_id: {} - f:status: - f:addresses: - .: {} - k:{"type":"Hostname"}: - .: {} - f:address: {} - f:type: {} - k:{"type":"InternalIP"}: - .: {} - f:address: {} - f:type: {} - f:allocatable: - .: {} - f:cpu: {} - f:ephemeral-storage: {} - f:hugepages-1Gi: {} - f:hugepages-2Mi: {} - f:memory: {} - f:openshift.io/mainpfresource: {} - f:openshift.io/sriovresource: {} - f:pods: {} - f:capacity: - .: {} - f:cpu: {} - f:ephemeral-storage: {} - f:hugepages-1Gi: {} - f:hugepages-2Mi: {} - f:memory: {} - f:openshift.io/mainpfresource: {} - f:openshift.io/sriovresource: {} - f:pods: {} - f:conditions: - .: {} - k:{"type":"DiskPressure"}: - .: {} - f:lastHeartbeatTime: {} - f:lastTransitionTime: {} - f:message: {} - f:reason: {} - f:status: {} - f:type: {} - k:{"type":"MemoryPressure"}: - .: {} - f:lastHeartbeatTime: {} - f:lastTransitionTime: {} - f:message: {} - f:reason: {} - f:status: {} - f:type: {} - k:{"type":"PIDPressure"}: - .: {} - f:lastHeartbeatTime: {} - f:lastTransitionTime: {} - f:message: {} - f:reason: {} - f:status: {} - f:type: {} - k:{"type":"Ready"}: - .: {} - f:lastHeartbeatTime: {} - f:lastTransitionTime: {} - f:message: {} - f:reason: {} - f:status: {} - f:type: {} - f:daemonEndpoints: - f:kubeletEndpoint: - f:Port: {} - f:images: {} - f:nodeInfo: - f:architecture: {} - f:bootID: {} - f:containerRuntimeVersion: {} - f:kernelVersion: {} - f:kubeProxyVersion: {} - f:kubeletVersion: {} - f:machineID: {} - f:operatingSystem: {} - f:osImage: {} - f:systemUUID: {} - manager: kubelet - operation: Update - time: "2021-02-19T00:29:17Z" - name: worker1 - resourceVersion: "38561991" - selfLink: /api/v1/nodes/worker1 - uid: a81c784d-04ca-4674-b50a-fc34586f92b8 -spec: - providerID: baremetalhost:///openshift-machine-api/cnfd1-worker-1 -status: - addresses: - - address: worker1 - type: Hostname - allocatable: - cpu: "75" - ephemeral-storage: "429960199479" - hugepages-1Gi: 1Gi - hugepages-2Mi: 256Mi - memory: 392289704Ki - openshift.io/mainpfresource: "0" - openshift.io/sriovresource: "16" - pods: "250" - capacity: - cpu: "80" - ephemeral-storage: 456740Mi - hugepages-1Gi: 1Gi - hugepages-2Mi: 256Mi - memory: 394726824Ki - openshift.io/mainpfresource: "0" - openshift.io/sriovresource: "16" - pods: "250" - conditions: - - lastHeartbeatTime: "2021-02-19T00:29:17Z" - lastTransitionTime: "2021-02-09T08:53:47Z" - message: kubelet has sufficient memory available - reason: KubeletHasSufficientMemory - status: "False" - type: MemoryPressure - - lastHeartbeatTime: "2021-02-19T00:29:17Z" - lastTransitionTime: "2021-02-09T08:53:47Z" - message: kubelet has no disk pressure - reason: KubeletHasNoDiskPressure - status: "False" - type: DiskPressure - - lastHeartbeatTime: "2021-02-19T00:29:17Z" - lastTransitionTime: "2021-02-09T08:53:47Z" - message: kubelet has sufficient PID available - reason: KubeletHasSufficientPID - status: "False" - type: PIDPressure - - lastHeartbeatTime: "2021-02-19T00:29:17Z" - lastTransitionTime: "2021-02-09T08:53:47Z" - message: kubelet is posting ready status - reason: KubeletReady - status: "True" - type: Ready - daemonEndpoints: - kubeletEndpoint: - Port: 10250 - images: - - names: - - quay.io/openshift-kni/cnf-tests@sha256:23d5b605bd234802b4923a7fb45ca12ae1ebcebc965900a1903593f5e6f6e64e - - quay.io/openshift-kni/cnf-tests:4.5 - 
sizeBytes: 1097313747 - - names: - - @ - - registry.redhat.io/redhat/certified-operator-index: - sizeBytes: 1053932844 - - names: - - @ - - registry.redhat.io/redhat/certified-operator-index: - sizeBytes: 1053932842 - - names: - - @ - - registry.redhat.io/redhat/certified-operator-index: - sizeBytes: 1053756716 - - names: - - @ - - registry.redhat.io/redhat/certified-operator-index: - sizeBytes: 1053629741 - - names: - - @ - - registry.redhat.io/redhat/certified-operator-index: - sizeBytes: 1053609261 - - names: - - @ - - registry.redhat.io/redhat/certified-operator-index: - sizeBytes: 1053601070 - - names: - - @ - - registry.redhat.io/redhat/certified-operator-index: - sizeBytes: 1053601069 - - names: - - @ - - registry.redhat.io/redhat/certified-operator-index: - sizeBytes: 1053601069 - - names: - - @ - - registry.redhat.io/redhat/certified-operator-index: - sizeBytes: 1053601069 - - names: - - @ - - registry.redhat.io/redhat/certified-operator-index: - sizeBytes: 1053601069 - - names: - - @ - - registry.redhat.io/redhat/certified-operator-index: - sizeBytes: 1053601069 - - names: - - @ - - registry.redhat.io/redhat/certified-operator-index: - sizeBytes: 1053601069 - - names: - - @ - - registry.redhat.io/redhat/certified-operator-index: - sizeBytes: 1053601068 - - names: - - @ - - registry.redhat.io/redhat/certified-operator-index: - sizeBytes: 1053601068 - - names: - - @ - - registry.redhat.io/redhat/certified-operator-index: - sizeBytes: 1053601068 - - names: - - @ - - registry.redhat.io/redhat/certified-operator-index: - sizeBytes: 1053601068 - - names: - - @ - - registry.redhat.io/redhat/certified-operator-index: - sizeBytes: 1053601065 - - names: - - @ - - registry.redhat.io/redhat/certified-operator-index: - sizeBytes: 1053531438 - - names: - - @ - - registry.redhat.io/redhat/certified-operator-index: - sizeBytes: 1053531438 - - names: - - @ - - registry.redhat.io/redhat/certified-operator-index: - sizeBytes: 1053531438 - - names: - - @ - - registry.redhat.io/redhat/certified-operator-index: - sizeBytes: 1053531438 - - names: - - @ - - registry.redhat.io/redhat/certified-operator-index: - sizeBytes: 1053531438 - - names: - - @ - - registry.redhat.io/redhat/certified-operator-index: - sizeBytes: 1053531438 - - names: - - @ - - registry.redhat.io/redhat/certified-operator-index: - sizeBytes: 1053531437 - - names: - - @ - - registry.redhat.io/redhat/certified-operator-index: - sizeBytes: 1053531437 - - names: - - @ - - registry.redhat.io/redhat/certified-operator-index: - sizeBytes: 1053531437 - - names: - - @ - - registry.redhat.io/redhat/certified-operator-index: - sizeBytes: 1053531436 - - names: - - @ - - registry.redhat.io/redhat/certified-operator-index: - sizeBytes: 1053531435 - - names: - - @ - - registry.redhat.io/redhat/certified-operator-index: - sizeBytes: 1053531435 - - names: - - @ - - registry.redhat.io/redhat/certified-operator-index: - sizeBytes: 1053531435 - - names: - - @ - - registry.redhat.io/redhat/certified-operator-index: - sizeBytes: 1053531435 - - names: - - @ - - registry.redhat.io/redhat/redhat-operator-index: - sizeBytes: 1048313133 - - names: - - @ - - registry.redhat.io/redhat/redhat-operator-index: - sizeBytes: 1048313133 - - names: - - @ - - registry.redhat.io/redhat/redhat-operator-index: - sizeBytes: 1048313133 - - names: - - @ - - registry.redhat.io/redhat/redhat-operator-index: - sizeBytes: 1048313132 - - names: - - @ - - registry.redhat.io/redhat/redhat-operator-index: - sizeBytes: 1048313132 - - names: - - @ - - 
registry.redhat.io/redhat/redhat-operator-index: - sizeBytes: 1048202542 - - names: - - @ - - registry.redhat.io/redhat/redhat-operator-index: - sizeBytes: 1048202541 - - names: - - @ - - registry.redhat.io/redhat/redhat-operator-index: - sizeBytes: 1048202541 - - names: - - @ - - registry.redhat.io/redhat/redhat-operator-index: - sizeBytes: 1048202541 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a07465e873bd8a4258f92ed43e2855d83c7d43ce5ee0010cd2d0c38966fefc24 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 1037703019 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f786db417224d3b4a5456f0a545f2a53b31ee9cc0f559a5738a93154a6367d0 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 884336421 - - names: - - quay.io/openshift/origin-sriov-network-config-daemon@sha256:22b2c92e8c1bfc0fba4c6cd757bda1a869647f90d4ce188c3fffc6ea785535c0 - - quay.io/openshift/origin-sriov-network-config-daemon@sha256: - sizeBytes: 860936634 - - names: - - quay.io/swsehgal/perf-tools@sha256:529cc1c652599fcecc92c4b4334e1d593dfc43ebf1e327485a6ba2d863cc419d - - quay.io/swsehgal/perf-tools:latest - sizeBytes: 845919423 - - names: - - quay.io/openshift/origin-sriov-network-config-daemon@sha256:64e74fb162152cd81ab65b9f8fcbe4be8c570500d0a6c201955bed8004673ee3 - - quay.io/openshift/origin-sriov-network-config-daemon@sha256: - sizeBytes: 749741996 - - names: - - @ - - registry.redhat.io/redhat/certified-operator-index: - sizeBytes: 674694444 - - names: - - @ - - registry.redhat.io/redhat/certified-operator-index: - sizeBytes: 674682158 - - names: - - @ - - registry.redhat.io/redhat/certified-operator-index: - sizeBytes: 674518316 - - names: - - @ - - registry.redhat.io/redhat/certified-operator-index: - sizeBytes: 674465069 - nodeInfo: - architecture: amd64 - bootID: a4499a06-7e13-492f-8537-b143f58c80b8 - containerRuntimeVersion: cri-o://1.19.0-22.rhaos4.6.gitc0306f1.el8 - kernelVersion: 4.18.0-193.28.1.rt13.77.el8_2.x86_64 - kubeProxyVersion: v1.19.0+9f84db3 - kubeletVersion: v1.19.0+9f84db3 - machineID: b407dceb69cb4148a6eaa208f24ef788 - operatingSystem: linux - osImage: Red Hat Enterprise Linux CoreOS 46.82.202011061621-0 (Ootpa) - systemUUID: 4c4c4544-0048-3510-8056-b3c04f505832 diff --git a/test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/cluster-scoped-resources/core/nodes/worker2.yaml b/test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/cluster-scoped-resources/core/nodes/worker2.yaml deleted file mode 100755 index ff87af6b5..000000000 --- a/test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/cluster-scoped-resources/core/nodes/worker2.yaml +++ /dev/null @@ -1,465 +0,0 @@ ---- -apiVersion: v1 -kind: Node -metadata: - annotations: - machine.openshift.io/machine: worker2 - machineconfiguration.openshift.io/currentConfig: rendered-worker-ab646b3e1548515048010140962bf08d - machineconfiguration.openshift.io/desiredConfig: rendered-worker-ab646b3e1548515048010140962bf08d - machineconfiguration.openshift.io/reason: "" - machineconfiguration.openshift.io/state: Done - sriovnetwork.openshift.io/state: Idle - 
volumes.kubernetes.io/controller-managed-attach-detach: "true" - creationTimestamp: "2020-11-25T08:22:32Z" - finalizers: - - metal3.io/capbm - labels: - beta.kubernetes.io/arch: amd64 - beta.kubernetes.io/os: linux - kubernetes.io/arch: amd64 - kubernetes.io/hostname: worker2 - kubernetes.io/os: linux - node-role.kubernetes.io/worker: "" - node.openshift.io/os_id: rhcos - managedFields: - - apiVersion: v1 - fieldsType: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - f:machine.openshift.io/machine: {} - manager: nodelink-controller - operation: Update - time: "2020-11-25T08:22:34Z" - - apiVersion: v1 - fieldsType: FieldsV1 - fieldsV1: - f:metadata: - f:finalizers: - .: {} - v:"metal3.io/capbm": {} - f:spec: - f:providerID: {} - manager: machine-controller-manager - operation: Update - time: "2020-11-25T08:22:35Z" - - apiVersion: v1 - fieldsType: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - f:sriovnetwork.openshift.io/state: {} - manager: sriov-network-config-daemon - operation: Update - time: "2020-11-25T11:21:05Z" - - apiVersion: v1 - fieldsType: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - f:k8s.ovn.org/l3-gateway-config: {} - f:k8s.ovn.org/node-chassis-id: {} - f:k8s.ovn.org/node-join-subnets: {} - f:k8s.ovn.org/node-local-nat-ip: {} - f:k8s.ovn.org/node-mgmt-port-mac-address: {} - f:k8s.ovn.org/node-primary-ifaddr: {} - f:k8s.ovn.org/node-subnets: {} - manager: ovnkube - operation: Update - time: "2021-01-13T01:54:45Z" - - apiVersion: v1 - fieldsType: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - f:machineconfiguration.openshift.io/desiredConfig: {} - manager: machine-config-controller - operation: Update - time: "2021-01-14T11:53:15Z" - - apiVersion: v1 - fieldsType: FieldsV1 - fieldsV1: - f:metadata: - f:labels: - f:beta.kubernetes.io/arch: {} - f:beta.kubernetes.io/os: {} - f:spec: - f:podCIDR: {} - f:podCIDRs: - .: {} - v:"10.132.3.0/24": {} - manager: kube-controller-manager - operation: Update - time: "2021-01-14T11:58:47Z" - - apiVersion: v1 - fieldsType: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - f:machineconfiguration.openshift.io/currentConfig: {} - f:machineconfiguration.openshift.io/reason: {} - f:machineconfiguration.openshift.io/state: {} - manager: machine-config-daemon - operation: Update - time: "2021-01-14T11:59:22Z" - - apiVersion: v1 - fieldsType: FieldsV1 - fieldsV1: - f:metadata: - f:annotations: - .: {} - f:volumes.kubernetes.io/controller-managed-attach-detach: {} - f:labels: - .: {} - f:kubernetes.io/arch: {} - f:kubernetes.io/hostname: {} - f:kubernetes.io/os: {} - f:node-role.kubernetes.io/worker: {} - f:node.openshift.io/os_id: {} - f:status: - f:addresses: - .: {} - k:{"type":"Hostname"}: - .: {} - f:address: {} - f:type: {} - k:{"type":"InternalIP"}: - .: {} - f:address: {} - f:type: {} - f:allocatable: - .: {} - f:cpu: {} - f:ephemeral-storage: {} - f:hugepages-1Gi: {} - f:hugepages-2Mi: {} - f:memory: {} - f:pods: {} - f:capacity: - .: {} - f:cpu: {} - f:ephemeral-storage: {} - f:hugepages-1Gi: {} - f:hugepages-2Mi: {} - f:memory: {} - f:pods: {} - f:conditions: - .: {} - k:{"type":"DiskPressure"}: - .: {} - f:lastHeartbeatTime: {} - f:lastTransitionTime: {} - f:message: {} - f:reason: {} - f:status: {} - f:type: {} - k:{"type":"MemoryPressure"}: - .: {} - f:lastHeartbeatTime: {} - f:lastTransitionTime: {} - f:message: {} - f:reason: {} - f:status: {} - f:type: {} - k:{"type":"PIDPressure"}: - .: {} - f:lastHeartbeatTime: {} - f:lastTransitionTime: {} - f:message: {} - f:reason: {} - f:status: {} - f:type: {} - 
k:{"type":"Ready"}: - .: {} - f:lastHeartbeatTime: {} - f:lastTransitionTime: {} - f:message: {} - f:reason: {} - f:status: {} - f:type: {} - f:daemonEndpoints: - f:kubeletEndpoint: - f:Port: {} - f:images: {} - f:nodeInfo: - f:architecture: {} - f:bootID: {} - f:containerRuntimeVersion: {} - f:kernelVersion: {} - f:kubeProxyVersion: {} - f:kubeletVersion: {} - f:machineID: {} - f:operatingSystem: {} - f:osImage: {} - f:systemUUID: {} - manager: kubelet - operation: Update - time: "2021-02-19T00:31:54Z" - name: worker2 - resourceVersion: "38563046" - selfLink: /api/v1/nodes/worker2 - uid: 19aa6782-98c7-44a0-a57f-acc4ac4881cd -spec: - providerID: baremetalhost:///openshift-machine-api/cnfd1-worker-0 -status: - addresses: - - address: worker2 - type: Hostname - allocatable: - cpu: 23500m - ephemeral-storage: "94993016264" - hugepages-1Gi: "0" - hugepages-2Mi: "0" - memory: 15263896Ki - pods: "250" - capacity: - cpu: "24" - ephemeral-storage: 101796Mi - hugepages-1Gi: "0" - hugepages-2Mi: "0" - memory: 16414872Ki - pods: "250" - conditions: - - lastHeartbeatTime: "2021-02-19T00:31:54Z" - lastTransitionTime: "2021-01-14T11:58:36Z" - message: kubelet has sufficient memory available - reason: KubeletHasSufficientMemory - status: "False" - type: MemoryPressure - - lastHeartbeatTime: "2021-02-19T00:31:54Z" - lastTransitionTime: "2021-01-14T11:58:36Z" - message: kubelet has no disk pressure - reason: KubeletHasNoDiskPressure - status: "False" - type: DiskPressure - - lastHeartbeatTime: "2021-02-19T00:31:54Z" - lastTransitionTime: "2021-01-14T11:58:36Z" - message: kubelet has sufficient PID available - reason: KubeletHasSufficientPID - status: "False" - type: PIDPressure - - lastHeartbeatTime: "2021-02-19T00:31:54Z" - lastTransitionTime: "2021-01-14T11:58:46Z" - message: kubelet is posting ready status - reason: KubeletReady - status: "True" - type: Ready - daemonEndpoints: - kubeletEndpoint: - Port: 10250 - images: - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f786db417224d3b4a5456f0a545f2a53b31ee9cc0f559a5738a93154a6367d0 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 884336421 - - names: - - quay.io/openshift/origin-sriov-network-config-daemon@sha256:22b2c92e8c1bfc0fba4c6cd757bda1a869647f90d4ce188c3fffc6ea785535c0 - - quay.io/openshift/origin-sriov-network-config-daemon@sha256: - sizeBytes: 860936634 - - names: - - quay.io/openshift/origin-sriov-network-config-daemon@sha256:64e74fb162152cd81ab65b9f8fcbe4be8c570500d0a6c201955bed8004673ee3 - - quay.io/openshift/origin-sriov-network-config-daemon@sha256: - sizeBytes: 749741996 - - names: - - registry.redhat.io/redhat/certified-operator-index@sha256:1972d34d39f2c7c4511ff3b8510e1c3e4738192211387947b23ee9ce0c6e7615 - - registry.redhat.io/redhat/certified-operator-index@sha256:53a416c4f1cc3369f99133e7ddc641d39c16967b826223ec1972dc05bde5f2de - - registry.redhat.io/redhat/certified-operator-index:v4.6 - sizeBytes: 673584429 - - names: - - registry.redhat.io/redhat/redhat-operator-index@sha256:4652c923c242c7feafa1171f537c0390b2a8684f2b7ea5947fad9f7663020005 - - registry.redhat.io/redhat/redhat-operator-index@sha256:d71145aa96b8d17867dc759f0985d5cde2a272029fcd7d3ee36f9ead407f035d - - registry.redhat.io/redhat/redhat-operator-index:v4.6 - sizeBytes: 668300590 - - names: - - registry.redhat.io/redhat/community-operator-index@sha256:735f9b29a65f396e073392b9d97440666ee0ae5d428db3ef81f0d4a98da24080 - - 
registry.redhat.io/redhat/community-operator-index@sha256:ca94b33771f6b2bf8fe831bb1d4cfcfc314e44cf263a0834d12f59900f97e4d0 - - registry.redhat.io/redhat/community-operator-index:latest - sizeBytes: 516909160 - - names: - - @ - - registry.redhat.io/redhat/community-operator-index: - sizeBytes: 513503120 - - names: - - @ - - registry.redhat.io/redhat/community-operator-index: - sizeBytes: 511012751 - - names: - - @ - - registry.redhat.io/redhat/community-operator-index: - sizeBytes: 510914447 - - names: - - @ - - registry.redhat.io/redhat/community-operator-index: - sizeBytes: 510914447 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:29e3f55ba5be8cc2f8a431411fc75c8bf2f07a5b55f4ab9a81c603052c82c5dd - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 505930943 - - names: - - @ - - registry.redhat.io/redhat/certified-operator-index: - sizeBytes: 502538129 - - names: - - @ - - registry.redhat.io/redhat/certified-operator-index: - sizeBytes: 502522768 - - names: - - @ - - registry.redhat.io/redhat/certified-operator-index: - sizeBytes: 502522768 - - names: - - @ - - registry.redhat.io/redhat/certified-operator-index: - sizeBytes: 500944783 - - names: - - @ - - registry.redhat.io/redhat/certified-operator-index: - sizeBytes: 500686735 - - names: - - @ - - registry.redhat.io/redhat/certified-operator-index: - sizeBytes: 500686735 - - names: - - @ - - registry.redhat.io/redhat/certified-operator-index: - sizeBytes: 500686735 - - names: - - @ - - registry.redhat.io/redhat/certified-operator-index: - sizeBytes: 500686734 - - names: - - registry.redhat.io/redhat/redhat-marketplace-index@sha256:53cf2882cb27663221ec395a3d0d14f6f0071679b491c18932f963cf6eb58317 - - registry.redhat.io/redhat/redhat-marketplace-index@sha256:edb74a7172813dbff3444eecb8d21195c2c95605434668f9c0e77f2065239120 - - registry.redhat.io/redhat/redhat-marketplace-index:v4.6 - sizeBytes: 498916241 - - names: - - @ - - registry.redhat.io/redhat/redhat-marketplace-index: - sizeBytes: 497852303 - - names: - - @ - - registry.redhat.io/redhat/redhat-operator-index: - sizeBytes: 497202066 - - names: - - @ - - registry.redhat.io/redhat/redhat-operator-index: - sizeBytes: 495546255 - - names: - - @ - - registry.redhat.io/redhat/redhat-operator-index: - sizeBytes: 495546253 - - names: - - @ - - registry.redhat.io/redhat/redhat-operator-index: - sizeBytes: 495546253 - - names: - - @ - - registry.redhat.io/redhat/redhat-marketplace-index: - sizeBytes: 495161231 - - names: - - @ - - registry.redhat.io/redhat/redhat-marketplace-index: - sizeBytes: 495079311 - - names: - - @ - - registry.redhat.io/redhat/redhat-marketplace-index: - sizeBytes: 495079311 - - names: - - @ - - registry.redhat.io/redhat/redhat-marketplace-index: - sizeBytes: 495079310 - - names: - - @ - - registry.redhat.io/redhat/redhat-marketplace-index: - sizeBytes: 495079310 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4cffc88a97ba39c1a6a9ce45cf406bb29011a72766dc6f4deb0d76f7cd6eb02a - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 486536450 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:003d999d37f8d2c043ad9370eaf1e7533b9306bcebea47d62546b6456b0f6924 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 424450062 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c8f0b9a2852b15b45487c08c158e10f3b803d7a77538d6dbc1d991994f58bcee - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256worker2: - sizeBytes: 418066712 - - names: - - 
quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:456dff747967eadbbfc6e9c53b180049bbba09e85cba7d77abe0e36bfc02817a - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 375119644 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3b5f75e0bb4f4e2e74aee6016030bfcce9cf71e52244f6fa689a673e619b35a4 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 372122608 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c4d893acabeed686141edd1a0efe2a54a3b344737a59eecdad1c9aa456892193 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 351006144 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e9b7708abd519710c58d6f51870cd1085e436283164cf7ddeb44530a2c4ca12f - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 346146912 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a02341afe177329a39e49836e4c49d194affc8c4754fb360f0f760806f3bc2f - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 341937980 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a95399379b48916216c49b1e4d4786b19081378ccd5821b27d97b496edd56a86 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 341611087 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a501d66461b0f24d2d551ec141617577ed417fdc4bc69db39f919e4e668a2889 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 331223794 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:02656bc5f5d78919453f86fa63c6531c6e0f1bbb4f3abcc662b6726b5619acec - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 330508411 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:759062d6a339a6915d499fc875f365cc1d3e52ededb6249ac19047b98dac9771 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 326516257 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b98c62f6b1c8fc90c4e6ab665d38f12a306198d789486f03b08da35503233c20 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 319387510 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:46b7fb792b3c66da1650b33bc632a90841f6aa18c3dbb581005ab56411f3b480 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 307629969 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:54e61ec737612af1eb38c01fb7829dcba44f4de3cbcb52f029a95c73b9c2d7fb - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 305778268 - - names: - - quay.io/openshift/origin-sriov-network-device-plugin@sha256:cb260fd8bd6914e52c3f2899be14990cab15afdd3620194b68013bea7b0e2826 - - quay.io/openshift/origin-sriov-network-device-plugin@sha256: - sizeBytes: 305304768 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b0bbfa15a10b4308f07490670fb94203a5f63d9ad6b40e55b3bb959e9bf731d2 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 305027413 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d8bdb0125b1287565b5464a6d6181fd9fe706641efd68063bdb317270bd11887 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 304593198 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7ebb3554b686260d750e74bcbcf9e731a7610ff55a7a44f541dfc82f3826918e - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 297867413 - - names: - - 
quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7c45686013da446f0f20e5e849a6f547d8769ca73fecbff873ecf1d38c6e72bc - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 296659610 - nodeInfo: - architecture: amd64 - bootID: 233ea568-a9e0-4f08-8b49-1abdf1d3d9d0 - containerRuntimeVersion: cri-o://1.19.0-22.rhaos4.6.gitc0306f1.el8 - kernelVersion: 4.18.0-193.29.1.el8_2.x86_64 - kubeProxyVersion: v1.19.0+9f84db3 - kubeletVersion: v1.19.0+9f84db3 - machineID: 2d4c8e7af53440dda2904a0b0b3d790a - operatingSystem: linux - osImage: Red Hat Enterprise Linux CoreOS 46.82.202011061621-0 (Ootpa) - systemUUID: 2d4c8e7a-f534-40dd-a290-4a0b0b3d790a diff --git a/test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/cluster-scoped-resources/machineconfiguration.openshift.io/machineconfigpools/master.yaml b/test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/cluster-scoped-resources/machineconfiguration.openshift.io/machineconfigpools/master.yaml deleted file mode 100755 index bde8154d5..000000000 --- a/test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/cluster-scoped-resources/machineconfiguration.openshift.io/machineconfigpools/master.yaml +++ /dev/null @@ -1,140 +0,0 @@ -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfigPool -metadata: - creationTimestamp: "2020-11-25T07:59:09Z" - generation: 2 - labels: - machineconfiguration.openshift.io/mco-built-in: "" - operator.machineconfiguration.openshift.io/required-for-upgrade: "" - pools.operator.machineconfiguration.openshift.io/master: "" - managedFields: - - apiVersion: machineconfiguration.openshift.io/v1 - fieldsType: FieldsV1 - fieldsV1: - f:metadata: - f:labels: - .: {} - f:machineconfiguration.openshift.io/mco-built-in: {} - f:operator.machineconfiguration.openshift.io/required-for-upgrade: {} - f:pools.operator.machineconfiguration.openshift.io/master: {} - f:spec: - .: {} - f:configuration: {} - f:machineConfigSelector: - .: {} - f:matchLabels: - .: {} - f:machineconfiguration.openshift.io/role: {} - f:nodeSelector: - .: {} - f:matchLabels: - .: {} - f:node-role.kubernetes.io/master: {} - f:paused: {} - manager: machine-config-operator - operation: Update - time: "2020-11-25T07:59:09Z" - - apiVersion: machineconfiguration.openshift.io/v1 - fieldsType: FieldsV1 - fieldsV1: - f:spec: - f:configuration: - f:name: {} - f:source: {} - f:status: - .: {} - f:conditions: {} - f:configuration: - .: {} - f:name: {} - f:source: {} - f:degradedMachineCount: {} - f:machineCount: {} - f:observedGeneration: {} - f:readyMachineCount: {} - f:unavailableMachineCount: {} - f:updatedMachineCount: {} - manager: machine-config-controller - operation: Update - time: "2020-11-25T08:01:39Z" - name: master - resourceVersion: "11223" - selfLink: /apis/machineconfiguration.openshift.io/v1/machineconfigpools/master - uid: 8589c30f-ea32-49cb-8bf8-4ba9dad44856 -spec: - configuration: - name: rendered-master-e92b311a0208749dbba5c4458afcc653 - source: - - apiVersion: machineconfiguration.openshift.io/v1 - kind: MachineConfig - name: 00-master - - apiVersion: machineconfiguration.openshift.io/v1 - kind: MachineConfig - name: 
01-master-container-runtime - - apiVersion: machineconfiguration.openshift.io/v1 - kind: MachineConfig - name: 01-master-kubelet - - apiVersion: machineconfiguration.openshift.io/v1 - kind: MachineConfig - name: 99-master-generated-registries - - apiVersion: machineconfiguration.openshift.io/v1 - kind: MachineConfig - name: 99-master-ssh - machineConfigSelector: - matchLabels: - machineconfiguration.openshift.io/role: master - nodeSelector: - matchLabels: - node-role.kubernetes.io/master: "" - paused: false -status: - conditions: - - lastTransitionTime: "2020-11-25T08:00:37Z" - message: "" - reason: "" - status: "False" - type: RenderDegraded - - lastTransitionTime: "2020-11-25T08:01:39Z" - message: All nodes are updated with rendered-master-e92b311a0208749dbba5c4458afcc653 - reason: "" - status: "True" - type: Updated - - lastTransitionTime: "2020-11-25T08:01:39Z" - message: "" - reason: "" - status: "False" - type: Updating - - lastTransitionTime: "2020-11-25T08:01:39Z" - message: "" - reason: "" - status: "False" - type: NodeDegraded - - lastTransitionTime: "2020-11-25T08:01:39Z" - message: "" - reason: "" - status: "False" - type: Degraded - configuration: - name: rendered-master-e92b311a0208749dbba5c4458afcc653 - source: - - apiVersion: machineconfiguration.openshift.io/v1 - kind: MachineConfig - name: 00-master - - apiVersion: machineconfiguration.openshift.io/v1 - kind: MachineConfig - name: 01-master-container-runtime - - apiVersion: machineconfiguration.openshift.io/v1 - kind: MachineConfig - name: 01-master-kubelet - - apiVersion: machineconfiguration.openshift.io/v1 - kind: MachineConfig - name: 99-master-generated-registries - - apiVersion: machineconfiguration.openshift.io/v1 - kind: MachineConfig - name: 99-master-ssh - degradedMachineCount: 0 - machineCount: 3 - observedGeneration: 2 - readyMachineCount: 3 - unavailableMachineCount: 0 - updatedMachineCount: 3 diff --git a/test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/cluster-scoped-resources/machineconfiguration.openshift.io/machineconfigpools/worker-cnf.yaml b/test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/cluster-scoped-resources/machineconfiguration.openshift.io/machineconfigpools/worker-cnf.yaml deleted file mode 100755 index 2141b789f..000000000 --- a/test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/cluster-scoped-resources/machineconfiguration.openshift.io/machineconfigpools/worker-cnf.yaml +++ /dev/null @@ -1,157 +0,0 @@ ---- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfigPool -metadata: - creationTimestamp: "2021-01-14T11:25:30Z" - generation: 44 - labels: - machineconfiguration.openshift.io/role: worker-cnf - managedFields: - - apiVersion: machineconfiguration.openshift.io/v1 - fieldsType: FieldsV1 - fieldsV1: - f:metadata: - f:labels: - .: {} - f:machineconfiguration.openshift.io/role: {} - f:spec: - .: {} - f:machineConfigSelector: - .: {} - f:matchExpressions: {} - f:nodeSelector: - .: {} - f:matchLabels: - .: {} - f:node-role.kubernetes.io/worker-cnf: {} - f:paused: {} - manager: kubectl-create - operation: Update - time: "2021-01-14T11:25:30Z" - - apiVersion: 
machineconfiguration.openshift.io/v1
-    fieldsType: FieldsV1
-    fieldsV1:
-      f:spec:
-        f:configuration:
-          .: {}
-          f:name: {}
-          f:source: {}
-      f:status:
-        .: {}
-        f:conditions: {}
-        f:configuration:
-          .: {}
-          f:name: {}
-          f:source: {}
-        f:degradedMachineCount: {}
-        f:machineCount: {}
-        f:observedGeneration: {}
-        f:readyMachineCount: {}
-        f:unavailableMachineCount: {}
-        f:updatedMachineCount: {}
-    manager: machine-config-controller
-    operation: Update
-    time: "2021-02-09T08:58:21Z"
-  name: worker-cnf
-  resourceVersion: "33988039"
-  selfLink: /apis/machineconfiguration.openshift.io/v1/machineconfigpools/worker-cnf
-  uid: 54244533-5ccd-4f53-b760-0e656cd417ad
-spec:
-  configuration:
-    name: rendered-worker-cnf-7c30be8313249d4d85afa8cc3f538b3a
-    source:
-    - apiVersion: machineconfiguration.openshift.io/v1
-      kind: MachineConfig
-      name: 00-worker
-    - apiVersion: machineconfiguration.openshift.io/v1
-      kind: MachineConfig
-      name: 01-worker-container-runtime
-    - apiVersion: machineconfiguration.openshift.io/v1
-      kind: MachineConfig
-      name: 01-worker-kubelet
-    - apiVersion: machineconfiguration.openshift.io/v1
-      kind: MachineConfig
-      name: 50-nto-worker-cnf
-    - apiVersion: machineconfiguration.openshift.io/v1
-      kind: MachineConfig
-      name: 99-worker-cnf-generated-kubelet
-    - apiVersion: machineconfiguration.openshift.io/v1
-      kind: MachineConfig
-      name: 99-worker-generated-registries
-    - apiVersion: machineconfiguration.openshift.io/v1
-      kind: MachineConfig
-      name: 99-worker-ssh
-    - apiVersion: machineconfiguration.openshift.io/v1
-      kind: MachineConfig
-      name: performance-example-performanceprofile
-  machineConfigSelector:
-    matchExpressions:
-    - key: machineconfiguration.openshift.io/role
-      operator: In
-      values:
-      - worker
-      - worker-cnf
-  nodeSelector:
-    matchLabels:
-      node-role.kubernetes.io/worker-cnf: ""
-  paused: false
-status:
-  conditions:
-  - lastTransitionTime: "2021-01-14T11:25:36Z"
-    message: ""
-    reason: ""
-    status: "False"
-    type: RenderDegraded
-  - lastTransitionTime: "2021-01-14T11:25:40Z"
-    message: ""
-    reason: ""
-    status: "False"
-    type: NodeDegraded
-  - lastTransitionTime: "2021-01-14T11:25:40Z"
-    message: ""
-    reason: ""
-    status: "False"
-    type: Degraded
-  - lastTransitionTime: "2021-02-09T08:58:21Z"
-    message: All nodes are updated with rendered-worker-cnf-7c30be8313249d4d85afa8cc3f538b3a
-    reason: ""
-    status: "True"
-    type: Updated
-  - lastTransitionTime: "2021-02-09T08:58:21Z"
-    message: ""
-    reason: ""
-    status: "False"
-    type: Updating
-  configuration:
-    name: rendered-worker-cnf-7c30be8313249d4d85afa8cc3f538b3a
-    source:
-    - apiVersion: machineconfiguration.openshift.io/v1
-      kind: MachineConfig
-      name: 00-worker
-    - apiVersion: machineconfiguration.openshift.io/v1
-      kind: MachineConfig
-      name: 01-worker-container-runtime
-    - apiVersion: machineconfiguration.openshift.io/v1
-      kind: MachineConfig
-      name: 01-worker-kubelet
-    - apiVersion: machineconfiguration.openshift.io/v1
-      kind: MachineConfig
-      name: 50-nto-worker-cnf
-    - apiVersion: machineconfiguration.openshift.io/v1
-      kind: MachineConfig
-      name: 99-worker-cnf-generated-kubelet
-    - apiVersion: machineconfiguration.openshift.io/v1
-      kind: MachineConfig
-      name: 99-worker-generated-registries
-    - apiVersion: machineconfiguration.openshift.io/v1
-      kind: MachineConfig
-      name: 99-worker-ssh
-    - apiVersion: machineconfiguration.openshift.io/v1
-      kind: MachineConfig
-      name: performance-example-performanceprofile
-  degradedMachineCount: 0
-  machineCount: 1
-  observedGeneration: 44
-  readyMachineCount: 1
-  unavailableMachineCount: 0
-  updatedMachineCount: 1
diff --git a/test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/cluster-scoped-resources/machineconfiguration.openshift.io/machineconfigpools/worker.yaml b/test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/cluster-scoped-resources/machineconfiguration.openshift.io/machineconfigpools/worker.yaml
deleted file mode 100755
index f85580266..000000000
--- a/test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/cluster-scoped-resources/machineconfiguration.openshift.io/machineconfigpools/worker.yaml
+++ /dev/null
@@ -1,139 +0,0 @@
----
-apiVersion: machineconfiguration.openshift.io/v1
-kind: MachineConfigPool
-metadata:
-  creationTimestamp: "2020-11-25T07:59:09Z"
-  generation: 74
-  labels:
-    machineconfiguration.openshift.io/mco-built-in: ""
-    pools.operator.machineconfiguration.openshift.io/worker: ""
-  managedFields:
-  - apiVersion: machineconfiguration.openshift.io/v1
-    fieldsType: FieldsV1
-    fieldsV1:
-      f:metadata:
-        f:labels:
-          .: {}
-          f:machineconfiguration.openshift.io/mco-built-in: {}
-          f:pools.operator.machineconfiguration.openshift.io/worker: {}
-      f:spec:
-        .: {}
-        f:configuration: {}
-        f:machineConfigSelector:
-          .: {}
-          f:matchLabels:
-            .: {}
-            f:machineconfiguration.openshift.io/role: {}
-        f:nodeSelector:
-          .: {}
-          f:matchLabels:
-            .: {}
-            f:node-role.kubernetes.io/worker: {}
-        f:paused: {}
-    manager: machine-config-operator
-    operation: Update
-    time: "2020-11-25T07:59:09Z"
-  - apiVersion: machineconfiguration.openshift.io/v1
-    fieldsType: FieldsV1
-    fieldsV1:
-      f:spec:
-        f:configuration:
-          f:name: {}
-          f:source: {}
-      f:status:
-        .: {}
-        f:conditions: {}
-        f:configuration:
-          .: {}
-          f:name: {}
-          f:source: {}
-        f:degradedMachineCount: {}
-        f:machineCount: {}
-        f:observedGeneration: {}
-        f:readyMachineCount: {}
-        f:unavailableMachineCount: {}
-        f:updatedMachineCount: {}
-    manager: machine-config-controller
-    operation: Update
-    time: "2021-01-14T13:44:09Z"
-  name: worker
-  resourceVersion: "21744755"
-  selfLink: /apis/machineconfiguration.openshift.io/v1/machineconfigpools/worker
-  uid: b687b283-3d1a-40fc-aac1-65b5d65eb3ea
-spec:
-  configuration:
-    name: rendered-worker-ab646b3e1548515048010140962bf08d
-    source:
-    - apiVersion: machineconfiguration.openshift.io/v1
-      kind: MachineConfig
-      name: 00-worker
-    - apiVersion: machineconfiguration.openshift.io/v1
-      kind: MachineConfig
-      name: 01-worker-container-runtime
-    - apiVersion: machineconfiguration.openshift.io/v1
-      kind: MachineConfig
-      name: 01-worker-kubelet
-    - apiVersion: machineconfiguration.openshift.io/v1
-      kind: MachineConfig
-      name: 99-worker-generated-registries
-    - apiVersion: machineconfiguration.openshift.io/v1
-      kind: MachineConfig
-      name: 99-worker-ssh
-  machineConfigSelector:
-    matchLabels:
-      machineconfiguration.openshift.io/role: worker
-  nodeSelector:
-    matchLabels:
-      node-role.kubernetes.io/worker: ""
-  paused: false
-status:
-  conditions:
-  - lastTransitionTime: "2020-11-25T08:00:37Z"
-    message: ""
-    reason: ""
-    status: "False"
-    type: RenderDegraded
-  - lastTransitionTime: "2020-11-25T08:00:42Z"
-    message: ""
-    reason: ""
-    status: "False"
-    type: NodeDegraded
-  - lastTransitionTime: "2020-11-25T08:00:42Z"
-    message: ""
-    reason: ""
-    status: "False"
-    type: Degraded
-  - lastTransitionTime: "2021-01-14T12:11:43Z"
-    message: All nodes are updated with rendered-worker-ab646b3e1548515048010140962bf08d
-    reason: ""
-    status: "True"
-    type: Updated
-  - lastTransitionTime: "2021-01-14T12:11:43Z"
-    message: ""
-    reason: ""
-    status: "False"
-    type: Updating
-  configuration:
-    name: rendered-worker-ab646b3e1548515048010140962bf08d
-    source:
-    - apiVersion: machineconfiguration.openshift.io/v1
-      kind: MachineConfig
-      name: 00-worker
-    - apiVersion: machineconfiguration.openshift.io/v1
-      kind: MachineConfig
-      name: 01-worker-container-runtime
-    - apiVersion: machineconfiguration.openshift.io/v1
-      kind: MachineConfig
-      name: 01-worker-kubelet
-    - apiVersion: machineconfiguration.openshift.io/v1
-      kind: MachineConfig
-      name: 99-worker-generated-registries
-    - apiVersion: machineconfiguration.openshift.io/v1
-      kind: MachineConfig
-      name: 99-worker-ssh
-  degradedMachineCount: 0
-  machineCount: 1
-  observedGeneration: 74
-  readyMachineCount: 1
-  unavailableMachineCount: 0
-  updatedMachineCount: 1
diff --git a/test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/nodes/worker1/sysinfo.tgz b/test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/nodes/worker1/sysinfo.tgz
deleted file mode 100644
index df9636708fa3adbd2b865c3a40fe6bd465e9407a..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 196952
[base85-encoded binary patch data for the deleted sysinfo.tgz omitted]
zFW}_aE!qWc^me(#*i}mG?x`a zdf`-o49#_3f9?sNVCN&>W)i_xajwlr9x^oW$op2R6X36);$Vh-z894(c@#*7%k5=l zAcqU`pBSuS`JlG*@S#1JX%zqJ?HMK`5shidY^IwZKDhB==jf_jl&E z6XA^kF@EomM{H7gcQ_aoE0xcXH09P6*t6>EV45$}L}@bYS$%%RcPe$CrlZ$#G63Uz z;S>#j=OB{+0X@^jt3-!M?P+qL(0n-j;UeWC8=QvkF##ZnEXb{anUBK(GC%;8TrHuj@iPuvtbLJ%4ZaQpu`UJ0S?n)n8eNM zpZFzna}GL(jDr1m<7b-R7GuCFEtUAg$ZVX&Q&MJ@^W$L^fn3^ueVQuH&lUabb@I8w zvJKhlMaY_M8yIvWN)F$`+?`j>CMT9)Ppu{~j~*%l))j!ZY?iM=q-B<`zQW3C97M}3 z-ySYtMa1A2)l1G1lkdcYf=bHiTE0oj;3@t;s4(Qf&gIdzRT|kMMYvC)jrHWbXwY*njT8~N zJ+e|A#XeqU_VM#`ZQUMbOCJH3K<-U)>|xoLyT`}FJ>I0QJfluA?(qf;W@fXFF%!9J zg?W)@)e7@sz#%ww!F+L*w;guDq`&hd{;g0)K@I|tpd2!T_{Hvg9fLo_rPf@in78(T zgulS3#QZjMfJUPd_Dy`WY$gV)@PGzqTL|1n+;}X2RaTaZLsYv1R>%n&CEhK;x2bP` zMAdBQF_&C;*K!OFuCQ;eRVCxZnxRFm5cVm1r?N2>XY=4fr{I8UwVr~bvG^+o+&Y^;{zkSqK+(81^K}q8QWydd{ghk# zGW+=ZNS!e+mk_7i_vGhYOXiqOt&#>-xYT`oUcBcbmClYdv+gc`F{FGAY7@Q;<&qMZ=)= z5G#VhBK2xFWod5JtFL4WXvbo4#DUQl$ugT;{&z)BHS`ovj&pxjm&jxT^JhJ$ zr&aVxu3zTu+}y!{JzbeHCI*GT5$_?>8Tu{1u_Ia(35s^r&4H5 z^W<3-${MNE#eAUkJeiq+&O z#>$IknK=4YR1iRT)?P4(u*%#)g&CK@_~W6OxhuY9aaX(#cSQrscc@vmaIGtx@EX{F ziE{J=4!D^~IN&Ci9dN^ig>XPytGC&dPdNw=D_lu2+#*fKPoH=L?(uUc$u6+i z%RtnvefIB-N-XfinksAqHVmE=Yp|FFcw+A!Zx2s?8G}DpQb#8XN*^#|J8%{RhbM4n zon7L=6gjaW4Lv5G?WB*jlbk4=36dlO$?W(EU)y1P7JLE$$ib7NI6gUV!6!J2kK>cK zEcgWLpW~CT^T=X)E{j!6j!u+$907l4pi~@TuArYXPeE)rF2II_$I(nvVsh83hil+n z2fH(Tk`++MrX&}qPgw8?Opb zpfACcKRacL^QbIzWLsTX5jvXKhmNnX%WQlJLWirQ0iW|@;J8@~ip-N;pV2&LG>i=- zSC1<-(Q@=2;5TA>!O?qgY>Yc&Qq@4u_|ywM<8NsY%$OJLdlXfSFROiOH^3qCyoI8; z4`Vs2w<2nO+-OB@ZiP;2u!~Af!v#^6ng$*ekhtPKe48uMcvYdfyzEaNROS+PE^$Qx z1SK`X4?3}N9m>@0Zr0ko6-BB6aI1W(N!^uOM6pk02r8fbobS>;O%%*g#S%H}w?MX< zT?g!K=TYr~3#<;ic$6Wp#h2ilTaVsrI`lX>c5bG&vx)J16-*J2nEq0M@+_H4<2Owaz-=2P35HTaURKC{M$T|^B|F0&~@ zK!g7HaHB&7aVRy)kjg4)Gzq5 z<_U+hkhh0FHJsll{LWp-ynym&vf~-;x2RU&=g{$W{j%Vpqt40&L=OZ%g~7#^Lzb8y6T+%RZVA$4F-oUOtD9LY;b&UgLMU`* z%KQ+|BH`^{Mvulkdf0b&_!)pocgI?>ptwo$ie8kF@Lm5 z<}$}Xi!+xNPT&CiFi-eAehBb?Zc}n3r7Ro-_#wa`MeG~|UgA@7K;|M~{^)tcM3`nC zN5^00aTKI0GFx*SN>?pu^n}@(!yq~<&me7ENL@SP5a8o-vo+>0oP|&pj;$8TRXJS` z8MoponH@G^J-Sn2Tp4nG1B~mg{a{>A0@PZ05v5nt6d)N`kDV(K=ir!&QiAg$=o~A& z$WX?M3~8VhUT|hL;^<%OPhd=T^slv9^8wD2WY_eL91Gc~g8x#; zZ#|AC1GGAhqd9Fmj;6p@(N?bn=dh!$dUc@1t8P1-fj63uN&MOvf_r)6CDEf$mscS? zSoG-DYJ#7A^vHXcbV4_y##*-Qp@rC@R%4OUP;WdHnxCTiFm<*Nrwi?US>=c?Sf@0u zxXC=N((q6eC8A{-7H3*MWG71s7J3rBQ)~FZ!3>~eESp2gSKPWVK{+xPO98bA>QzQ! zq~i1f7^Ja@!2)YmL!C)K7BWaPX?ZU|F%*HT681gCIU z14Y#4vv8}S2(Y)chDbK>rf;!qX#mpCKVnONd66wGFmtmsW9*bAO0-jA?^AZetkA<^ z%{p0RC%f~0%zuyZfXWvI+gAtgeEW6wE02`0mYSAiVLLCc_DjyoyPIERIIj?4S^;tQ%Vmx$%uL9s$MzTjzhwcmV#`5B0{u9}TOGCCZA);?(k! 
zX38N(v3(}wKv;8dX0=(0Tt5S2cXq6=WHAKz4P4>Fto#t2%~E9I|7-8d<6}D8_hYH0 zv{79s+M%ebYO1QScB+boR*I@xr>NRW6fLRc*h*D3YD=y0TI;pMRzbo^Z9zz?K@c(_ z8?sE2kxV8tbMD{uJm;L*vQpaj`}zF-_+I@`^1h;~%}Q^_4_gen&XOoHb>gJZeKHM|cNZ+R$%mTlp&NldQnCoCWW*BOew% z@<0h}(LYr)A{U)Vkb^Mb{3>HCP+H)v+s|{P*&xg~Q2^i>37i`o$ygNp4ToKer&^F0 zSmj}x$vjFqe4XtwQfIhyFST@y9ZtE&CxvnkdktSg?xA_G+~e#GCr|h;_ArOBZgObD zh^_){jf1KyDf`YRCFEB0a$iUI?PX-QFi#jItD)O?2 zgL)JrFDrY=0=Ie>Bau=|7Q0TqzdKY~SYI})P{{)DpPK=gz2=_SH&~Ur7K?f8!B}rV zZA?_bC-u%~N4Bff8C!DXlDb;ej)-wBwKB%!3N)rOMS`}~$x}8zM&NPs1n6fA{@$d0 zgE_MnuQHR_7Pomcb}1J#-*J+eF*HK91#k-=2qiq?u|yX-LWBubWyXs^ZtRSU<6BrZ zcII<(9KMN@-9~+*Mq+!&KI;1ebrN0)u!mz-FJ{XDCsK)8NZ;72VjwJ!d#M7u5VztA z8Bs`-O&d@1vxeDxD_jA7$$j5FlOZ??$_ot+jjKBY#)*t*YR;4(_EC5fR3(V&3}81T zQk5V@Cm#+&B8Wj=T`p_pBa~TqtQf>f#YLpj=(R^4mM^^d4|NtEzOIy!G;#`Le`-Bs;~MkTr-r+wU!)6K6$XxOM{ zP%P4%a6XYn6Gx4|rdqO!lKCqGu54d4XvzYJ%3!b#Jx2if%x7@+(+PoK84JYmKoy%|{Y~3@Eb9@v7oJAn~%)vr;$W-mf%P`EV)-pn& z$N-`%LjaLW4`r@I@P38!9oC57MmBsN0HHn_Z}1r}Ms%&kpjbUUTD#*Y_VEV4teJHY|H>bGLpzPxRV&@wMN(4f3h>*M|MC?d-O3tkqX;xG9x& zc@XR)8_-^{RBe&0%QF=a*6sRs#dQ@{?|@OAZABg`^zkH!@WRl?<=ZmlcngDB&lBO{;SFxe5?!T|Qw7;w-vTpB_+|re6aOzDtdf5V_!M zv+R9(S1pFk&}1k$!gY+gsx0FOY|OW~mZWsr02%F$#=+*QS+begL16UOi${kde4t%V zYiC`YG-G9DU6vu!k*(pKOo@twJ`fmqQotlkAW=}*F)giZ?x&SACLGP1X%2&$-8hXx zq)QO%1Ty(XM5JbXJ^6|&Wh;{|GG*)TO5}270?Tbw0>){UCd3|Kjyrbx;xT2wH(rzd zFQa)Pf{9yCIf+rgBfF|l)49D-f#xjwBoxalQCB>W-&Xnq1^#= z)QX^KHp_nnU)Y(Aze~}uY)*q^vot(G5~3C*2YW2KbO2Pi1Ur{h=%x-3|&E#5N#BaEzz}qOgiQ} zQ)jn^??+7(SY%qR_}(T;t~6DeA1BsMq4qc63qW(_A-()VpoTe*OeAZ_#H6vl5!@^O zhezzr3z;c7izd zlQ|&P+NE~3z^3&Uab?^}K)EtEUwE~N2Gn9-5!$rZk9!!g? zD1>{qpQA+A@@ zE3=nJhB8jhP}*mjU<}V@b#i4SRQmv;)6yTBF}!D#c;W*VS}npb=SCX2m~)<*$8J=~ zlhh05t%!{J=N)*-is9-{4#U-s7)|494DZ~5H;;jLXu*rUmn>T37CdGQ4_Pmxb%6$E zSMvxoYMPj1;XGC8b;!MB{% z028A|AkcF$Y8(2Ei&1-_(4K<9HN7Rx+i`}~p*NYhUFQQHfGy`6X3b|z{SGgA-bWLu z^wAvUeKeg?(1FK@%HC3d{zX4;@8gwNd(F@kEAQwy|A91BZKP?aCydF8_4Z4mV4KkT zUY_vhs<+e(rJQOCe@ixnucn&9FKNg#l;Dvb6AwwN;nBXst`5nOV%gAMi&*2WH)$PU z`7n63TcZJ?=4;?XE5of&z)NQB@^h9T8N1{Xc%yLUdy`n7g%~b@e?3(|>l+5?b)56e z-4|Ug;gP4^+?u!i_;y-eL0g6KHsdS&8i{V2TmgMJ>Kt(B=wDA2&*gAw*V_v5%YP}{ z6;6gr0nn3c<6&!`AB{2ymOH$oW)C`Q^4aVJ)=|Ti55>W-1`*q`q)Y}$yp!iT3R7%v zS+%N&(vTtW(VV5IR&BL7=c0BDvnCaJyq*g=9V8<&481`@@rI<3B4|>5wRrgmLYb^@ zaW2z<4Yym1UV*aVur@wpj49qXj%0cY7x7thxnN5taAc1uLS0sw!LO?0`0a{{&%Ij@ zb08NVKfJe+G9b&&ceRnj%tIQG8wzN`+-`&nlO$IogEUEloK zJeuh$Kr@1vH0FbD-7OuLVHBklI)PGXC1)D$s}!OOa!Bd(xX5u>MSQ0HJ-83*pikjE z@Tc9I6-<;94hwJAR2TKxC2AaG5wolsrhvfU-zJGV44)j5@XbJ|NQCplXWB!^uQc^A z^}+Z*rFAApFkVHC!*yHf=B{SZ4AvFreKgmToTc+H0thJ+LI7BTm@0)Y9r7K%mnIu;jcJo&IByOiNDdY`8XEK_V5M1JZ z6)M^%P7KdP;v3<3@Z|HDzQt*1kg#Ux1O}E&xjx2f$Kj^j==7Twq$zvCX}|S*Zm$NRBLzv=4aI z{#M%7R8zg}GWe*e@eS#!ET}=1TpT{T)~egds>5QtY0w~9C5#NKU>N_RZSBE<_St(! 
ziAm5rbso3-(`KWji4|%u^YjQUq%HwXkx;0cMo%7Yjlg@TmP;e>>>R7gK04&^MUcZs zsR}DgVcaUReCaAZT@h6VtdMfx+d6;!m==gXBa&^p>XX3CMpAZMVn)iB7d;@7@BJeX z40eLDf#Ee1a;(~1Vd5=|z+gO#n}~3DsJVC3C?0B#ZW@+(Km@@k;JOZO7@ahqwcwg) zK(+g*9Rj23pB688eV-~8Lk?qDco(WWr%TgmpdCev z?`iiIUtS*JA9Lk3yeG@sJb9oS#+>5E7LCict9V&pXAL*2uP|!_J9wB-gXbgwg8r;= zNDUlLo50OLfYoL|r#jN)wlt<^*`V119+hQX8k@ern|uq8{GEm-Ob$kS=7I2iH{ny> zDaB0E<`gmSclZ-*9thr=nhWrWomVZiNYc!!-*fLL{T5WZ@XD&r+i!Opy0~`ZFBkN= z)ved!XBrRp>GM~&35)AB9&x5mPPcB0Yi|CcX`i#*MlY_j`O9s6?sn_H__@u)NA;=W zRgYLgauZQNQ@T^U(*rA0)L?$Ke(EGKA`DS1YSn5JXSAWfO9 zs&G1D(bT_9X5<#DZ#>I-X2iGiQf^~-tZ z9cSHuSt+PK6FvOuVm#8!Ktqd2M{X5PN1k?6J*!rDDt6DS+?iq3LCx+wawd$bqQU(( zuvf9cZdxe&m;aigYjuHUE2q~&)Bc!6m@X)g=nMjFxRyNd%o$Anqn1aw>SA?d- znG|-K0mDEZ2>s+);WAkadG~tSZnPHFzU!nZv3T(>y+1)(x-AI@Iun{2FzpvAL#N2$ z%nFx~ay)?d$j(CEBX;4wo(kn0sxp|-8R?w$y+cvfpkDI0U;pa{d+^|xw^Q(_w7gO0 zRXaNOd27LQ7hXQR;8++? zw!PZQr+Kf{7k__i`HD@?j{4}@D$N>Uf!ciN44Ob!@F%TJ36eBF9{j7$D;}YCMZOyN zZ}aEz#t?33~!XmuJ<1CJ5j9E`~s@Z4ek^fuj2v^K)ng|)je zO-ZqmG`(Gp(RZa#@h8XOsa&oNL@I||dCx^kF_IG0Sv(4rNm9a$i-1?zjf?CX(k>~1 zR4N7Oom#FiEaZLrtZ+Sju$dPu6P#$B&&`Ua-)TmTqohJQwACoCI1Q9|#tuG=lPf_m z_;1qb>`4O!TbO5jf1LTHmn>^@f;WE+Y&A_=#^mF^u@L5#!96Sof!#5spH()H>|9_g1If`NYdkML|h$G?`6zEC{#}TUEB7(mHMU~|2d{#L7u)DMs#!~I3ZNO63^GpQE(g?{{ z5tT5?hRf!##Cfj)9~chG23YFDnB~hT8h0BwzM7F!P`Tw@HX;o(0-Y%v#`W8! zEtPqxWj9;=c$`OQ%6Y5C({sM%_}b>l!prya#cg5LT8vBtq2N3tuWg=?e9J_!HZdl@ zEAyDC$P9``NK<8lqPvZ#X(gODdddbUHs8x*(oX7Oj{WN(1bMkd=;0qu?vV%dQo2aA9%8aYGGN?1I*usQ^>ol?`Gp>G|P$+;v2*!w-DfJkK0XKl# z=z-&tj=6=q&D3iOs6l`;aQn$%lPVN(S0LJWH8%Y!q(J+-Z9|Wzqj2eQEpQ!eOuqM= z*5=h!QIuS!Sw`)=qGnF~64K!XXg^@b_@!I&aqLHp5L-eo-eo$k5WXChc-j~+lUq^A z>q*W&pn7FZ+FTG1c*88UEW+9c=0gBvWQ7Hg$+pqLP^rPR>)IDIK_L-0hC~}?c1kxx zdiuQ?f;rBt=y=X!M@yNt8J`jEgq8-{g18A7-*8~B<>`YAfjmmTbW27@j-#K(c`N+g z_yPFP!NqJJxe;8;pe`c+`&mWa%Fn!ILk>f&wi!Lrx- ze7d3ITR(nz;nl-SYB%b#px@Bt2bVn4sIyPMUd#6{sn@9MnSK+Nhb^hO>HVhtx-H+k zq|T-;+xm@Oet5}qn>vr`*MIq`%Idfp#kmQ3dS^0d0{#pV{|u7KWP;9TWc?Tmm;6S$*GLG0)Er^Z6W(7_$=oMRfn$-;wc=O zk?UC%I2^|fq2;j?un=j6F8c7agQY{JswCRGB~{@LKfLE-2m_Ef#^-DTP-I0HB=aKj zon;V)%1Ub)Hmz4QY?@B9oVgZEM5uz~uSE(aX9NU#c8nd0cH-z&%mvlWJ-(PmygqUX z4{O9r>TK^HU1(gvI-qbPo)2%ttITG5&!6QycVZqsKYgz0RM5~Tb6^EWa?YR zoJbLdMB*e5ko=gh`J~3fPxAQTiTD9?z+*m^v#B()>P>4CZI!_e3nJ`NhXqmhSN2(r z+`O##ZoKgZ)k5BByti|^y;-WO^pvGDOXYIv_z9*G;*v|`A$IRF{9NS0j;um9w#6M5 zn|jFq*pffYrpu#~;YXN5fnt03Tmft3#W84BiDmY4agDq&CT*{6_&(WHl^^FocI_aE zitJXf>Y1UY-3frnKpsi}u}fAgItA@%teV6#YgNTY!O=~#l6pHgOxdD z(6sDw_-%I3G*~TMdjG^+n|Efzp5%z+T@WE|K8t1fdH|UvQ!84ZnH}(-W#^P3o0wo# z`wE?8?NoNZ_Xb@ZGdAajr#d(Do^@{KEzmeM^MbE^4kzZ}6?;>>!<%<9SH33VslnE3?6EDAKqpfFBAw4|Mb-hrXc^}I~foRu~`wf?# zDl_rovONoPw#+u##g@4+XRC7rkWYC42`JeKK+fpY(xs65A)wgev0YaBrwYw~DVr65 z#9NYV`fb#6{{^^-E)SX!{h2Xn7uVvwZNFC6!B@6`Bm1t79X;xB*3^!TFI*Dy)>N$( zUi(K?2x7(UMP80v6~kmXcsjvD{+R% zpI#d%E_sIYaAovgId$Riu{X8iXS}hIpIVZ^+0dCk-Y9Yul-M8X8Hk5KmpNW?!yPZN zdYxF$W%R$({EVB~d69WH52an9MGI{laLo(ok9%@%Gi?q?DoGf~ezW+Ger7#lN@$kRoPnRt;VCO|V{k2}LA zdc!~<2I=z|snPESf3kengy5Ob&BLARg{kH`%;YSSAnzW@0X)kc9QB-=umHSzsXZa1FjI1 z|BB*!8kLj9;4CcIW%ROPJj@!nG<|~PDc?a?i&&2@Oel(>*47IraI_zmQCu_kD8X~_ zVIC#X@F?-MY)wow<{fO8e$}(}3GPj9?esp0!;wc9I3dLq%iSUEn;)u89 zsD{o1xuJ6#HXaA2X3Ui}-f%QyoZpoStxwwh)AMu}(Ws;ATmf9tE0cwR!_EnZ`IR!+ z3%^p;;D`y*VXepvj_|C~>1_tbk^yU`R;~a?)~4Qg4H$hBRn9t;tMst}_+2sToH?Uy zruh#GQ&C;~8Kx4Jl{e@4+}RzdJLGrP80z#?-~JUI`#gj{PJ{!-sqr2OTrYnosxg`= zM`+)^UZ&^dQ_E=OpNX_3N31cY5+j5Ek~9bAN5px>%}+f#jJ#jZ<1=ioY3H3XYvl&v zNWsJ!gpJ(K`7{7Y7GCnK0Xh!{87!CKzM;XUP}`00I-h?o>1*Qsx%tJnC&Y3lZH4*! 
zmy^#!+L|v*Tdk&#`brUyww|IZOxpT0U!%E@wpxA9Vhu7&@?}ig8j&(?$`hcT`_{y1 zG#+mBnO4soxxw6^$;?b&u=AsJlPWQ z@q{le4c`c%XCM3g^=L}T)jf`W_Rr+x#-GNZ*YAW)hiJoOj1kqs2S(8jsr&hRVtu*g zPdnISi(vId%O4YyFq?rX<$jg2#U$9G)Mza+*OKh=Z@_PTJ6&F8o?m0mZfW%}073mc zQie*Yxw;>rFgg}3Y`-0;?f+hD`eJ?pkb2KrGY_E;v7c?k&rE8PT`T$u!k9Qt_wFMx(Vga45wnza(c%3NiBypR3BNWs~MY``@v&dHC~p8OzY zeDCbvnxM{aVZ3p+q7IjXx2)8bTF#Sc6;w}h?6Fr*UjVqhW(RI(IIIYcunTbFjgPYK zT*ez8vLRSlxNu2H%Zh6ToK6jwAIr9T5h&#yx1BDoykor2&(wbzOMc`a!#7`V>f{gC z_*7eFTxQ<94l{e(Y2y%P^>~puZM>dt!Zji0Sw203%?rLRq20zHC=ZTPI~o{2U%$%| zO3Vu$?`+{}pBH>js3%uea+GoLUwl|mMs3n`M44jj7um6)J~3v!;D@tE?b9)&D1AC# zDSbN0LW4>>nGL}H8SB{suUlvwo(CB2y zHhvTGPpzUcuUDA+6c;2-v}|P12XANMxU7?btNxU=VZv1h`Y7k=%I1{Q%;K?^`;H#U zxGWw$nDo5(=&=V1QE{kaWH4iWaBw)8Fe1D$C-Ra@rwE}rW}?)Xv8#~i#5lt=cJkEY zFFK9Oies``jLYJr@$gvLhrls2IF8#tdns@=_mss*-Pt3!I@iqs8sWpu$AoQN#SPnA4!ti%X07H% zCh>;6$!Lm^%ow)!aH^o*i3rbj+W0=Jphb-QxuJ2df*Kc51*z!mQpyiG`V`MoxaO7R zDVVm$^OR!@eL2hO<0HdswRGNKj<-Wr|d-!Gz!l=EIoAeHag#_+XM94$+#- z48#h|hmr3`-7YCoxG0Rl;J+Vwj{ZWEumY0ByUfXiWg|&}!v4AAr87_dQH`-bQulS2 zgoJ#w^F1fCKbOJ&3?@*qSzq&xVbtfl=$N(BZXBJtx-aF4dvl*{CsW7M8e&!)1f0LJ zwXakXyrOrYw`sH@+}cp?;X{r9;U+Re2mQnC+wuG2=pP&)PV^7!G7QR7-3az+96pTf zq>D7E#4$6FuVmHNyp1M4R{a2FV>ZAXQ_MlND9cF{2l+g%15{5zEKJM1E*Wf9ZKYsd-wOhCrIx3dK3LPGHQRwUrffhwAbS4{>Lg#=}p~F?L z7afJpF6TmL^i8YAg*-Ov0N7w5z~=I zs^!U1s$|b)&;H{PB>P8~P3Z_Aa5!|E*;!r|5-S0u&A$~uy#J!kpO3>Ry-0L;m*8j8 z9(gn`li)l1iHwxIXtF?kvIC_8C{W%%=8jvv1%jUUn798|S~ z8D66pnh0SxuK!fW!)Am41?Qua07Y^Zpqvq;0s1OOay9#3-pLrCBP?JXk?Ky*&`GuS zZw3PFl!jpbc;nr+r*M!DB6PBI5SBHAsCLln7nw~^ha?nuF;66Gkud~SPA@iyLLSMo z9SXKwu3#JbT#s_~Fni^F8?0eFRcY{B*nv3}!O?I;#OpI^9DP?i-U{vfLU9UEmB-Sc zoeyS7NB+S!Ib1s*aalM6f_JLL3vo-alK*UvQTB6XAV_3^SB-NSfGw|5r-T$%t^mAP zX%Ouwh~F2k6cEFmI8DS7607pF*`=A59b+%B&$;h12LF(W=aB&Fg`fYk?0sIz43YfW zSj6^#ThFr~8;~PJx^^fh^A2VE1Ykc{{w+uPtC};ShdDWkwfHL~KK}RCY_YS7++eDw z2@sy#stE}dJDan0tOq#Ar3ZNA0w7#Y0PH$p84c*c`t63Y9FM~zutu(1ac4SacCvOp z1{a@ay@$NO0s1tK7pjd*;g*jV@B!ouh2_k%3qAsX{hte23#~c z!q&e$6zfGcw6ljg6l*Do-`ZYf49_YIQg=m1Mh~?#bU{a&vVJ~}j-X5!pTn?LIjEe! 
zr>&pV%4m2Vh(oUKGkQUtZUdZxO}9a}j#4P(a?z}FP^n|{tG?%h&6yd@q){nts6fou zWn1Ss(vo17mb}L~93?GzO&-~;LaYd8&OrsTs|vx11*h0-8@A?=wc^Tw3EH>-K;~TV zOjT8z!{ZgA1xbTWZAD`N6kdKkg%tJpr{MX3UAUGtOx0GFvPMoyyxD+De$Y#tA(Y|A zlG|~t@DFfq$C0~`3)NjL;gWaG3yLOXVquF{Q>Y&-Uh~xP6p+ILU6QRym>jlQsKO~T zx29^W8z&EN(W1h@s-3AvJ#3$-SA?7%tDVDS&ZDtj_4)CwehdFQvS#xQA5CiBv+>Id zIuBiXaK)x2OE$f<&8L6&PtW*F_~m+|;LX1+Yqa@;QN1qr>N9#t|4nai8#C(F=045- zTJ=`Ex5T<}!XcFRtNw6-kvmB=x;qP)*XVd&))dQdOV)LCMN??nk3xevi^Rfj6!=v%rih~x;0yOh!Je_>k7RNoJwejP|b$NmT zHr*B^a#m9D56E}+4_Ir3Gs&Vq2T;c`vZQSc_m&qg=%wbD&gFF-e+VTG=3>n`9H*Vv zYYIKhc#T}$tEdhbC+r+KUWp#2lr(7~tQ6RYWL8|HLs0K8Rg-}>JxZFG1mW%G18s<{ICw0PfpFN@Fa&g0Ar_TK)O=bosnS^^$K%EB{4Ei35WvvsyAU5+=bIe zfT!hbNnvDOVx2gL3Qe|&w%iE0k@3V-b&fCOD1PY9w98ShTfas-E(w?@n zd003-80-6jS##6_qLS{ZHZ59!vuu8H7hkSoArK8K0IX|9`_8gXR>y=dBdE*Ybspn; zN*KFC7EmZ-e5I{5{875dE7Kt=DGaC9F-r{@S1Pvvhg*YkwH?_Z4o@S!G$n(ZyBcXn z7%k*G6k2*GQ3-=np_Np;T{$14db>`}(My+QETm}{)`c_J0=uNa@Px?o+xFZ6?d(Vf zuTDR&xp+frvFsR*!gCqe{k3P`Ur01(>-I0@N%pC}U^8u<qIJ)a+H(&Zb>{C-)H3TWV)1UTz$)i_%b02*RiSfl^U*C z2QzP0;r~E(UF3bccPyH1pWt;mT5#9}u3=>Z+qJ3?wjrP5i&O-tKhv>+jNv-A`=WBM z-DVxYyYwwDTeqU(0_|)kj*9aSU_FKC{XAg}MYYMx7*z+W`no2gSot3ULS)?hN*u&4 z>J0Euw-(@-Gj!=w26q?3#RPPt$`uTN8Cueju0j(CB3S<(>s7H&(mQ-vP;!oB|1bsl_OC`m1}LdO)ziHZ3HwnmDs zB28p^HJ)U7^@V-FU9nYxzV@wduZDQ=kv1|0((yQ1bOkw1)k^Uo>gl+3Kg&VM%+Jd- z@m!w3%}3$`{C1dQ`Ko6CnH>OeY|y9d8yxc3BPx1als>% zy+^>o$xUh8dxY7^Z?(l_XZugR{z6{5)A(<;KyzTEc-_LB?&WHQ54SA7T7jcIX#2UR z^Oe0=bm&lPcBQRb?ShrdDP|s!sF(-b7mquZS&z8zxKG$&4qG7}x5#;V;BgnB4Y}Us zIxW9z~U!jk_;jKC;H54gL22xT{g61wDtZ_^k1+MvwXQ?6u;P#ycBT zKGSo;ih+%TH&txfv)hVZn|Exgw5{jp75z8w+Vt3{p8Z#Ry1D1uyj*dZ2ONv@{PH5> z@QQLCk(Gfw7i7LgT%Cg~nopxx?KX`9Zf9eeZ&7cHaOK4p{lu>{>q39F=?`8ZHjH94 z{f4RVpm6v@XT_MzG}|~KR%A-ktu=s%Gwoxg z9Q~0>2bYoF3dKlEH?uRqr+aV~I>V>$CE+gk87zxHqOoMFw7=`lz>G<<1k`X=vRXetb~2ErJ4#RVywckYv~v$-n`G8lY$RDNYk|b<##@z(!isVAtkdS1a@O zU#85wNfbONTAE_mZ$*1p{OS(GkK;TD?_!ycaU68 z!Mf2f$r`%^onObIbG1)=E^_G(=-FkiB2}KXpsOGF^BwA83)rv-&&&lM?ZdS5EI&d< z#}g-R!IlE=smK0MBdxm#z6%`4o=iLYe^vy~V2&~O4Rb7aQ;$uGhhw_uoV^}DJ+zb~ zXi#~h=gfy%t`yEW-jpG{qH>NPqh7iumn+$+)*zF(B6Tejz1ylW1=({ragUcIf^R$? 
z`Yfw@m8+| zxw;mN*nkUsvaY(>D<)fn5U%=16Nc4F2>iu5d4+JtC=O1cd*T~bYsfll@TIhXAnnzL zQ~p>-H*>t^Wovy+^d_*erhD@jj^I-IH{YPxHOLV(-rVO|`Q)HW!f3qI&&M(KeM5^_ z2jabo@4(-kdC4;pl?9kFmg9Db8P|_160;O51~5Daw^KH#Q?*oQ@sR(lR5ko(8TU~2 z)&OC@k@*`1(T@Uk5gcUga^RQ3uPcR0W=E_Fscfk1(+Qfo+bnsK5I|?ASJ-4pTN3oRJ760wE^x{ju`wZ&ddz4TAUoLOjdG)5XyXv8T?8|*u zPp8Nf3?Mo5G&#GXak0PJwE)Es%T2h%Pt8pNmE5G|7I{(@a+4<(Avfvn$W1KHxe07Z zWhd67*2x+;v9>1+2ye3Jp1_CQKw3ScT*3q?u@UtLnplD7uu4_)1cd1+)ckAU8YgMFyB8vK=kvkGk@2yt zG1B_Xynz2Coz3ft-P!E%@PX^-e3}TcE}DLMiXuD7x2Kae`koUBmyIe%puaA(q-d>Bl+&B)OG{b8i6I+n6!0UU1IHXDgakQ?% z2Q$onaM_vWb&rYu*64i&(N2j=m|(l1|FHxMeVd< zY#@%>XH&Sw@Z+1E2^$7XVH3U|O7+8>7fY^w_=O2!;E=+p`7Es7!r2um*%gEhXuNWD zAoyMlF}ze$LJa@8sPc>{DDLD3H_ow#7+_XhrpnEp!}QbRZp81dJpM;cc0*(d6%KYK zCgL>1rt5z-cj~ECH0dPc(D~%iVnnRP6+eoI^${08*+;XrC)J{FS+5_LJ+wwXmcy_H zqvEI<8tAc`x`taohLos*m$R4(ddhR}<11P{f`K3vZCa)%-4{9(b_3SfQw6%(P=Igo zqE4`JO@#7Q2{9_mTL&!fK@)R=cV>A%P5wLOPwNuOA5l_plcmqOL#~zFAy@A8oDsk@ zWOZHTXPD7B&3c2$0n@;?v_oRx1e-il8f>zp+b|8E#K%eA_o{5yAy0|){8qLe$kpZS zkSat7a*BfE26EO{5OO$GlQ{2%nQS~Ym&0F)6V5<%Z__1PbhW3vz~mXaiO<)so-Z<> zr||w19Q!k#=7sMQ9tLhwdgNd*2OB`a*k^l1!4xeV(BX=f`{QadgJiXV5J`x2YJHSbZBXmv}DZ^kgpa(pEGJIy@KAq=)$vS_cCscj!0#)wc7~ho ztXRY~KuD|mpAc!zMvAh?U#>17X*+AtQyh+Tr1n@9}awiIuVNIb(lO&(A~#h?Vac0sI-kHBuqvs$gCQ|JklBlg*w> zR=qO`C;Hn#l>?-ex?bQu@!#6k?McxN;+fmaB>*QoXV}ATQSJe4czn0O;iZ!-XL-1$ zCHV>G{!Y@t=y$cODGG9B&5^}5-hG0}X;Vn=z1D2#P|)sc**l;GFukH6*R-HQkn6*W z9I4@dH*x@xK#o&pnpgPf%zQ<%I@X=6S#)1y&Esiw2xJX9>n;oPRnrnQuo%R^V;F10 z1x1X?c@G?>i|8E+kq><8jFGM4WytjwxSC(2z;z3J3*_o^N;9MvwZ=*Yon zldcYBBGwJVLlnav1s}$sF6MW{VJ+-ZGz-fDy{`EfrJ%?!<}XOqh_yEC^%01O-8rb|XcT z>yfh-4Dd8e)_)t~ozAC+gVlD99Lz86FB)&xhd5Ml4=8sB5slR?m$VL}=Z9ev|AM%(i9SxVcRGhxhV@{Bv4m=Tz9d}8b#s$0uKIurHC&@7icE5*asLvd*>TI;S%rpYfT zYs2pU0c9C1;uU4tEccvaK}JBr1PDe1ow4#aAwqTD(T|*;F}3r;3%r=hL!SNnAys(q zD-MLPvcWdFiZVOAjFQx*-4mbT9`P6XaGJ%Wr?dCO>le`G@Q

5FPFj=7^TsW+%%% zna2^Y1EH`3mx{SG%;RXk(Dt2o`~pIcud^|KR*1j2R+b|c<=H>nVGiia`~OznH^j00 zAA&g80qg>C43HZFuh7^cJQhnHOp{zM+>1B0*?oc{&0_x%2J$WtYR`WEOt0aM>%6~l z|OpZMU#@aug5&jMBzyWP^*J!gs(EYU@(o5K5M8K=CWkJfRD5KmG((ZxSNP;oBwi? zfGK?Ppgaxcabi#&MDUwW$l8VK$>PMS7<(54r=o0>mZ)8fJtVr8yZxcaQI{1jWPi0Z zA^WxuHgX}mB1hd_Akdi36#0VEo*RqyT$r;uwdbzj`}1I%aaYPQ(_o}HZer9~v0uxx z!i$GS)uA*v1UV<-@P(H%Bldeib|ffHv4u23R_sAU^_We!HdWgKq;p2l&jHbKiJ7}c zXcKC1GXb6n!9F9~X~HH0IGW3#K|1<|;pe0vEanou6;TkE&07(#7xokn42>3!8_Zg< znC9GQ28uHc-oGI!TVz^9O;vl4A^Wa=^@Xu`VF)G=nYERiy%kFphOpRvU&4@~pSwCM zdaX5q#XMf4?s3D)GWGWV8*xLKN5f5sdU6w@Ubkevah`1K%=_x{(c^)K=ih{K?ZOc5 zaOUI=GI~hS!Vt8YFx1RlQub0U!GUnNbk%LMBOFLY{zFw=d;9N+EjGQnbFb%kbmo?0 z-81Fp%=4!16>MIUnIn9Gmp^*=6SZ@n>1RFJ5~vA!?3VenIH4Mtx%+~!8fEv;R0E96 zomC+??enNM8*{Aqg1F$K_v0$`GCKccrRROc4R|^_c{+2e9yU9(a9fTRjBu{Z2s1bM zpTsf$PKh$X7}2UMO4Ngw*fT9&iTZ!HrynKC&Oh(BMEL~8qVph$zx*j6OA*8DL3mN>7`%U?`Gw^q_XCUisj%oXPrLW?H-j&tOF#cj5S+Z9|37FI zVOo%A=o=z1^?RjMdG24o3a|}h-L{3==1R{td=j6Vsl$o!8~20HCR^|a6salI3q)}x zduT0l*w`$=cvO0eBCi@n!iSal{U@FX|~!QmxqT;cRxv$cA(0%UjNk{Qp;xjh*+6Y!nkV_Pk+T z=gy}5@5oRUA9$|y7|pdFXXOs_^YS!r?s%w<&om?nxvG;;3svetCRyt7`ySg<10nTb z(=)l$17qywV(CnRpP0UeVs$7x3EdIi{Q*{oSl>V=xeWEbn&v)eHmaF3pOQo-@lBhT zpky1L(yT$>xrT7Ss3g6muD3>A-`v+RD!K zta~30LUqdGAoLL8M$HFnn7fC$AZm-goiHv!f?6DtqDW95Et9fr5w%cDl!xnJ$lB$Y zbK`xr%l!r%%RVvFF{e9U3Mph?_@F&6k3fK0m`GtJ%*FQ0iK_L-md~arHmTfx?3yTf zjck`oUs3WQXA{G1vs4p9Y{kO*_56uQt#S76peJWVLv@O_DUhNV*u5OmRQV9ce=UlK z-M`|%m%Q873lq`TrHNUR_uNb1Q<@d%BsM+*8V;!QUc!cTQLyK*mUxYK#1rjUTSdTs zsaLeaL#X?j{lbM)MokeCuwQ9Fw3FZHD`eU)#lA;eristV5z?-rSx%$SD`%-g5cP#i zmE60t(W?KBDY)AyZ3oG8d0)}pYWG{iU}X5mlx6RUaai>@b?^^Y@DYwBxMw5B3jAuT z-E@};+*oC~sT5G}f})5Z))fM0)G~w>puhd-={a_Wmd~XrD?_WoHvfjS9UUwdVoOehc|hZW1T7|`xw6ZGm-EAIYL)Le-$TF@Xz+FC|+dx79G&O1>&(1j{9hSDzY zfR)HF*h>0Wm@=$~B`BDr@45K2<5hStAAsY#sgK|b59aazrRY4}i(@Rhp3?9H zNqBvhf`ULiUeHx98O*k=A2XN3HzlT9F3#XxX_Ddk_gUFuqszic)?$}p4S-*JlK5c+ zb)#y>yXE{iXr`_UESROu(e&L)8o}&_yrjT+-cNmaM=-l6+tai|WZP8vL(O8!IlJMI zVjK@Y=-%6LT1>RyYo5PKm%@>dk2gkmpR%U4QXBn?kdJfUGOoB*8vXgtX#QsJ!>+`J zp$S*QD}4m6?&+Xg6j{SWR*-;`VOMugNI&w&-ny~NH&1_Y^NaN(yVY-5Zi3`1RsLjRZEl5z4iGK@#ShS%@p&jhPH|1 z_~U6(lUYVFzTO=%$?6qNjnA#2-(S|#h;?)%J({v@LRIl7rB>YSbw|?XS`C?2jh>#$ z)@i_LS`1?1v*F7o(aBUHxIHah97Wqw1sE#*zV^#?^g-)EQ|?TD8PR=RPzy~P3LP)? 
z?fAA}HtRBJakyFM_m{125X5BxP(tBi*-b}Nqu1`~S52az?<7-XG_4q$Vio6pGh^x> z<)-w2V3I{e{Q?->7eQK@&r zdZb29n$fbkasQgF!`H5e+#j~`vVZ^Rn8Q8NhWFQh7}tJm>Zk!*Gu|BQwXLgJSP|w{q8-x22F5bjv6omoK=24x9Vr%8tM zX%;FldGsVaf4!d+Ed_zU@n3qECL0X?*2xJ5y3=-|)U~`oNBsLxKnJN^mWiefGF$ty zU*R^tp$I>L0-u(=W(5?4`Q%c>WQ2}TQ8UAHe1chjAj_&NkFVKo5fVCt&q@{ImSbrp zP<&DWnKJ{Ty($JzZJG5Q=|&u-jpeY2FOe@9V@!sl?WUDa$d$x3-jdY)*ETff{4bK2 z*Zf_piQuT8cFd-$gJpg`$V3^(XPmNLv(1l54jX8e0%r#&QJhK8WVIX|MBl88P71Dn zjzSEAK3AW5$sClL``t^zV^^)o7Oyq5BwSkP&6jgTpiO(eR+QlL*=@1a9cjBsH$`eT zD@_RVG0`@QSExlF+bmUVgbUwf3BDop+qDMXsd@oRSGXlbiqj;E1s1VltFX0~L&7TN zPuN6DY_--=_g@8V9wJ0*`Xt~8KOLYBz3f2O?saRH0Z>t;xSvHV5K8-5z~#+*U+ zUJx;Cm8+r!31K1EtXoGu;rx9~l_sw>OFEc*wqbfforfz#knhu;^ibu za0iN%mZC%AJ4BC`(X=klroS!DB|07*G@~Fn&ex!wk?>9UVtLEo)4kW0ealgZ>JdU3 zS%jmhBhGcjie5u~3SZ@QYRdi$Px2j1rO?)lx~OdR$&;lKtIZa#n{NfR@E9hC1&g4B zC~TfJbb;;qpYQ1=NP($T^Gi$?UTR?1H#jd9*`YXBr>BYKuLSbIflmuC&=2{+o@lPO zne^F@f8LFUMdt{$XKj!6^vn3eS$`Qm+)I%iw8P$+AWcrT2G9OV%3%?-fqbcS(I|!* zg*Ei-AE|s1;L?rSs?}^EFW;c8>&S*b9=^H_c@0^sADn+sk4w0eDr5^)^F_A8vaN5wribn7toG ze$zj`N7|_VfunA0$!)vSZ)?U|V|>NK7c>s zlky7S7wj)JX|R?pi&U}knkgP%osTV`JBa}rAwS>rMI@~;w@MWP$DT_yUlyY1ouCgT z{WMGVRs$7e1c+go!$@02M`SkZo0IXJO{fDuPQUYM1#~w+8h1hpnSOziC#6`Z=0S>B zJ&gi{a`iLGyif{)c&I&r(o@EKlSZ0>a8ZuL*Yo+=mXd1|Y*T0cM(}Jgc+&{- zt(ZzI+Ju@+?RiX^w6+ZnJCWkU{!0-r(hK=mgS)0-593!@Xoo2=M5M^)acp#*9>1x8 z5<~sSbVKhcR&JeT^&2on@3llLXvv zfcA!MbFeUU#wC;3W7s=`QqrIqZkiv1U?1nH5pDXosN*`y6W*CF+{@~ESDGI~ku>ed z4GK4D-`XM4QR@>~NzoRHBjXRk>EQ2dx?p@tEQWqAuaQFMBRsWXH2&V*pt|tE!&%{v z_jEQ@s53=M{pdOcX^&VY^|d{16Q#5?@;|zpe!Cgcckutth5 zNoW>~o7cjkq?Q?n{NkD9|8yReqa~jC`U5wGnzyWmTH&!4{iqa+whsQTrcE5+5syO%Q6<#8Q}vpJRn@VYd=aVQ22q;BxH~gxDwSf9yNc)2yFOO+QG| zB?@6ucm7x63Cc&9vPQ2yNU5R1sRlFv1C$n6$5;ap@bZJaS4x5TxVYu~8-+ub5%qG! zC#d)0)8Z+bVhhBtm);Q;&>m`1j*Ks7(sw3yhqbJ}9bck4l$eWSDfOc*)L4p;Hp*XP z8gQinh29b#uW63W*T|0mKw7poPiT}N#GkN<*KI9-&eyj^2Z;NhWD4vXaqIHeqzfOo z-pw(>QfMS!$!dN(iITSJ0&90`fUU_B>g|}ch|*yTER>fS^z~^vNWSm6U#!iSy_sW@ z_H&Q?nKZU8_G^?llH{kk^tNzb!ms%E2p8Co4SvyDG#mXS{uTS^LI~lfuW4bP{t7K# zd^*S$aKT%^Q~r>y((O3*(?RNN@#8lTEk)d3t>up?UAULQe!5N@Y~Fd=MvZbt(6D>w zHF&f!LY}rDkNuR%zS5$>OW#K=3Q_{tE5Nr;x8SFzP2!id$maRP0_#<(J>3|=mIA*0 zs0}|Yu;}*FDQW)P_oZ~hFY2PVW^4W4w2h#R zeuST}L^H&%DX92=DL-e|Wg|c5suqEAg|1e=xGZheuO-}dK3{O})0jLxex*Jr{P+ki zdJi=y=H1}iuh5_W?7JCBi!4;aTQ!l7>w-Rk)#<~w&=s--3vF%*ar4OM2P0n9abmlr zQGsqB;a7Y;kf_%Cg|5+B7Q2#Ud1g}4%zqF^=rKx1)KzPCm`MA&(9fmW%j@;du5Py5z` z90H}i`Uk$>2j8>Bukg}tvrtq#M!W25(Wapnv&D?r?qP8ceY-aM-#c9%(SAqXVbRnn zW!Dy$Gsw^JmN25IJ#7mhBwgSBL5)tB5y_hWZyeGO{uoPJRK+JyYVa%hF}mc?jzc=P zv^~uao#K%;uK9nau!JS|xn!LwoT+T}OEd?~Yt3Td zIvR!i#^;_mlroMPwJeYSnkA3VA#}b(OHzfFaxNZ2Kc))yy!^ZrcPv%J3FrDP?5&OKpRfQ%oW8x8{OxFrDnQYU0)T2#qSVCk?=yh_yOm*I(^C7@H!Bo2 zU6aXoZV7eaRddUVJtn8&3nXlO6rOTTn!$c7KGHn!x+uQJ_?HqW{+x=-nTvnOyUC=Z z$tUOliozPHa_%iJ(=U}e5ycoU86SF$@yPaU@$UxpUx}fKXF4TUj;=ew62$74xi*ms z0{?$TuGHoaoRQmf)ZuRwhkxs^d<2o8wAEH3s8W1!h^YH^NgLX8@$L#z$eca$FXwMTk zJj;-?2ocCxC?MWj)=L&YZ(3bTPTo22(}maO9JvD=gk+)(*^>>IuTX)~>+~8=C5pfj zktLb5GALXGUWnLj<6yNLCRpN`Q2Z`Ma+NNOd_bhCOy~7M>aHTsUHMaDCr!5gDT;et z#ePb7kj16`B^C{jV3R(&jEIN9;$UgViVQ5YB1P#1r*c~X_&I=fk7NBDx@L)aaml~~ zE}0V1@_*<2DqDqP5*Rvr<{qe)+q@9?jUp>ZjWt=~B9KP6tcbN!FeidzKR?<+szuXV zoCaYP2FJv^uLHfZO)ddgfx}rB4)l+`Y19GVLsW|9+zChN)Yq`e7V^E#TmH{BUN&$p z$LU|I<Yjlh0U7V~t~JY%;+lmc*hw$rN%@mSHoI)9sX@Gl@3(^Yvh%-{UFv`0s zQ%Oz>KNv3=fIokqwG86)Jofi4iL^$B4;!#j-AC3$+IR z=y)_x)+@q+C{6uWiu4D67A29iYO?9HeO*Y}5>Hq6_~r61qC5H;fak}tg=dijOppWar_%$BOkVl2dT5xuP~*KpmR!S3Iv;D-OO3O&OH#Q4kk~v2AA6D$UhS!! 
z3{BLudHG9|!GhV(;9)^%zSFThQi?DM>}whoKtZ)e<`G!7qx+fZx0VA0PGZ_Frip?= z$c&__8Oz$`E62chGpF%6Lq(-6s`rXft!1eFyNM7iAy->VxW?Xf?e%-4U12Rql? zSKXgnK_qY0V@yoMloG{!{&p^!s*wsve$f^tQfdT|QiQCU+D(19YU&5$2ukb{!&FK& zp;8(q%cic-f4OYx1XK-QZyd$cSPKBfw2*aE>B1$ho4N&Q)WHjHF%j1LkYm-Bg;ROr zXfBud8rrDqBUdpM)&^oSDW;sNYSI19l~b*8?v8tl3$iZoBbZ62=GuZ~>C_s?u)J4s zMb>Su2V>f)GTbgZmtEe+vs^x`REwUiCD06)QHwMwyWMO8wsy<)se6(9r77IS|j zpDIk3##DZspk1gw+W*n#$&c2N#XZlz8A!_TJiT(X@w_Ytv3&|ETy?kqDS{3g4C%@c z39(L=VJNCJIn49l&V8Ur^mo?ZBYj@LPD|Kspv6iE+Wc8VLM>N+{BV}5zw^~>l$9N7 z$(wOBDHo!@COCm_`sPrI5O1@_`U%$L2HJ)*Y&v)a#y}zd!LOK*F)3w?jm~=*==sV8 zLcccD44qxZ-z5n1W3=(M@e~=39-DX8`U2^)nNp$)D2;M|6HWwwMrXBvO3A$18 zM>D;CqSswpw9cpvS`j(i8e#Sl%qjT4+`uV+dgatiqFk%5iTbu#Ob5>CQfQ_wsqty; zX_CqL{6X9^+r3>7*I|^dHq03yaBC zIXoaq%&^eRrMK}Hx@g@I$_*v_{}fuIr6f(VE{QDGY-++16iE5WGr#lT+15GMg;oI- z2~|bPVWy*$PftTxnuE_~f14scW7cQdsKSh20_#(KBr9=Rni|foU6X`F$AXj3w|*3s zZeBFKn6)shqPa3YM~~0ZKV8Mx7iiZEDRe5C)&vSU0Xgh*CG{I_Sg5k~C9jUSoz9YI z&`MMhGK4}`Q^lwsZS^Cop`3WSJ8l(ePewdx6KeTa&GCeCLbMr0$LECLb4>W0F_6Ub zACG}C5yWIMD%2!IZ_}E)vfF&ee#1`p_7dqqS_$P@+T?yYbIrPiRzH(O>P0M~j3DZu zT!dGSh{q+eN1G^EQU%AO^<%|e)w$;FCXH50H;&NM9#%X#I&KLP3-s;&sCS*dJNz16 zNE+Rg`aWiywC?hqH;=6BVVu;~ck4H82W^R2eW$%|s&Ufbt;XSP#g!}k`$xs>?GZV= zfBc7O?RTb*8niX{&7FQ*#;!YH+`lvC+m#2rV}`GLJ@wAs9)XdQ{96u6z4P6=U1^h! zd>HFJ(A#2o{|X6VmhAYTr*egNbYeQ4lQzy98+TxoWvy1K@WMAY(`=M!iw=sPo}U-h zX8mq5CNH#!{zIty@ljGhOYd+?&hJvti}zX%wqa)2)gEY^>YH`R>K8jMo?1Q`M!H>F zZ`gtY@`FR?V`&*kZ+lmY+nb&vL}@+nwse`&^Ye4W%p8&A*C$eN)-TrB=@hiF0$b=u z&{}X-(33@w*IUSB`l+GW&m!rGtu3JgP5gz%hFY}h(@x8@>D9&g5_-`zG!-wfl@yWz zA$*5ftx5gm5Xm!t`_%fBd^nQY2+|qtS@AR)??_``w77BmIxl%G1mp7s$)s#oe9pH9 z`{Iwy*GuLiFKO~ses#EzDVI7drnZJ0;jdBS@l>!YT=&9MQbPeg+l16J=v|f=8gNTm zeoOK~9U=wd?v9Y4?#Ch*AB22GUJ|Zjd9W`jnH>)qF?LWHqK+OK;F$X@1`{r7FkHDBo{kBBD zJ!n@k?%AH@zoJT4};?I8VxtLfs?NGdXgRB1$2L;R7c-bWmZ*ei^tZ=}bgwo>c! 
z7jLZ4rjgvQGs2*K+9cRwrJ&bib$W`S)RxpF%`?i{j|6MJG1@?R$2$!!H~116zd5%x dZST2zx^lM49$86L$-{$)(ihzJn6ky={{hHkMgsr< diff --git a/test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/nodes/worker2/sysinfo.tgz b/test/e2e/pao/testdata/must-gather/must-gather.bare-metal/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-09d31edb2171e1fe385df1238ce5a6f7ca3556efa271550a7e98e26dce290128/nodes/worker2/sysinfo.tgz deleted file mode 100644 index 5d9d6983475453f6418413725d3dc2ed98e8c9d8..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 36422 zcmdSC30M@@);5Y$z@rA#BxoF3W7G_4Ym5S-mP6DSRAL+eQCpmdiXfm&0wqqEIN*?g ziqg&~APr8)SRx77i=Tj-|((3CNRq7Yr zZ~mFNt;0-ub?KG;K*X5-yzld_`_?WE@caHN@ko!fehXLc`aUcoD{9e_{p&0(`uAIN z;LDjcP|)u>Q1qtI0n#27JshnM+`K5ZzBaaHKyl2OH3qX&=lRaenT~e23-54^>-# znqMCuIik9+Eay_rfo6Ggc1EP#cmYTgwKoSPPm~nJk`3vi2C2`)N!vu^fRDpxg(<{7 z$*SK-!p?u$*6`4f>PRQM?C^#>sdN0wZQ;l6tCbCtLvP5JSVoePZNjWZQoEYTeO7+p zM9{U$7KIb-ph#G;b-ty5_J0;NBG2vQX7Ywa0Q(#_7bj0F7Pe$RCayd@x!?P`ce(U_ zR9#MSWuA1sTH~=$Y_np5>Q&v9G`~AbVgI)AgiD}LI<4l<|3aBT3hn36!wTLHuzYn2N~zmElXqUs-Pl>$4-5xKG?v zWg3|+@t&|DL{Zg)wwWk#mm3!Im+W8fM*qJ4eBQ=jKjwNi?=(L@ROA<76B-h(a5H1SW`CLMw3Li;43$+0=!DxU>GitRH&sFM`;#B`Ybdn$5EQM&SRUx-h(OLc;f0( zMHQ^LwQOCaHNJD`I=ST9{jCd&_g{CiXjpn|*2KiRzIm9vspEc+aVI?KxYPMdgiB9(WG_k1itzcJz7;Vw z-%imKS6}kUF(-@C1C2$M;j@ypg--rU*5_5KYU{s>%!&SnI2}ig308QQ&L0!!qHIZ8 zCQgVtL6#&1ihP7JLr6~1L_ zZE|xTMnwnP-8{8CY(d1VZTVRzqp#SAa`KA>Px8)%ZOv4-oKNZcNBh- zSEZ~k*ytMjM?%COC!IY!d~Swq%U+k8JZTgES>*cI-bje0}zUia4n>*VbvL6^{?wlOsnwebUnmPP{edCrAH})sj5+8?(U^2cU z*t%bw_WA8f3;gNDZN=U6tJ=y#_TG1I|IRyPZ;>&YeZtDZ+aIrNy!~;X^s8IUox62B zG}0@+V(F2w&L0O>EKPOYU-3!CRnv*izwca^zp?I>Nd|;xvNAm4hX}1B@n}2mL^S?jIrS^1n;r&?Y{evs`g_B(tmB?Ga zZuPc;$r)n~t>8P?`=oy06Z+yzoFMj*y-$sWhiko$>zaZd6CW3UWSUI>da&%toQ{EF zj+9+8uWhi2Y6)KHb7;lD#FZt5ik;al6YtM=p;m{-wdA_jzbI6^NRKvkx*YfAAs@}h zR>>E`!`7H)M4OsLWeBOz}~EEJ!{t;)V(t>oG-<98}PvBqz$F zSB@#z?>ynb_(lqH?QIg0HH5qUE5_QWXf&a1DJx025yvlWup%|zP=5WL1)s$6GmAsc z7l%}wo^!l7q_y>8qF=3xpel}^){yz4b)C1yZL7x3SF@;OYIZ62UqAn6k)y^-B)(ty zZ`E|~|2tno(~0=##=udTC7z0wb7lS)g&zslouH>SSOy6nS}QN}U5Kv|fB|BR$GZB_Y@m*_ct4(j1$|lX@$^avyc1*=^vdFEf*kv&qVt_W7{9AhK~u zl^wq!DTyg;j_nmlZK_(oN`8fosR^0*GJf>xh_Swzo0W?!wd=-9Cdlt6Y~Q?c-PB87 zJ_#XbPI@ZWSY|!T@@z;*WJ{|D$cuh#Rl!q@_oK*eeDjINgUQ@x~}XOy6(K&si0kEv}D40 zx5Xb-WM@A1a+>3DLzSW`R40+f=NI;#H}-tCwL=mwXf_?%;vE*G+4h*ab!_9U73+%W z-~3gzQ^*s}0_K`6Khz`n8NJW{T$zV}SgXOrt>=b3qFWwKgjP5BDfU3|F0sB-<5 zmJv$TWszlR;URf!@qs+&ml@74OTNhb7tbla|K{saTh_&#fAVb9*UcsByOzh+`G5PV zo#4=XFilUboR#8jSD3V=q9N1u`HUJW?F+N~mz7nTlTTESTV;nz3ac*FgY%a31NE(; z2a^XWAnW)-i@^#qHU64WMT~dtLFN6{-c!YypGl_P^{U<}qh>m!$4iC5A;UIlcn|h` z<7M{jLO-G)^5MEEE5xlYThn-q+E4P~w)BLi**$U+k8G=$@BGrK@l->@*qnaV^Icq< za$cS-%vy6IuHxDQIdjo$&7!>vVNlfQ{ca_&!t$$AGT50}`h{_2tL1?|4hN}2%6^Y8 z@|Pq%mNsYGGW zA5GP9i~3RYB9|USouirAU^~mNCc0FMrK$^?o_XhMrh85eC$);8Q1Qcqv*RCXJlACG z`C1;M`XJP<;G^{=)Q!}G>Z-j1e!nzOIXf(y$lP2+npfN{UI@Pwy7tqoPNEYlK07eG zn6fNeysV;ukUI>VN{yQ6eW#iVEoRbO&mXyY%Q$AhX8&QOt<^=^hRubuaUa|?;=8v; z%`SFX`rDYohRkaN4dtrPkJl6g79N~^_J)HdJ4#+zqn3T*d`!3&+N8Yed6-b-wic-B z@(6$C$gn33ndI{P%*c(wiYXM|e?N2g$oA?=VoZpl?%L`3ackOg3>B?n?HP}iDObhP z%4=ck$z@}TD^tT26~^R?@NJsrjE!zZSyCrAJL{=y%@RMHD9p7k5at$|UTh5!_%GP+ zG0{F*oe8p$%vYr;+RO&$U+2%>vUGmRgL}|CeovhI8r(?H2%YQ-QE$`(KwgyC|xLWqzU&mb9j+YfGUyEy2(Gl}52iadsttDrsw->|Ry8DiJoR z#6yJ+2ga>4@~nqvDJey;F0=2N6|YotKiTWMsNf3;Y#Rh-T~aD`x0zma)b=h(hxvZ~ 
zE3Ff%*7z+uEi{u~(rlfyX{nNV^i87#;!_*$Q$17NU}Wbs@5K`=)xx-3pTmy#q!QC$ z4K?&fNGN&?bsL)s$3t+Sg4fz2c$vQljIum)+>IV(9%LG}h`)~jTbOzKAdHWM<1qb! zMO)P`apwk-6P@oCkDsrs5@%o z)CF2$3}a~vYiPr}u5C7WuW$(+f)libTD(&n!Bl9C127LNsgv|-J#axST15x08p-Hq zS4EcA*MVIg8K9nXr^VVVQB5YOT?7yXKYsL-+UTs18E&8b6K!*inHkd;s+Yhx*mXzC zUAI&rm`)En!)&`A3E`3Op_u&As*cx5Og_%O%E(j0j{F24?^W=2o^B&VG&vB47WSS) zX6r_KN_5R96N>WSdNX+qx}+YmF!;G@DWom2pQ{kA(@ijf$?TOub{z=p0FH~{_r2=C zJ8Ip~j5RJ?A(NMVkO?eiLVf~;hOmWR;>}uNGh>*k;e{D}$ZecF0RlAS&=@8+`w$~6 zpve;G6u?;HYQPB<5ziPp;8JV?9xX8W;X@_@XOw#t8bqLZVq3>c^_&bVCg74RWvp|+ zWK<-Pr@L>El;~>NL@rhEXkot^c%)nP3E4^FhB|k@p@9f=fv<5~w^2XS{4F$ZS4Fow5qOgYdlPj^zkG6m7Z`WV?D``&83j*a z9MD{#AC$p=(ZgT~9*W1S*pukR^w`ZVxjLwtm=@DQUeTR`^9;8L-0{d@?@_QXLr&(# z*{L4~?;)p`*9y3uiGa%;@3R%Qk4=J3oyG7Jmz>5Ue;Ix~s^IM|ZX+sbV?zzhSOvV! za-y`p2E0oFPiAL@D0C1_h%`J~n7)b*54ER_m0ySc2)_s5eK^BN1tHcNccyvlXyYfr z^>SL=&x;9n_E5k9tKc(5##uI9ywgU=+)#kChd-#;Eu%-XwrJNulhFV zA|;GY!8Z3u(yVa&Pj%GY7~Q$O$z*7acV=63`k7sg%gookWU#yaFiFYf5O_x}*@|oU ze4@dKJTh3f9+B!GCAB6dpn_VDbBd}X%Agx(&)graB%Wx6=!898y=%&_D~x*-nAG0` zI~8FE4qsSkBlYHoM@w&ue*l_r=V}s9A9;J@*H;+1?iTMnvL0rZH>0O`XP5(Z)HE>r zAO{c9W7v{Oq8Fe*sF1uupLPP5SuPl|MZ-Cmq(-uw;Ta{tbTdmec~`^xumDDh$$&d5 zfti__7gPAdWl+;-*L$lcGU6!CHK}xW))8uIh|bSlPXMtiZQNOg2lODb|9RMv#kjzx zMsZE#dIe8T7-ix4J`awpA4r=d!2LY9-DY=cGIYC(mS{uUjMaGyoY6|| z_NNr<43p}JFzCvzq;m`LX=DGn3LegM%K&g(%J~;s!uni!1nW%MlY_^|f%f!*E*hfW zC}V>qIJJy4cF8ewAKKw|%bQ63ZC=bs&b@HYV14dDgT*+x4Sp-HI+oTpe(~yiv6n4W zrZ83+xH&o7%|H10VgW4i%gYiMpABo;!K~O~DW|T7Y9%43V5rtsK%Z}jC{h*$Gd0Nz zYp)rTzKS(|it2SM1mqQ{`}yT;A=Mffv}Al_&iZ0%>@DxH#iTMrq4G{%-k?qFb>3Hg zlNW)|s8sdS^Ighf5#7A$rg@uB@QLgK=8@+m55{--CqqA?sqJQ>6z#^w^!wGyc-j+| zW*4g{rNB6660f9@DyHkqB}`8tlzNYkj1xSZ`6|a>C5xs`S_;%ZI;FJwuOyvDlxAlf zU6&u6uM}up^A}9@ZAvTk-z>jQ_TG0|=qJCKJgvmHVe|F+sSSRqss`~ZD)UmLIO1WT z$X85c6|1cCX8HBK=(cwD`lN%~Z`%5*CY~O2@Rm@)53y0?g-AsG3?+~VOng{YL{(jD zpm9zRk>RoQv|p3hX+!$OBL9XIQt6*k;;*Qx7`W$&f=D9^GQFp0E*7i0yF1F6O`*aD zUZ{(gW|M8dL^_Pr$ob*2>d4X|rM_cGrvmvyW+biJWG1X8qCe3R!6T(L ztc4`Mk2QC;C^Nkc_lbk5V?+?nYcXKv{d1lqPp3ZsXmvY%6oA2>#1ak zUk%mvqJff9t+{zyWj2nQBq`~pzR$m1q`GTLl_m<>ycMqMJihkY$}z1eF@?ToRR!5* zZXuJ&I_e6#67GP??so7PNG5vaAYtlKw=9V)K` zwtTTvxO5kh_MHd{@48|1uFI{h5>936)GL`X8rKx@?eZVYK34TC|c$L*P6@uXAHcOjlFFtn{38*4QqOi1|cT()q z__*xz3er>Ckb7U!e}gv2_t*jOYl;hMI&h)Q(pwwk!~N~a{axMM##4;w51JP;UPb@8 zO{QX)lQK6Q8ru>_o`U-oaKz$9%pSH$2EnVXP0AN) znf|XAcwt!kg?#ox6?Y+ZA}b>A@Yg#=IDoK%Ha2m70qcDxJpI=yU#;Itc0o*=$lQ5> zUbB6*?u$M;KVDXW*;Lk7(uS7AK*BOzjNus0tLi| zSr=EXnNs0E%YtAJ-{&8j+po6K^{)a*}2fa__SrO3N3};>gI1jh!Z6CXey5z(NT# z+GL*0X|=%5mdGDJI3F0TST0!jqc$Oa5d;->_PWhTgkU9Rh`mB+4yV>vN?+W3 z=&v5Ci5nr#76M-rR3GjsV`Rj5%fY6QGxWuP_2=PitKZ5sFACsvwVID1R(3mbu(E$s zlXzy1MmWTG3KNvr&HO8dRte6w46g{9W&&o}&B8fTDxRxdKwJHp7org_ZKU((YT?ae z8$Bl3MfEk_<8KSkGn*v+qhV(`ElN020}-c}UcdCi7!%%8GFAp(%v?avw4HZSMmU$f zdT`*^+NwRK^k7%ZtTZ{KKw`ZN z&M|@2mMaY|o(yb(esgx5*o|OI&Kqj<0=}hrYI4Zfklcgcf%OwOix39R5BP-_PtDYt%sT_2bxEAy*;-UUTH@f?QA3FLm=Z~lvTNP*kd@> zLL#ogt7e~rk+tw6n55)Gtfzu>l0g4)j%^-?YhbRL=%xtTLGwx{M?rB)K$(=*?L2esa2mxcOUWqmF(YakP-oT1Z-bQrsAM6_GO{Jr?9whdw5hq+g=i5oc~(OOO$av67r_at}7B2^QF> z2s}1I4VAKjzRqo6$Ix&592(fd3yXkb*I^eP%0pkFseOVD~y}9>o8(}2;0FEsx zhTlr2eEGSWJjw)JWP0O8_#naLB``9Edk}8%OPs@*jzy&fZ*TrHurLSjW9LrKpZ8EM zq$cPdb-v?KQ zAF!LxXdQ88!@0-dW(Trc*7yiOlN%&EIIdBX(KdhLI9j-wb}59^S6j3}3t;eI*c?wV zrX{fFEYg%Rmvhjdt0i^=ycqs& ze=XFy5GRO!;^zXMXAL=S!vb-B?UxeM&tJl=FD#g0w+W`e?@UG5k+I;y>}xU{vL8al z`^h|&_js2CYge(?7DloHq}60~4cxt>KrCNJDmLWh*#yCXwkE>3n~dDGr5|M5!fo-s zUm+i=y&2QJq6|_FLCRtGGN>)V)tCljlp2xuvbm;8u#A!YE1h?D7y9umEPliY^<|72x;rq5Irrs7qK+6YyM*Uw{P8RL-ZZerN`WVc!Y=-CJegY2fLO*RB=1^S*n&@_xrLj{o7BoZ 
z7ol%wDOq{}UEQ+p*f0`esG`=;#%Iav^EyBMtbn|?Q!?0CN=Rf-c}PWcl`)|&Y!28$ zY54suYKf98E?ER7WlLAw?=+3Sniffe6q74k%zj~%wBQy7n|}A2;HLVkDeg zK1EO~xY4}MAdCCpq!i$Xu5XKJY^7~0|zJa@;kmulVl)O|U};3Bse*T8W4K$uQb8b-A02Q@X+Hk!FS`!Y#pji2``m_361(>}#{ z#l2JsOllxcH!e+>+-Ab@9N>IfVTf0qV8UwJxDHAMAootioc`9UVZjDI9jltp)2}=r zi2(_79~1W=Q_Y(J>rZsOJS!T8;_&KlFrFoVT!W}j+TXKdJPm1buL*ah7WPg9k#l1% zSup0A-S(l2Ri(%Kf#~|k^b?HMMNJ0IL2!4>!%m1N^Y%IzaIOxfGC5t<y(poU?TmV~#6_Dx=c6ibDYM6#8-4Q97cPjg&YSxTihYv$YFdkMs zAiorlzTDqo_&4U3y=%#519KUx}CWpEtse=qR^k|kxvb})70VXOdy+henSg{wCfa;dmndc zQj0O?zJodA>*Nh*jqoMGlC7e%c%+)`k8~Y%9$>nvwiO`rLeAt zqkALF*kY}X(~!L9{TDXHRsIrt@#}u1k#Rbi2%5{nk&~F5m#O0!qYF)QB=FM~+O)Hb zWKUI%MsS{q`BR7!0qgtCsa48fxd4Xg90k)#pDw%h2P*W4~l1I~hS&ZZRiA51ltd|924# zIeTco-*6r|AAhBfVj9i4GYH#plXZd=s>* z5kJ6&25PrT@_R6{MQG?4IO?=P4O&?tWZYqd&flp8i;o%S&2sB?_!0C7#(2a&_$uH) zXTj7hACRw<&b|cF>qNBS&fBYptc3XIG_N0PS1pniW275T)*I{-LvjT}^u$|_orA(Y zLmU{-B;a*nsju@eM>A+*tcG%W{>Lc|Wl~}a4Ih+O2z!4_+K2VCSD$JML^KpP;7*V# z;9P}l-m9>tIYSOXZ__)IPr;xP=;Ky}SqkiT#?asf1u4wcZOb8G)dquw2bV)FUKItq zG=hIwgJIWJlJke%#x}_*V~YYzC2*6ui|&bGs5l-5uw>i=h0wp#yt`V#Z}lwQBnthU zp0mW+nlHK0X3OPN>fxy{CQY zMx$rLCmhZ|-y!pdk%dT&cFoM=<$bS^1$phA+XSDqz-PL()CYKO!WeMb0METD8{q#C zOwi321N_ixHPO|+t+$myGWSAH*rY@7dad7EQ{oJG3ZMnf)=0@ucjSFmK;*+Fb|(-< zac|7}fywNe>HR>m-VdyO&kyW-?FVkQ`vE)5_`9~k*1lJu@FEpKXkZFzc-rOmMuu-oLk?3!?mW578gV$|faDoZX z?#-Efu!Wuzc2!$%gyC6`1q<1LvK(fHf$JlXKVTM?Qgb#=}yfHsuZ?s$De!Qg4H}L;dsVm@FDO3Xf2j^EgP&d-`>;mRfi_OGVREoz`uNsMLE#=j^ReR_onD(bna!>-Ym+k#%( zHBwC-XOXq*X1!728P&}*20}FG^0_Vf5nMJDV(?cB%;u`+w2^jf9&3Y}o}ngB=>8cG zat+)JRL4Cfc6-W~#4Wc@xOCcm;E(&blh4kO#3xo$)7Tq-q7!v4ryETfaTrvO+&OpR z%@}gXJ(|0NoWA8Z3Re~3cOuCI8e3G8M)Cbk2X=13Baev#dm(I?2Nv%MEtTXgWC&--2Q~PgBg?Y2OZOW6SZ^Bq7fMcbfnzsAP5^0gfDJ|J_qk`7cq$* zk8_^&IPr%p{azb+ddj$*mTDwgEGM69qfXq&RBydzJ7=^;VzLC3O`iGJ(XY@%>34O6 zN3Ce&S0Gi$Hm9n2>>J=(z3gRRER{6WN}hn>&P5cuZfaxB`fTlIDIa|Q05Xs2VEf!h zkK4cFaQkQ(q_VjEk`A{o z=5YHIJ#NQf$>H{Dm5{^jgXzQ!A02MD`2>lK@{~3gB@BiGm)nY)?-f3&K0xdatUy!u z%QF6G5b3zn_yEz_Rr^>;v9%(N03$Zh#+XLKw3tKcf&k4n=?7>LK~a5 zA#O&5-o);n{z%D%dX$DCic6CTc5d}7C}bCy3oUNIV-u;4-!UWqyq5Qz&B)o1^IZ*3 z%4O|b_`xt!Ft|?g6Y>bzsQH~zd(tDvb=pua267Wj)#Cy|>GujU%b|S4eXxIm`Rzb9 z@2()wlJ&+}O-^Hh`%BhoU~>KeMvZ?Et>3ij9Z=8#DR5qK<}H9>(qR$gc8W5^6V`&b zoN$UM0mIkncsnRzJtSgoTDYnUy5dnbzP5vb62w&QkgB@i>PeytrD=)l`M*rK$6pvf=PYT*Z~Zg%VgT5yLi z@Ydp5B3Hnok?jh}&HWzvD<3TOHye=-()wRqFA)&RNS^n;57`$`jf=ALjYY8cvYHB_ zu7}YqlUzIECy2Eorz}(nL|Caoh>R{Cfz3!411vK-TLYqKlT&KeDe6??7({0>kn6MYaJax`mf6Ys|bFUGu?fKe@&Bq07J|0BQUaZ~hz1peP9f`iQCP^bqtup-e5H&v4^e^o? z$zTQour)HWDG9oF7Ly$MCu&h*ZOmEefL7osFDRWpTxuZlt*ly)kB2tcy_AQJYK~ zqLL3^(K^pN00l*TE9Q}m8BL`P`xQdxCPrD0+`sMVHR!caLGnn)DH<>-M8ti8Srfwe zoNK05LfI!sO0ubVZ?$!q!K)aiG>jQdlS9lEQapx{RZjU?{1?cEX{k|&%~(pSD?O*c zph-nK57ejQxtNZh|=tMWc_YRWdp@9s;2N2Ui) z9s3RpPg%>t@E|19RJ^Wba1J3xtdiP78}7QuAT7bh?lRPNR8ts!IB;wZQ(odGuxSV$ zM7$JC^Z4p!sz7`&V@d?U>^#+1K%ppwFRot|0ed<61r z4=Pv=?rokekD8*hoLuNN{r!+gOxK0P4qIbUhwfDBPz9oPJWMP-(*C;+G2UgFUN+VH z$j_3Sws$)Ip`U=VcIsdPm+HEL;0qBJjaz zt|WK%!YzwgPA@#T7#NNV5S{0^0Ji!7JKwU+b}j&^iMKI0W<4fO&*#kR#HI}Sh@1)5 z9sc!-va|K0b0P4Y2sRijMHU6KXrusiA)fWXcw9IK*-oA3>8bNY?@{L;bx`M*JE(J? 
zR=^@+cf6M}mM2HjPp z9Wp6wWa_RCithA64khN8AhIdjCD`Epordqg#I4JbqZ|u^)G6i|i^>T(`4M-7;BYlL z5KF<+f!}oj%?9{|^QSI0y@D_RL}T^bxfh+mdJBp_kQQyH{)q0NY0z6$KAB#Z72$fN~Ejr%c?mNkj@?WZip-Kx(Zk&k{W zxeilO(~vmX239>SRq8Gr;AJ!Osnx*oYO`O5aG4T1D@lj9 zp!qZielz3d=HfW{jjVfF80`56T$;-G&79jn$E6`R_5=b0{_k*UEKA0M1(EXwwWls^ zaBl~@e;2~bzaaB;c`|5?d0sGJuZ~pz)A076q} z&yEdy=2d#GIj5zNQD4)ArcA!W4qNQOtX&&>Xjv;YG@6mzlh~=rv9E3YUL`qz-fsU0 zx&%l`^xb6h2Ri~4vep(CEIAsp2>R`e+o}u>vPYv&3vWdijGpnXI;@WAb8gi>7Kyvy zC42rh;YObKJ!t-QPcg|AoSM`@BJ?7pz^f7Vux{n;jQeUOvXbvHX`>5t;r}Qz%xVWx z_|>=tnD~ZnN4Vkc(qDFiVlcVI?)Ng&Zx0yJK}cS_8-au&EiB zCBAJqhHqeBa>qZ%<^N+EEeg!o+9(S!BS_DC9R=lqECZ&ml-v5wC`v*pShY3O#q|~_}-$qeyM^v`% zm@U@HUZOC1RS#ly_cNI1vsp5*S#pOj&?RKO83E>FSU13kX^7}SDrAU{{AlYgF<8Av z(qVK*2!F?=BzY)HR5mC-8e=5QRTy*lf-FRxBs8gS2II1Mq{2Ne?mpHKY zWKNH~34N-_D)@ps<5(&J^BPd0cwrho7}<2wyq7b#9a+}p^A`A58fw+v0_9=YZ@?Cs zeuM%?a=CHWh4-f=G47kX$?Wn?M0sOJ{0L~&D9J6XL9TKGI z**6g~KPf{O@jBaHuFd`>b{)ZtXVTTl zp|zvZ3}x_(Qt)WXci1@Nswq>fO>jd;@)JgC6p zLQqY&1;v>zZyMHs!6X=B7Q!5kiO{HzJ8A6Hz+XF>Sefg0V%U3bLp?^nDqB(oi2 zM{23W<@tHv=5<1Pe!B}A1v`nOEj+u61x2^t@!-fv@Z!)hw<6t-iq ztN@pcs|ijp!dZAnEy=J1R;142%PGa!(lpLYO*WimMk|b;m-^<7E;tswp?aX(X(hw6fu4)1i)Qo;ZAf>(D&Qo<_L zINIV*ty;%3mVeovEReLAr3LjuwRh0Bt_bm7vEjdA)L2IR=ivLvKLX!ag-U)-OZuV1 zA2A&sv(;wd09IzhCYLS5=0iWTMeAEHX#rbp8o|+LzcD;HY4TR4or-bq?Q+)(Icd`` z42)RAe4*fR_3}N{=#}2*&z41FSpg+(xuj=&ROE(;F6dd5Wo%m%PNKbz?U#cTCOVS% zIZ)AF@N&Tv_8%DaPFB!Ds2S6D!FSA>Isqz5SZ2H)5Q@t%7{0D#^Yj&BjPWhxjK8jU z&ECuK?|)arR;`ZPcy4dBalFWv(;1)cmZ_{UghQHDHT)dD1zyLb8qli8Jy&|?Zn6ji1 z3R{2D@GnA!me=*IY8PAaz&hHnT%GpxCyoUFcyBc^gGKfnfrd=o^LXUW(Nc{Rq+e@> zVw%a4o><_H3r05bJp#>hIToEU{hq518r2EW0*?)m2!cJOk}1!0>J;+1jM3l&;Gt12B&<=LQY!;TZQz_wVOTUdw-AKY-LMfg31*?NGrXAi zjONiQ>%CH<-$1Np#xl7ga_(ai?Hk)&6H!uWO)yZ`%2HX{=Nvqc3O=I+#aba_-IESo*XztD@ysz1*}H}lrL#I;nU`w+dhXe? z&H(1zbFkv1%enWV;Emot4GfjU?-od>4P+nxlbX6zt|ClyGF56yRe!7~vtox0+UFf{ zlhZPXA-#_{y*H;*L;Tf(7=RO$V*1KDQ%-S-9vin#r%;D>o%CIhBI>A-jM`+NE5+*7 zY4=!G8r5ksgconA)41_aoyO@Z^^DqXHBmQq%dv9kwYOdO`KE}L?c34K4i$Ae7DcD# zM*h4y;C=Zy3o1BKJu5%wbjUN?<>#9Wu&$)nsh%&-zvzK!xp|A-{A~#Y(U$=Bj`ZOwJ?Ng)I>EaAo)cc#H*a zoo2;8jpi!ei0g9A4DLxKRjgiZui6H|nyxI?XCqq{d#NJ1u+E9du>M`8=hVJOPVG3y zsrArvYUf&n`E31r#Re3Q>CTa{C7c%mmgU%N;o$dr6-zX$Vo7b*&S4MUzY(coy_k#Y zPjpfJ8W+`bP&gETh~TFdo;?fI4^`_ZfbA{tGnF>N142eG@2{uSoDHV7Q)*=)*pA4m zSPcAiDi+i*v-Up_hKVVff1^&{J!~OGe~J+oSRJJoJGH61=E4%}P@fx8dTJ zjyO-yosYMvB!;YRm8(JNejr{VHLnl$x%?e-kHh|>?KBZQ=&=g>$q=Ov#|6Jd#trOp zqYsLS*k4KiEie=1fbXbOQ)wA~MWI8*#*PuFpk&~dzL2dwTT=#A&)aw{ms91#6ueQ;*NIgty`@uK_C6CA1qHEq7TXCBRz(_)4FkG*zJlPn zAt;U6!;9^z&qvVabomhGCI}&H5e&}-BjDpnK(l6r5m>xeSOf3Z6ehfr?dW&tMvj@# zi39bHV5Qy>eDm57^lE__CU5IrZmY?@q+>0lvRd)a6uiWig1#ZJxka{lI`)ZR;}4Gf zaDc(!e5yE?>SckmK{4t79daF|lW(ipUaEpvh&-Z0rJ8~H3H~tq?Mm7NbgJ*k*^(>O zJl5{o>*JbJXl0mExCb!}?K%6`*e>$#nYb-%XYD|;g+=M@WwhbN#! 
z;_%BZvpXu}UVlo&p$5hQ9iT3E)A4Z8?N-hbaY}5xdbeoyw|cfsnC9;7ZAhYGn#@{n zfI(D&UJ)6Fchu+aR?PlZrTSL~&=&Zpd*B2atQfR)8lJLHuLQ zZqnw)S=V(9p}APua;*OJMRf;|626Y@85`CSY`1W za<=;*t$|rOVc`F)VkTvKVY=k%#XYRf|Ic{CceLj^A+F95=p;6Otv#=VnzeKeA8I)&;OD6Wwd@5COFcZBBy@Hny{{2eh{^M6Rh{fU>V{tu~rN^RLW2gI5YCQ@)kZWg*yWV2sUpu=#8A&_V{X~=XG(QZbIvhp8 zDr7puY`syj$^;%(%7WMISgNm>`2)WiIZLj;42!{7L>n=%3}$q|_XmogWVp}8>%ZsM z-{iZHLCe5Iy`y@|@^Wk=^Z#>^Y7Nov-S#q$sb_0?1fEaSM6g6L%Hl10mY{xi&_7Aq z&uFUe_si==PKdYCt8DDO&#iU3^J)rosAHxvt@Cb zXN2DYLz8ET?^Km6H5Zv#gX5s3AJ3|0K z6+bCKt3!6Hh_*ZJq!!X=Ry!F#2XFQ}W9%0I$d?NK5k|C`k#YQ1{k~m0mcCS5r!Yvp z`BD&K-`-5tn|+vR0NXA)2rF2d^Og)WX!$IkR4e%SA-ob;T!qaYu3C0VweV>e1O!|Z zI`3A;WlT={yp3dADBl0z0`oPScXfPlfi0VJgcfFnEc)Q6IphP8Smnr|N{osehp2BG zXR_UT>(fIdWY|#M-ZpOeZd`xeHjeEt7}sN3a#7{bc%1DO6|wC0dta(xKUMR_mnwFi z?_jr3qQ3H#mfzk})=nCt%2J?X~l8eeL{mN9}yva7<%A zf30z|Z`Zi>M~xePC4(kz-m4{iYDLwYe0BS#+o-&I<5LjDNo%pa@ke002F0-#$DGytCie((C$rd@~h#89psbIaJ^&u;0 z<=g73Z08ihA6bRWj2x6qbLBfdA8*6*@n(mRcR-c;cY2k&2d7f^#^qq~#;?p8PU6;t zHD)#ok7MK=UxVPfPujl*p_8~p#~>eP$aR%H75BxivgTF&NcNim_#p<=4(dJzf#vJT z1>{id55*J4med8Dzzxag7AAz&N~!~tTpnwr?3X zfBL8RC8zerE$&3&=Q~*#%vlB&cmD~^zJPT`y3dj9JPS71WrNk~cdS~j_3Zf%NOn(j zIeN7Iw#&idvwNAYbCq*5XcOigpG;v%_HHN;*C*kt+3}O0cbC5~>#j3Fr0X$#yYU4%8s5gZYj~Ik%nq_3guVGy!VXyc$b=4 z+_m~f9d6l?WHVPvN6Z2fj#$HjgI=EdW(WAYu8XDa*rs`xxi~o#yt@nat)geKRrEU* z+3<^N`fq3!hwpWClsRKZnT}bjfz(xnum~mO=dtlbN2A46!5W#bN5$+X>%?>7`CBry zHW!j*^NDGq0qf>o9dlz&pU=HrCCrSct`%qgSn5CTlVJe|BrdM&T|f9@`&7gJCk|wl zYpJd&;N7=?WIF9osAcIV6jaxhblE)xxT)0*En9@KA3S&kuW~eYxCKhm=BW5*W6^gjaRS z5f>o5BAdw2)K8d}&PdKyFmt-MkuFQ6#F}1*W5KhZ9a!N=eRClwk?@~vlt21|d@1Q< z*evPuh%y^WQw;6vnUuZ2APUPIL=24V1hV;un1V5#aL_4Aazi;S%N8lfLcH=pp;WKZ3nl>T|#e#YOXjk|2irs2}rJ$UH4)9EC)}j5)GMzZIC@wF5O!w zufF-zZCF>X5$*2M95*70+=l;x-@ODH=i5SJD|Jc(4wE0j7wq7+@PY~H!U%LPYonwN z!{Iw>N$5b@N{y{6R<>a&FDa*Aq@1v2!i(psxuG+Mk9En?9hI^(E<$xPbS4z6LweTO}MrvmxEJ7p-q5HKiCuaEI>x;25zPWGlH0_-KAuKR;HqP zY38mFvOAXkrpf5laCXL~zz@)h%pB-QIduv`^b6TcMZxeJFUhOyGHPX8Prz{CKi^DS zcP8H zd2YFf1COQq!qapaT%I3e~|MNzk=ckn>`OVh(Hy zF~C8?G@NUxDo_ifTL9RlSlJkQlD>uM0_3A z+!`UUX_{{4xVaj^E9iqSkFEf3(gx}`_X*rp3+@&Rw|x%ltZ;u@sRJRjF|OQ9Yt{bo z_@E+qGI&f2@s?4S!cJ!R?rLfiWZq|7GI3R>%3sBexD3vjE;lSx0`zX&IrL_%$~Elj z;^{AeA#a*ZXBAIp#}7GIjI){q`_Vu&68240f*i0~MYFDOxC&L*eU>#-`S%q39C*Qc z1I36am?Ec2FEa74B28w@7l|b^Zj6+W9|1TCn72_h!nFvuCtn$3{3hjVaKrOf%6ZM(=*mA2>{d{M`y_6Qk6k;(mcI zggly`<``^{!a0og)na|89QvRiJa(R(%Y-PX-N9PX1Xo&i0;UuYD&pfzwa_4D^t(=A z9azusV$ccW$wtSZ!jI<#SqDi5%XWPQ6B;74E(JH}Vp6CT?R=mmN+(a1(BASEzfwma z?cx|Rl$0EW$B(XoWA-Fk#I(d6&L_!BaCVag@H=U$(VeIa4f3Vb07u+m*6a!(vsN0H zo937P>5w!Kmq0j1q--BrBw+2^`<^bAK7ob}nXQ6!4Ha4gPuLT)L@h364P5D-p(45` z0qem$AOrhD3!&gnBx{nwLuN2gb{)gLh71hT@L2n`y9x)mu!qzXjAvd1=!`%T6(6U`%60ko5)@YC&Sx2RLX6Tx5L+l@Go*B|jI0 zLEWVknFU-d-=(62kbKuADwHg~QCea>Om1`d6lhek`J^yUE-P*tJC^MBScRrJ0xhP7 z#T?YAFc58`mGhwqeT11?ZO4|Rx17(~;2Qw=p5aYtKLngkvNiC03*sPVvy5?4c9Vg@`wMH!7KF}f0uh!M1jnjjyS3W5pXIC2cg$^dgQ zGu`#}RX3L=9{c5Ulk9&zT|KW~RlWCnzxTSk>VwB#IO25+p@_2bL(S0VOUfe41_7Uk zdq3miLa(`v17Z0=(m?v-8@tNsQzf$05hnsit(8Qu0TWW#xfO{}l#!I7(>sT6hHZ=g zPE$gEI5l^61K+Ke6&6|>S2)c%`OZZK5oVUUVJ=-cfsPrOMGb>$Y`i<)IARKjTWRD$ zMi+ij=54-(c@O@Q#<8%w7gS4#5ccqasSh0}g3DdX`93=tDIlW9RT~z+L-CpgBEzT4 zM*D=AJgwb~G+16M0}EFJ)=S~Lr!A-Z4#d(2}UfS7BjCA zR~F^~adV@k2)zihMj-uNY$ zIQt~9~g5-Pjw)0T<47q$b~!x&*CB3<)r&4Gk$*EAjG9R8Qm{(Xj6JS$=Gzs zr=Nz@DhQZQ$Fx#!QyR?Q-bmGNz0Xe^(vIaFv2f#xl}Cg`y`-Phu7D0AXb0J0Je9;K z;_e+1b(MT3Xom&X4>c42woYyp#%Q{r5He&teZI7`!7Bh+{p&Pkc^)oM*TluoxTr2j zpUn-=C{l&4cB_tI>JS4vkg*^cFRCD^PC<=gCDtOjoxT;Ds9}v#oQiXZfMT^^-BZ=; z1VV4ObW8(~1hw(n1#lA`XGz7+jNWcRhiIC-=0}m6rxrArd<=7%79aOB!igT3%V6Sj 
- sizeBytes: 744139852 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:277305cb96305da0ee7ee802b2a5c39e73419530a261571cc34ddd3dd9ee8c1e - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 685394758 - - names: - - registry.redhat.io/redhat/redhat-operator-index@sha256:7fe7c980185734501e6a68bb5f7cb57eefe0f1909c65459385cf22c4297560c8 - - registry.redhat.io/redhat/redhat-operator-index@sha256:c93cc47bcf8557dd8d37aab97214101a518928a072c586cc5722df93cd6d2e8d - - registry.redhat.io/redhat/redhat-operator-index:v4.7 - sizeBytes: 644887883 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d4ea0e5042b386273d3121a6c35f6e85f93da6c6840fa29fa106276788c8e39f - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 540679283 - - names: - - registry.redhat.io/redhat/community-operator-index@sha256:b3744a597a923acfe1d9381e6bbfe6b82a8ebd3b3451c4ed0003a913f42ffa79 - - registry.redhat.io/redhat/community-operator-index@sha256:fd94ae30d488bf0b89056de18d6e026394da88e613960f32ab12b23fe6805404 - - registry.redhat.io/redhat/community-operator-index:v4.7 - sizeBytes: 523077284 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3e54a399638e5494e27ac648f2aba4b47df66b34144ec1e2af7e7f2c0c66f25e - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 511495389 - - names: - - registry.redhat.io/redhat/certified-operator-index@sha256:1b7abaff61dc619629ff68fa773166bf95bd7704a54d4baabca5129a4967f0a3 - - registry.redhat.io/redhat/certified-operator-index@sha256:63a5e5903943eae2f411eba44fd0c8788521bad7124ef5d69f12f5b9a867f6e5 - - registry.redhat.io/redhat/certified-operator-index:v4.7 - sizeBytes: 509139375 - - names: - - registry.redhat.io/redhat/redhat-marketplace-index@sha256:67b1d205d7b66917e00a3fde45f88e62e5e1d695ebce60352d60399add9919d4 - - registry.redhat.io/redhat/redhat-marketplace-index@sha256:e1ef90210a37eb50040a9ad3523a64834c77a2c8dce3b9c6f3b1d82c112ee3c8 - -
registry.redhat.io/redhat/redhat-marketplace-index:v4.7 - sizeBytes: 499616174 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e8066552b7dd5de466202783e0eb9e6104490ea97af38bf4ce6376214471ec32 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 489596702 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:25c964d3856013e089749d417ccf38d7819cefb39ebfae907d2646727619a622 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 465583131 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0942f12d4844554baef7be3e3c48243a28b2ded4d90e1826be394c88e5281443 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 461651161 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a48d6f31bb273be5fd3a79f25623f4850a187a07c8206f3a0fb2ca5c1dc29b34 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 413958698 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:987c82044e48d224fea27f8e9a9cc06e67b0296740ab4bc3012b2078efd0e304 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 403746612 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6623745546ace99ecd1971542b8d17695ec74e7ecc71f5d9d97d0e77dfc79daf - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 375981368 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:23d56218378107f4d5c2620453e16c6e187b8c2bd575d4504f63c22c05542236 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 363614732 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:cd71273c75c785ec34484ee322e135f8ec811288c68d39fbdbf053964e12ea9b - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 349034327 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fafb53903cb0d36bf2ba00a8f9134538f7f44934fd56ce67a5e1d5be8443626c - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 346119722 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:60133122682fb5962b3071c1fc24ec0987cbfa226e75c3f61eaebef2b5e79cd6 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 337659836 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:29de5b0c8488b98c7bacb64575288311f457b36439b11ea6b6028176338e8735 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 334292826 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:651a5ebde571ab8e01d10efefb86615c29ba642f356fe2f28306d3e4b7d37010 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 331477292 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:6323a576b5324be607a6ebf800e5c7f8c3026b82b7107ca2f82667c1827e11ea - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 325391176 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bb3a0274171939d33b06d7b89115e84d9f404a64e313c46fba0ffaf6932cb420 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 325306168 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:157bffd1b59b38477dd927e1861a75b2657755d518d31eecc031648096b9a0fe - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 325142100 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:08a243c031f8010886d0d79dc1cd961e6455d2cd7163f193caefe95c94887e7e - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 324740868 - - names: - - 
quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:95c762bc6149c1f8a67dfefd36dda492ec7df7cdc57e7047efebe8b327a6cd63 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 324706049 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:50ca35f6d1b839a790d657d298db7f32844ece8da14f99f2b55e5b6d5642fd2a - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 322411789 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:889a5bf8772576e18e10722ac13d318784f4e70f9b5328794ff135e74a33957b - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 321854749 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:4a2efd529d4005ab991d46b621af6d7ac9b808cc886bc90b36531c4c24c906cf - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 321697043 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:61c93bca3fffdd9f1e05af4b29edda58939bd48074dc420fde8139e2576557ca - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 321507510 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c925855ddfcc11b2af0e52502146f1e1961c0120a61ba3081d947050a67c7e92 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 321362324 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:db1883913c5182b2b1f70be519f3d458036e8d1345dfa105d088eb70fc72318f - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 321021684 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:171dd5e994149f21b2231435d91d0dd55e9bc9edc76843519218c2d02a4927b0 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 320359302 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1f0bcf908d645edec39d3ac26f5aa60bb9223328534fd5499bc3346ca3e3d2d6 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 318007566 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8d38af588345813b829e3602e8ea23911d645909204f30a884307180237db27b - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 313726675 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1b8eace3431337a140f23a8a8ad74f5c4d96358d9d129f31bc122852a8c47f30 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 312247967 - - names: - - quay.io/openshift/origin-coredns@sha256:26060d447b10b35feebc1c37a71ecf3f5fac3341d07c485728b4381560116a74 - - quay.io/openshift/origin-coredns:latest - sizeBytes: 311537562 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a9a97efe2386f413673459654fc3d88fb0a0f41509a60fb9bbb88ef8152da2e9 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 309908027 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:019a603c12a5285e9c13773f9d0df5f1f89c0e7693fd9afa78626ad11634c1e2 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 308569490 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9849519bf4cb544fb0805c69452396326ae319d822764b9553fe2924a9719995 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 307126582 - - names: - - registry.ci.openshift.org/ocp/release@sha256:83fca12e93240b503f88ec192be5ff0d6dfe750f81e8b5ef71af991337d7c584 - - registry.ci.openshift.org/ocp/release@sha256: - sizeBytes: 306124057 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:2fdee04336fc2e8c5a33bf4a7b1de3100ba44f05f4e4e553037ca70a17c558d7 - - 
quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 305287899 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0dca7062f7e73ad3538b8280d18fc5746326dbdd262f8925c0eb969f9b4332d5 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 304975485 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8ec9b905eb42a148e7d737d655d900174377a7756d9533be3ebb2d30bc0f1a39 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 304946020 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fca696fd550b9b9488cac87db1c197e904be95048365c989a29816b8124cbaea - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 304286386 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c9b9e05d726a900f46c11081e31836e9258ceebec20e6aaf1fb8e036b922fdeb - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 304074022 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32dc51423d9c8fd7092ac6081caf97328237545b1ae5f82b2f7acd45dcf412fd - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 303950274 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3ef5f1e8cf2a4ec7f7cac8dbed1388662a5cbae3f9846131bf562970bb9a1393 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 302997344 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:0f77990f644f7dec30c3e19822ea0291ad5e9e430b4dfe36fc72e8100c817bc2 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 302162692 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:14d2eadf5e7b41c372eb75294e2c7d447731f91b2741928ddfd8e692f270df45 - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 301583188 - - names: - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fbb264d00a1c53577a21cdfc9c96b0553b1797c561ab5e62d943a536f2227bca - - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256: - sizeBytes: 301182343 - nodeInfo: - architecture: amd64 - bootID: 93995072-4865-42f3-b70e-a62a430ee417 - containerRuntimeVersion: cri-o://1.20.1-3.rhaos4.7.gita18cf24.el8 - kernelVersion: 4.18.0-240.15.1.el8_3.x86_64 - kubeProxyVersion: v1.20.0+5fbfd19 - kubeletVersion: v1.20.0+5fbfd19 - machineID: bee097b4d51e4e6da0b83bf7a4421951 - operatingSystem: linux - osImage: Red Hat Enterprise Linux CoreOS 47.83.202103051045-0 (Ootpa) - systemUUID: bee097b4-d51e-4e6d-a0b8-3bf7a4421951 diff --git a/test/e2e/pao/testdata/must-gather/must-gather.sno/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-0f7ae9cea65387b0497d6f9c28781e40f457b0f79838d65881ffb1aa779450a5/cluster-scoped-resources/machineconfiguration.openshift.io/machineconfigpools/master.yaml b/test/e2e/pao/testdata/must-gather/must-gather.sno/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-0f7ae9cea65387b0497d6f9c28781e40f457b0f79838d65881ffb1aa779450a5/cluster-scoped-resources/machineconfiguration.openshift.io/machineconfigpools/master.yaml deleted file mode 100755 index 546eec8f2..000000000 --- a/test/e2e/pao/testdata/must-gather/must-gather.sno/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-0f7ae9cea65387b0497d6f9c28781e40f457b0f79838d65881ffb1aa779450a5/cluster-scoped-resources/machineconfiguration.openshift.io/machineconfigpools/master.yaml +++ /dev/null @@ -1,140 +0,0 @@ -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfigPool -metadata: - creationTimestamp: "2021-03-16T14:21:52Z" - generation: 2 - 
labels: - machineconfiguration.openshift.io/mco-built-in: "" - operator.machineconfiguration.openshift.io/required-for-upgrade: "" - pools.operator.machineconfiguration.openshift.io/master: "" - managedFields: - - apiVersion: machineconfiguration.openshift.io/v1 - fieldsType: FieldsV1 - fieldsV1: - f:metadata: - f:labels: - .: {} - f:machineconfiguration.openshift.io/mco-built-in: {} - f:operator.machineconfiguration.openshift.io/required-for-upgrade: {} - f:pools.operator.machineconfiguration.openshift.io/master: {} - f:spec: - .: {} - f:configuration: {} - f:machineConfigSelector: - .: {} - f:matchLabels: - .: {} - f:machineconfiguration.openshift.io/role: {} - f:nodeSelector: - .: {} - f:matchLabels: - .: {} - f:node-role.kubernetes.io/master: {} - f:paused: {} - manager: machine-config-operator - operation: Update - time: "2021-03-16T14:21:52Z" - - apiVersion: machineconfiguration.openshift.io/v1 - fieldsType: FieldsV1 - fieldsV1: - f:spec: - f:configuration: - f:name: {} - f:source: {} - f:status: - .: {} - f:conditions: {} - f:configuration: - .: {} - f:name: {} - f:source: {} - f:degradedMachineCount: {} - f:machineCount: {} - f:observedGeneration: {} - f:readyMachineCount: {} - f:unavailableMachineCount: {} - f:updatedMachineCount: {} - manager: machine-config-controller - operation: Update - time: "2021-03-16T14:24:17Z" - name: master - resourceVersion: "7841" - selfLink: /apis/machineconfiguration.openshift.io/v1/machineconfigpools/master - uid: 163fb080-767f-43ec-a514-6b0ce0d8dc6d -spec: - configuration: - name: rendered-master-d1a10ddcc3f6f4769b2864b448ece168 - source: - - apiVersion: machineconfiguration.openshift.io/v1 - kind: MachineConfig - name: 00-master - - apiVersion: machineconfiguration.openshift.io/v1 - kind: MachineConfig - name: 01-master-container-runtime - - apiVersion: machineconfiguration.openshift.io/v1 - kind: MachineConfig - name: 01-master-kubelet - - apiVersion: machineconfiguration.openshift.io/v1 - kind: MachineConfig - name: 99-master-generated-registries - - apiVersion: machineconfiguration.openshift.io/v1 - kind: MachineConfig - name: 99-master-ssh - machineConfigSelector: - matchLabels: - machineconfiguration.openshift.io/role: master - nodeSelector: - matchLabels: - node-role.kubernetes.io/master: "" - paused: false -status: - conditions: - - lastTransitionTime: "2021-03-16T14:23:25Z" - message: "" - reason: "" - status: "False" - type: RenderDegraded - - lastTransitionTime: "2021-03-16T14:24:17Z" - message: All nodes are updated with rendered-master-d1a10ddcc3f6f4769b2864b448ece168 - reason: "" - status: "True" - type: Updated - - lastTransitionTime: "2021-03-16T14:24:17Z" - message: "" - reason: "" - status: "False" - type: Updating - - lastTransitionTime: "2021-03-16T14:24:17Z" - message: "" - reason: "" - status: "False" - type: NodeDegraded - - lastTransitionTime: "2021-03-16T14:24:17Z" - message: "" - reason: "" - status: "False" - type: Degraded - configuration: - name: rendered-master-d1a10ddcc3f6f4769b2864b448ece168 - source: - - apiVersion: machineconfiguration.openshift.io/v1 - kind: MachineConfig - name: 00-master - - apiVersion: machineconfiguration.openshift.io/v1 - kind: MachineConfig - name: 01-master-container-runtime - - apiVersion: machineconfiguration.openshift.io/v1 - kind: MachineConfig - name: 01-master-kubelet - - apiVersion: machineconfiguration.openshift.io/v1 - kind: MachineConfig - name: 99-master-generated-registries - - apiVersion: machineconfiguration.openshift.io/v1 - kind: MachineConfig - name: 99-master-ssh - 
degradedMachineCount: 0 - machineCount: 1 - observedGeneration: 2 - readyMachineCount: 1 - unavailableMachineCount: 0 - updatedMachineCount: 1 diff --git a/test/e2e/pao/testdata/must-gather/must-gather.sno/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-0f7ae9cea65387b0497d6f9c28781e40f457b0f79838d65881ffb1aa779450a5/cluster-scoped-resources/machineconfiguration.openshift.io/machineconfigpools/worker.yaml b/test/e2e/pao/testdata/must-gather/must-gather.sno/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-0f7ae9cea65387b0497d6f9c28781e40f457b0f79838d65881ffb1aa779450a5/cluster-scoped-resources/machineconfiguration.openshift.io/machineconfigpools/worker.yaml deleted file mode 100755 index c3ce11450..000000000 --- a/test/e2e/pao/testdata/must-gather/must-gather.sno/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-0f7ae9cea65387b0497d6f9c28781e40f457b0f79838d65881ffb1aa779450a5/cluster-scoped-resources/machineconfiguration.openshift.io/machineconfigpools/worker.yaml +++ /dev/null @@ -1,139 +0,0 @@ ---- -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfigPool -metadata: - creationTimestamp: "2021-03-16T14:21:52Z" - generation: 2 - labels: - machineconfiguration.openshift.io/mco-built-in: "" - pools.operator.machineconfiguration.openshift.io/worker: "" - managedFields: - - apiVersion: machineconfiguration.openshift.io/v1 - fieldsType: FieldsV1 - fieldsV1: - f:metadata: - f:labels: - .: {} - f:machineconfiguration.openshift.io/mco-built-in: {} - f:pools.operator.machineconfiguration.openshift.io/worker: {} - f:spec: - .: {} - f:configuration: {} - f:machineConfigSelector: - .: {} - f:matchLabels: - .: {} - f:machineconfiguration.openshift.io/role: {} - f:nodeSelector: - .: {} - f:matchLabels: - .: {} - f:node-role.kubernetes.io/worker: {} - f:paused: {} - manager: machine-config-operator - operation: Update - time: "2021-03-16T14:21:52Z" - - apiVersion: machineconfiguration.openshift.io/v1 - fieldsType: FieldsV1 - fieldsV1: - f:spec: - f:configuration: - f:name: {} - f:source: {} - f:status: - .: {} - f:conditions: {} - f:configuration: - .: {} - f:name: {} - f:source: {} - f:degradedMachineCount: {} - f:machineCount: {} - f:observedGeneration: {} - f:readyMachineCount: {} - f:unavailableMachineCount: {} - f:updatedMachineCount: {} - manager: machine-config-controller - operation: Update - time: "2021-03-16T14:23:30Z" - name: worker - resourceVersion: "6732" - selfLink: /apis/machineconfiguration.openshift.io/v1/machineconfigpools/worker - uid: 5693182c-1fc6-4609-94d7-8ad5dadf58fa -spec: - configuration: - name: rendered-worker-7081c43bf1d6417031eab02fecf83b00 - source: - - apiVersion: machineconfiguration.openshift.io/v1 - kind: MachineConfig - name: 00-worker - - apiVersion: machineconfiguration.openshift.io/v1 - kind: MachineConfig - name: 01-worker-container-runtime - - apiVersion: machineconfiguration.openshift.io/v1 - kind: MachineConfig - name: 01-worker-kubelet - - apiVersion: machineconfiguration.openshift.io/v1 - kind: MachineConfig - name: 99-worker-generated-registries - - apiVersion: machineconfiguration.openshift.io/v1 - kind: MachineConfig - name: 99-worker-ssh - machineConfigSelector: - matchLabels: - machineconfiguration.openshift.io/role: worker - nodeSelector: - matchLabels: - node-role.kubernetes.io/worker: "" - paused: false -status: - conditions: - - lastTransitionTime: "2021-03-16T14:23:25Z" - message: "" - reason: "" - status: "False" - type: RenderDegraded - - lastTransitionTime: 
"2021-03-16T14:23:30Z" - message: All nodes are updated with rendered-worker-7081c43bf1d6417031eab02fecf83b00 - reason: "" - status: "True" - type: Updated - - lastTransitionTime: "2021-03-16T14:23:30Z" - message: "" - reason: "" - status: "False" - type: Updating - - lastTransitionTime: "2021-03-16T14:23:30Z" - message: "" - reason: "" - status: "False" - type: NodeDegraded - - lastTransitionTime: "2021-03-16T14:23:30Z" - message: "" - reason: "" - status: "False" - type: Degraded - configuration: - name: rendered-worker-7081c43bf1d6417031eab02fecf83b00 - source: - - apiVersion: machineconfiguration.openshift.io/v1 - kind: MachineConfig - name: 00-worker - - apiVersion: machineconfiguration.openshift.io/v1 - kind: MachineConfig - name: 01-worker-container-runtime - - apiVersion: machineconfiguration.openshift.io/v1 - kind: MachineConfig - name: 01-worker-kubelet - - apiVersion: machineconfiguration.openshift.io/v1 - kind: MachineConfig - name: 99-worker-generated-registries - - apiVersion: machineconfiguration.openshift.io/v1 - kind: MachineConfig - name: 99-worker-ssh - degradedMachineCount: 0 - machineCount: 0 - observedGeneration: 2 - readyMachineCount: 0 - unavailableMachineCount: 0 - updatedMachineCount: 0 diff --git a/test/e2e/pao/testdata/must-gather/must-gather.sno/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-0f7ae9cea65387b0497d6f9c28781e40f457b0f79838d65881ffb1aa779450a5/nodes/ocp47sno-master-0.demo.lab/sysinfo.tgz b/test/e2e/pao/testdata/must-gather/must-gather.sno/quay-io-openshift-kni-performance-addon-operator-must-gather-sha256-0f7ae9cea65387b0497d6f9c28781e40f457b0f79838d65881ffb1aa779450a5/nodes/ocp47sno-master-0.demo.lab/sysinfo.tgz deleted file mode 100644 index 485450e79a5029b1d12fa3276ee816d8bcc3b3dc..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 27381 zcmeHw2~-qU+BWVnQOTGEaqAct+}jOtuNXg~29X#?5x2$-#H|$tT!4yVU?vz*F`_oo zu3!Xd1QZaVLxELT^UMY3O%5l7e-?3mBY1C<_e&V|<7b;ABYrqBb^9oL{{^mVb0*kI zE$6z@ ztqURUTzLJaDbl_wb1UfY<}hO^Tyk-*7JXbL0x zL7BGobz(DB)`oGr5_eNeBScbZ@W+czNDs`HYzj6jpX;^MIV`Y#sATzsw2K*;*N10j zl-(G4J8h`=Qtl=5kIUx$)p1LttRJl$!B>d@G@sOCnn361+ zg~nk;kHx9lo!c}{G1pg$v%knxDTNh24^}6yyf=hVZt}JfZ~@XBxD!3xE?1l3xO=j6 zOQ^qAQoboZVM#>f(|PF(q?c9i-m14XaJ7=jqLCBDIfZpqH7Ux4Q{uIITUa^8=xdwL z#g4Qa;-b-=&1g`RM@6iZn%W6hnw4v zOqdIqsw%_kZ4u7XF2mV7nXdM*F8?&$$x0)QQ26K7C7Yj@&6rr>aVon>&G(rNmK&X)Sb@G|3cJ&KT5ryT?0>4pV1Jm_f9gWxvZGpG#}0 zkj!!`-U|cwbI0o)Cu%31Any~x=+t9h)@06sO5H@ zC7ShHudeKVEH&fVh6&~eyr&n;*cufwvWvYWZ$s3L8``^D$6R0P*>|L$L_#fo==0#k zYWE~>D*n38ZEL-1x8)LDFmG1HjdAC_uMg333zjx9LwROww5<6o@VH2!!t_0;oV zjAn=THeb5f=c_`l(bC^Dp$)D5up-g z=fHBOm)j=yQyNM(P3pfqLa3-rExdcGdpb1lPBIS`*~X;k3z|wRd^ET$YH35tTDPmo z`i?$%6&to*HMm9fmRSVWJ5J)8ZN(B z`Gjrs%2~1EY4yuZlO!v%Wkb|iHFV#D>1e6)(njgBy&r}7sH>c&#|Ku@-R=e!Lc_M} zkC&S|%Z~YrhJ>f&ZJlu=xX3VFd1JR`>zKQorpvjARL7$=reB!*`~`m&+)Eh>3c-Pf z9zF|_sb1yXz92tiyPPW#(PM^B>Gnk$=lIt#y~g5CTHgbAX*T8Ji!r@^(EBXNaQp}U ze7JqgB{JxxRL8&KpdYmjdXd9Ht8vgIs#o5tK`U_34LE2P=lJl|p!3Q7fjxX4WN?l< zZncd%g~b8pPwnDo7CqJ6% zH-7(U$7*sZqs#e6&SkoLO@_)H$tNNfD>AcmL*+Yl2Is{S3Tm}5E_^Z!TeVe=F(vpZ zj7x-d-jG?s1(mbHaJ?wMnb}lJ?{bCA+%5rg>S%4%Hd$w%7ZQEIQD5&}k4u`bvE_;2 z&H4$RdQI)sEm{#+UtZQUA~!Jho~V20^00st<%<^V*g0qMTJ3oHUPDTHQOFV6vR81d zRxzzYSVvW4_yi<8KCop*c~z$VXqZgstIAqCwqLi-sSjLr8xDD~7BP29yzY+N^KzI> z84&7RH@j3Esi-WVH;FFhTGwwlRDOnj67e-%>nU2`pe^x!C))43-dlW~GpiaE_C682 zcS&P))fQQzdrXz7GIVY5t1FGkVpg-pDyw$8!4-%Q*J+_p=9@V{RwDd6Q4P6QTx`mOk}ic=w``6Oo;7y|24%@$0EZVNm93ofQF=y`NE=xRNY3;rA zVH|T$b$xP@*FRkH9v0Nlmouk9V|q^Lj0AkliUYneDK(#F`q;JPno`V%Zfxw0m9}OI 
zm7k!_FKA(@U`V)sTJ?rW0^82^{@NWeOKdBHOsN-%(2t86zUO5r^fp8<@!Hu^?`_Zr z2+K=1U(QSv(|sd`q}DrH7DR*wYQhE3n0tBY-Oy=s#q?2CU|{TxJnIz~%BwDKt&X@y z{r$|P%#yMu=cF&~?%FYNU#fB&bH#HqCot@qb>2Cix<23KUca(hRuyzFh=b39KrOmuO#O4yha!8K-HOj=Urc4 z8`|A1x2T|Qig13N_pHT1no8L1+T17lMPsfoJo@R38o5IK!0GyN(`%VYrZY;Fpi8%t zUKttPkh?Z1LX~K`c(L2A-OVCNNo~-sg}f4{?;HmE=>(-ug1K9^-#OvaF|@Wm?ZZ9# zxgwA|TAsM)oSWG!$#55!_}l3zDk;`3@u@DRu{g|-8z7p)<~7$Pw>*@2?`)0>YFJ^2 zS|T1&s*q9!$K*%zgrVn(n=G&Pa}lvMerBbIxrqtdc%Smxuw?c$bwjTdmiyGQiQG8D zn1q$yy3;EZAa14?!_KEGz7@LAuzlr6-J7Wq@!?Q7(7U9EMkYC_8xb#q!i==EwXr9> zgM+&ZM~AAbD&k@acCckun+NUykCPQsHR4N`)`I=aFFn59*#9}5+)H55{&W)OKPCJ= z)4s7z(nTi{jgsoKXqzRFcaT$!3xB}uYjis{wqr|q3>Q;6{amggK+Ejc>zF95fV2BN zqYHjC}oifc2d#(Drnm@gWWA3onUkON2@q{ zvjfocC4TxMtg$soya&B2=$`i(asO&q^1;n0uDKa{*V2b>a#C@vhQ87L=4drDWgQpx zY$hwpH<*IO-V~*ksRqtg&@EKgd*G@53hFE9M4c?tS0n0r1H87&z_4Ead;(-1hhaOv zf*?ysg}HakVFi9vxSms-AEvXvX^^*$5&>TheZXw2)rk7mn4hYkC#$JWrqyKp!JJ)t zuI*#yI{R-pFEYYKi1~7RqwE{b4quU(_IUi1EO@tqez*TaSeFGa>DIS1;~)DOkT*4p z1^X)?JM#rZCMclZX&F9eu<<$3{CkY#v=wJK(L;mmGkEtnwEhu*FJ$ptX(`2pD5de9 zVa?2(co}`q;CH~_H)o7NU$K`HE#^8@(u2oW(oVFU!?AEwoE zPPvegg=xYd72g{F`k?WzU|hQwFny+-Ms$W&;C_9pp%>qS52szE|DjcG;Wlnp7n(z- zC|#w*Pe(6MQ;ovCS}EBo>nHFz1axcyPh4#szuMs^jH`VW{2=iDn3hC*q0%gHSJ2-L zDd#-8%C^ywWGSvfI>&T;3dsoydV8$%j+0=?KN2!e!WGV9uUg5km?f09nibw-32RvU zTf-VWvXVHvgG~p}CVcgxLDMc8ZxZgU=WXIh1#R9;&2i+~=9^Csj)F?b9#-0)-&^z6 zz0HP2XgoVWZ(Q^My%Cr&AHVEQjMF(9oS^5Y=mwbBAR`BNyS5SbHJgJrL@1mg>u+mOA0@)PY=Ts7znApyy7UOAHra28zufbVhSe=7bwT=EREHtI&i_LlCZ;!LB|3sM$qO zD1~)sxoBS62pBm3TB{A#s^AuHPzQ`RSwiN6R_~@ASZNtG=QQNvd(k&qm}6?D!kwGV zB@&lDsugXY3Tq5O-x%#ELMzY(5rh8>gljoPQ8vWG!uD!P>6s7pC2&0p?$AM7MPN1& z2EuIKO&S4h`MJnAMFt0`1b$VWS%Hv~6&eBz9=M3~Dm9f4gN}CNBFS4>!Yt@X3?2>n zj+U{*f20SMwc30OGhfXd_~~uW(BE-J8#zC2@M&5(?LHc+D1{RZ8rRuGfm7S*I`&4$ z=Is6l+shjdL~)r<)QoL5Yu9u1=+V7Q`o`ozUfG5z%o1em?bymi$sRzTa0S2pbr$xt zXp?V@xO3cxT9=hhyQZ|fOt)9sMRGSz&Z|wH&?`9D$>m9zG`YzyAhl}K&KZTA?~B;c zs#t^oj|DQ}=po@6R+^~Yy*rwn%Je(<)b6%&T1(xc1wmn&ECu3>_##&4IOY+TKy9)Q z?Qcjh#NNHTLx00VT^uBu{^R`2Ayfivhl#<$3P@&yT>>qwwQ15ss6IXHJ6(#Jt64oo zQsg~Y?msSxeOyU-H0K2lmw#7S$5v{~N_WI2=$#9^Po5nnhUw1!5B!8of?SlSeaZ~| zhff__u~GDK3Aa&cYcEl{Cx3P7Y5sFs7AncmZfz_cL01YY{I#a3nHiPCPlX&^w_RV? zz-~=2l&q=HxH$YZ&T=#UaGB*UvSu^ zAKwy3!Hy=iCu@DUmRrHa$A<*C!v)H#M$J?zWyccf+QPKFyd7#qRo;#m_Upw#Zd_Gm zdDBKoM#~FR>WSx-yO(mO*u;tS((vL<*Lr!4_FC|9p3B1E58c$yGAS8jxU#EdHseK4 zhba9v@8c1ufe4eo&mGSEP$2RRuNS7~l@+FKa7!vGVzZ?5NY2%3ves~JL7u~cC)|3m z4WyaXK~d92YWepb-aD>|^oH5l$t~^d1fIqI(po=J%QkkKAx~)7k+ihdbJZ^Ow^bAc z*1LA=OFh+BvtD?{g?cocI51IZ8dUy-TD9<0z9vYN$uRxeU5Xwao@?-zt%F_q%K4dR zGv?ow$kO`fEW5&O_G)xKf4P~C-pq~AQ|ZoZp{_=&*+zGMte}MB8(@&y+~bUu5>l9zso4B{=CJY1gs3HOnCMih`0$rrCAY zO02|UU=r=F^|htE;+^$}9zwm&`7bKbQWgEo?>uLhN9{T2yyFtRSq3u&M|9L8FiZ}4 zaDlVl@ajSdUigu#XokJy11n)#iC#I46SNHY5lS_}l04;%SAPu5fBi4APw@|!`f}FS zkggKVj$pJb|48xdm}1@Wsh_#{8RCL9bXTbg+&$eQ`kijl2=DqDx^xxc>+JA6;2awc z?qIR`4g^IlgSg7vG~K3YMbk=RlP^)J~_imsD877!>^-pNn-*T$qIA^b_TFuRqzf;XT&ZHxH*TUhbllN9Si=!KC zbyK{&PAtls8`3={H@DHCakZSxgk((d&ewUF>X#{z;sq!WthHB{+bRN6L}6-`=)rZ<{(0cJQ?uO0WF}iD zg~%^!;pj^dsHcDJaqpZ?M8|taI`7v=Ybu#i82pL1a@r+uUfv!jInslr4;RB=LrW0F zy4}C=qw>6uAi1BRFm_}v1=184-%|V}5!3>o7mcx;hwtC|Z$AfXPU#Z|Ze~Cl6bwJ0 z!I)kT4{0{XO!ow2UpCN#Jd-&2o{q6zYXFk4~maGMF~e6GqO? 
z70-C;`7E&Ue1E0k(@cM-B-XA#z%7py!?I(cmFHqt2ITBGSz@m9u?Dp&yttrU)hETx zumwDF@57m5OEIn3-w1u}KfACZpI!n#I9{ZKH3EbNH-g0@bpg@bEY7E6KJB^Y0ew^> z$c8T;iNGaig4b%+VhtN+x1>?9hPAN4B#58+ARmhDKMTWw;=y{~8E#uXP2ieq@|EY* zU7aa{D@Qin>SK1c1FQnkEk!;ZosR#n(aF1poS|^2b*vvJ>KXwpLn0t`;OU&vDyox; zdbgX#ECOCUfn`5LfQ@5QWgma;Tm)?87PePWWbE5IyEHvNUIUJoydofHA19r$il4C# zAqrP2iQ-HHzL}pG0f%5n%2zOyr6OScq;)MG-&@0ie0mZL+p7^MjZh%TvD zi!6#DcZ6{rKt|T#gB-fQiW6khbH_ZCszq~YRX~s$2MLDiE1UytGyW>$C>gAES$sfD z=>>NE9bi~IB#4(Z%C$1CT8dBF-zfUWQYbbAb)3ach94T^ICH0L%a7UZTsWh1=2waT zl@Wn9Rhn6NHAGf~xtYqI?UJptQ${_soM1$nuCy#3V z6~a~2o=RrR$j92DnX# zU`*dpVM@=)iFNJ8)6Rl_Rdsbx@dnqw)#&Hx>zQFEkJgL!Uf}$a+ubUKYy%~aK8e6~ zQ#uH^rdc|_SjU#;^np6j!BFQ)#gkD=vmGzNA(fT(42gs%Q|_;lKYz*DEv$t@jz|aX zBy_63f0gHT?d2!q_tk;03AQ|iT_F!()LxcSPJ0NtIE|C5#vosB#VO{V{9#d+mg=h4 zFy<;LMgt`*O3~VN7s((C_9TnX_<(%7LAu|f8r&!JO`>+z%OoII`RYczlaJweMu=he z{pQ^3+^(fLa5-fu7DLkX85_n4SoevNYzt>q6_qt>*5gZ18p^FJ*b0IBudi*YG>_rJ zO0(tfs3_$Vm}`>_b5955S~;+7I|9n}%KY31TivI!q8|2=D%2j9g*@O`(MOh?sC^x+Jg#B9spYn} zozInP917PwtI>!aPR4~h4*F1kPUf4r7-zv0sbO**-PPs04lfU^fKd|-?(1dHjrAYe zfc)m*SQV`fACGrBzVX2ZVhQ;T5v*0Z|a=eU?zA&1?o-~ue$ca>}4M7Qa# zFUSkgZXj%?z9h83iS^gh2{5-qBAq-|C3*sxI%!7(Bfh7zZ-x>W{E}<_-T|sPQ8q>! zu1+#q7t{z1T+nAA6Ti@zHN#oDYa{4zk=8UJ2G9xbGy6T%8Yj2ukLQ_fzunI7xJFhB z*3k2w1_yKn|BgCb?}xPao$)z6SjJK-?iry8YKC!~jUy;=I^lXcgFZqiIX{JH5Du6P zkz-YGk!2E_=?~XsvC5uz8)Kc`dFjtj%)oTKjv+apxeh^Ug73+i4`vzktNWnplgI@W zz_Pk#*v5%&Wf`PB9GT-9`XQ%G;+W{0VAHZ|M$+;5w-1=i=U?9+f*d1Zfu8ozGnZn| zz_QP8*9L4npcD4k^~K{0V08}c_ws953rENr@RQ7efyC(fS6#>2eZH4lfhI);X@>8j z&w%Lj#by@s+8I`pNt%IhZ>_-i;c}cjqVTG-^jr|4ZIG3WBr7?w5v*5Fg|K#yM)6Bh zPlAxfxD$BKb~cY?8flx;ps=#5qWRj0&AGb*i_Lkk%)%uk)OHrg+S{a`T+bB!NQbxr(>WhJbLYe5IPMze{krGOov$&;A;Dgwpqo3G)M%tX1j9pQWNQ?# zd-n`Uws7jbc2inl2=b?wknAFZrtRPK;Ftk0s{=9rMk=I#v_VnKO7n`~^4?}fytIlb zY!LZ1(@)pF0GnrQU7r|kT{FE&37u5X8zq=|I#G|=0^guhl_vf2;Zm@k8T<^qa+gBC z3jV@(dTJ1{JzMbbpo_Y9tQpb)y(+X*GfjBt*eY7>ZE1dRs)tEzGgu$wEXmE*usO`6 zA4VZtlnapb?^+H8G1CWbmcXIBT`RY7HYp!ynCYPJCSNatA>yyAWuGj8LbbAs7j;=B z4O^F{wd>Q^y#+j_=u;^Dav*8kX%i{eL(ZmZ*i^1Yt z6VyJ11xLF#(>6nMb59PqDQvTIG%({UY3Z)dl~yYng;jtB4&?00M;iJbr`*jz06O}X z3fN(y$M{Ib(!dX$(7)yCsiE(ICpwE2A^#hd`7d( zWR`iC4?eCC#4kZUEItQI}e|t8ub;7mq(Yjf{?2i~d|9E?h%=9)8ve5fi z6P7)PQ9FTONq4f6ac4l-vrtQQzqzF?#u!5fhR^&Kve_8)a97F2kQ{Kjvi#o1L0|kp z&Bu%uH$P`uR?bmQ)ajUx={E{Xa}OG>Ke-0Zw@T@Rg)pt0qq|3dHuMy?Ow4ZXin;Cq zefR4v8vnvP$j7tYYiRnTLbXahD+2TLDixI6h{;|56z@o2=0zjdHq4D640N z+ceR9Qsh5^Ya}_UA=0o{?pzqv!SR+(nMwcn^;771_h`GD<3}|rC!2&OapN0>XYX{S z>oM=+z_)a=rba0)zOuX3tj;D3KlqRcc*4no)uK;dXzKc#d=Lpwb>hW`!J>G7iqdj1 z>)d79z?%E8EC)uVEGG%PltI732#q!yq*o3??GbZb8p{;HkiDD$XDqA*Oh&~0-AR(4 zjB++cW;{D<;lGr@@zl?vX z(f`9abq5}2@cz$724iHpq7oItvaJwiOfCE^%%DSDg)u27?t$407Gat`TIG`U{xdUv z_Q4QH+~Cc#gn>@;>Gf2Lr>$A;x>1&++j^K=Qc^#$qZ~(3c|4 zw&wE)%;!7#d>)4RJb^=e)0)p;Js9(O+Y)}h*RTRJAsGbo`K7b3^SOe#20`e7eBLHY zqoR2L63BI88FOPo5B-)4cm##_!R+rf25D!TB__KN_~>EM^(EBNZ--!x~}um^`Rif8}KG_s0~Mud&nz{UP&|uF||2HafC$ zt5F)IyGm>`6+F(tryDUJVMefhkM)>g@;)-ba}ZHt2vXeXKYS$Uvegf*K zXe9j_mAiY~e+ogvv;q>-u2j*}raNk7sd{Q-JS-!LJ~o#QD`!TlXqUSfoJknKRjGqV z%V5L?H2Fh}cvEXyU%{t!yhuiRg%?Iof`KH$oa5?hx%fWsEVb?YSjF5M*93!3qmMN* zyO@*R(pOA1d924iop_i<=5?x3>H6;dVpbFdVZ?w#?$b@n<#P%vsI_i~^Mm2*mw#o& za_G+;Xwrzk8-9*^_ZG?WUU#e>62gE;<|wACDI@@NU$`~M=1yj*rQ?`thu25A9SV_OEmPIRo}$gU@lP_QF1F5 zc4s#dXztJfvk`C0l5=#YJFVXA0D&Z)u7DB(&F2z?#rsbL%q1{=zRvy?OkesO-4G#h zD;juQqww-)VVk1-#+fxvB&?4N+ zl5mK@rJS=fW*K7m&Gg(mhnbBpbd(d+)v^NXMfF_JSD|5+v%V3%&?%Z=F?#klZ0n3s zlpllX!Ge8HM~_F#L2QeLD!2l1PwTK2!|a!6seU)T31mXl&k7uV*v8rYB)hKw?4j!k z!jzQ+EZ0WDUmdI=ZvTNN0$ff}-ktBx7Cv!R(j>qGMR8w)q6okVATiVSNEyNrGXc9N_SwfwbZ{=5OwTKNo%y>t9Q_ 
zdIZW(N<~O%&PF7_0upBMt^m5~B`5o?Hxe|AGxv0L{UK*2Sf9#8O#`##_2@nB`X_T?W1{og%dTFHn8i>)781O4ToB%W1ze1!~&(yWiT|^#;O?aT6*2c1s7?K?{ z{1Q2%?IQ~}lF0NIkkzCSAr@0mn%U7!1}VJn?sC;?#`n2oX2Q?GuY&)R}p98+}a}{==IwCB0&?s3)Mf zf>iw@mg)3U>ihHLj#T6-c*sCD5^wenu@i;{jve^T!h1w|Fk)x&?{N_RWgi_gcb|H0YX|?)}Y^R|Fr)O53(z+$;pig}1PECzmh&HMTzPH`w~2v$W!o zh6xFghpIDs@bo<6KGPAU989rn-_t%Iz61A3&X^0ORRc0JI5mm~=VWUJZp5pXqyWMmOP9dAlhQuxK*UHnMwHIU)?;g^!x)IYcW9 zg~UXvw(~ysz`ad6VeURX#Z%GN%zlc`?4Qo&Q9;#NxVG?{9(8ad6d6|?t(+DKKA#Gb zKQZ$4r(1u?*Ij?f*Ry`j*D+>{Lj=kB5Mid0xeo*O>Xk&_Tc8%X6HV_0@bg6O#=WPf z$jiTC?8lAfs1@=m-R@iOJnLnI1V2gb`7riZshuASQ~MYNT;x-`%PcOaqZ4{XH8r)& z+$D$;p=t7lxXuuClOv%8t=AP|IupsIjTkpVi&2?FkYS{h2nac!&M0IU-3uWX;ThKO z9jDy)inL!t(!P=CK(9#q-+xWow}JTkpZN7X&a+j5Ll%rk)yRT>F2UhMUrTU%4Hjt6 zL^XS&r@o6W=EPms?Or;*BO$@)QmBHm%r9qogu__D`}+#OfOEN-P!M6U6KB^Y3Uv-@ z1Jtbqk;5}AFHMuKoM*A-3zM+dKEum>tZ30O;w(kv$J!h-UEz-!fTIdFnq%ml(LE^y-6{&{7aVsC7TtAP_QTST=F zDZ2#VIkQU`pt&CXBw_H%&VE@N9a+9PAZ((FWH2c#d9X1nV^# zrFW0}DPUDbuP1(Y9_kwpl{4r~5fz-cJ3{l^FX0KF;?+Z(p;e_8Cc+x*GGBrnCpzgr zCBdPrwk8*zJP~+wn3JY713i=bZ1L@8 z;*HLB3ic6ip&uH8k}yjT0Gsw(q05V`9?u1%e;gQ0g30JahEwmH7C zb5sclOh;8vhyG?moXsd`AxbZQh0@3J+Wa1tUbh#5UaCEJk3Dt3yj^fxE=*a@`(YEk z$l=RYjNS_|x^ceN{Cx4X7u{t=ufc(i9`tI)Z_?9u5>$Z;kI3g$6ucbJ}zb zBZugYCi*BFTuj_w6%7dcZq?(Bf(k+}%=K9db;^;){0mBQE;gjkU8aHjxhKOMmfpvp z^EIhEZE&5qbY)y&6U^Ib5PDQg!-{~_=6l-86-HF{aD&XCUdz@@X`QE5y9o0=@=Q;ze)O)60HCPv5C05uki)20a(u@<`1( z)Q-n-L}AC82~q9iILX8^dhVoEFFn3r00jehdP_7lKBKg1YJ5s=)zo;Wz%@S&=|<;n zW!`N9^RLtMPZJ-qbL?Z}(IAnoH;G`dF6iZ!dYi?~TH;hb{HT=fJSjA|b(!HRS25?j({;$Fx}y9Pk{d{%u_k86#co$fku zk|u6&1bEH{>tn^xsrE2S!9ULvJF z)=2Lnaty|TYLuux{Vw>WnH&xKo*^jqv7Day<|I9GFxcGc>ce2ER&FfS1|dkz*ZJ$| z5x42?ugl)d+@Cs*F-@bvpd$XJG;QPsga4Ch+V%};nuz#q0p|{aKO;;gavF9u#O)t9 zJ+1vDBT0A#xrg<(}JU$>7x0lDp7m|@uxiMayok0F+&GUu>XN@)jLZ}TrohV83QDiqlX(2&o8pHBTrzkYPRZ4Y5J#SZ$1tZtn??9 zy`5YLp7Fk#c+vRInSy_4TMVSzibK zec6ph38ip;JFG;heK`RU<<;?=#Cgvbk0-)_aP`?|hM;{G)$|^&**xSyGH3hr;X~2P zC@!v&G1Yl5RZw=BC)HA-IaM;)IMcB~L>MUoZR{rRjM^w6w>GvYkk`^ADbLQL3p8L| z5M@-z(N?uWv?e2}odko!NaHqdii6-=Y>;Z)&k5y^cn4m=IzJcv&*2Za5^T&trTxfa zy;k^^G=1SuNYky(Vw~aIOW(i2qLDeisr0@1?WJ$xAa6k4!~U}MAkXh#B|VHruISOf znj~SRf5f97vD;cbdTJv~9M(qrUl+fTm7oQY!ZFY4QHOZ9v97l}nbyWHgPD$MBY_Pf>}t64wT^i+k>;CMTFcrzz{iwL z5(}4!)$C(Oun>d&p1-Gd+ma(5d}CT4jH36U)R+^ov_+(jHpQu~mD2uZGBK8^jUbXp zwAWLD7Q@!Ne@zyXJouMk_YXO5fZcD4Kz~#h!=Ac7tcyLXA=OSruDW1{cLDNS z)WIg>2_mGdd)PR>^3~l~!^XChY17y8895voF<-;Zq2qX=>_!F>Cz7mrJur@o9<_dT7e6D>mEMf_r^ib*wI*pAr{FKp5=iTctlpRBhsM=~&OR05_x z9Z}aM&EXkaixN7VP6DB8qm1PD(K_+ePS`#x?}N$)dRA7cj`F(Ze15Z95IseRQs1zh zXE7zy(qg`s#pO%(qmJ0i!s$=fg^^yC2t4;q7%G;Kskz1)7vk*WFn5wQ7s(4&q=x-Z zq-dg`*|o~ZM+{~t(7NiFA z(gr_Um3Y22?bo6c!g$oYc*%yO!5A-|;o~8wnCtyFhx$)0{bm*hF1{O;@1{xsYhFgH zrYK&4S02C)D(vmk)8>H3^jh_}x3cKr<^P__{U37YU$baj15AcI1?AH(#>1iaUvUIZ zwBSM`hUe;%{liEfA(q^aHNwW9Nn?Bw6I*_ky=vfnTloshd!%B=H}0ZBLR5@t7a(hO z!tqg~zaLHq$1Vc#sq?!|t|L%lq=uflG-3#LK)WSw2KoMUa1G;j-Np8}CD?>Am5qC< zE9}X4p?sQ&=O9S0BKAU=nxF`^cpJB{gP1fG{JeUF8;)-FFIBHnexrI7N09o@noalp zvf1<`Hkeyn=symA0c+LaAN~j`g z(Kk_?M;&duu|Z&2bg;Ht`;}d*s6D>UI~wTjoE>t3D+U1yUk0i8mM)+S9sxO*Y5Nt( zUj`PXz#=@E{CPa**XL#vC$uR5R0h@G&x9_0ZjR;x^ipiR<&U~hcl#Mcr%GW^uWCA~ z-HT^14iDtuzu_^SGo7P+VB?oawfIL+o?YVoVFb1UrF;dGxB%=G!*q`aVe0837}^ex z&_;1;AL)Y(>~N<#;W>_a+IWil%5%yW!Y9E9<7fQ&&y@3@!F$b(uYku>O7OTSHXEvK zmq2-n{COF?Yml~|#g?{E*#F)_j)Mr&;4mRD7J3b`iQe5Cpp^IihmFu$R(3s#aeZt&IRJ>)LG=D&o4eQ*Z2`>U&Dd< zxMXCB1nDrAL+Mvbj)0J5MDrudScUT~0X&}-_{-T)sGzoP%ljr|TW~tKEr%PFxR+bA~IU2eFqaetiSsfru?7kD<>Stng@BuWCDPc8*eq*Va6s zv;sUr!@tYyyX0nkk^ionfB&6-wXADen3Jt*{}755;8ssB`W6D;k0dK(wAXIBfFqFq 
z42&DpdVLm~0dsWAXtd{%Itw_*=+b#ZnWGh#(R1)Dlp)|eqrq#p&(aN)E}VQ2ed{QE z!TI4Cr~hp?Pa@M^|MO}82;mE};oTJSUMK++e$P#Y5vgC{>m1?o1+6YZ;mI%FMaU;k zVdItz-wr(|7>%6wefmR4$HQ3YbR9Kxi$=J@AX-#H`g8>raQP;|^V-{|u`{mlGK6nz z3lJy_dM$;_db(#fjRg#>60`N#07p)?p_^jY8*%IsL)=tIXMg#T@1T^kAdlOx!Lmc5 z8Y7BD$}cC~>Pi3XAX6dp_n(8+TMQ0AahTZ&@U2=zJhLllF?6p4S3U~;=HBD-_0bBx z{@LrI4RO`D&={fymNVNx`ZmMiUy}q=R`4pqp>9J0&rDX}ZHV*71{pkGwUx!OP{t;k zbDs`wGjiTGeM!*4FhEZ6VTiYeGP=!rpYV1-ZHVzj^Z?dh+ky91nJHi*v662BF5p*D<%C$VC#zX-=B5-|Z#62u<=A z@gfj#DUD`o49{=b*TXR2=k}Qa&t)9iJ9hG8JVzGuZ{GYhx%uY~^mB`P+mrw3&CNg# zguW>V-sasYpcIh?mimaLEiecw89u^d7_OmTM~Pw39Z;9A?Bm0gmbDtLF@IsOcvB$# zt#I~V5QTp?c^KF2ClT={8;Yy*!`p*EGFzDT|53xQz|Oyb8eRo~V%X4{_%1?f5L|Cz zF5SY`R0V~8V?1Ut#dDi&jA85@x`3Q!T`wSxTy#e(!-z3_~&Dua5repC;jsRRFl6*azd<{f5 zuy4g#{7Q^^i%}!U3M8kYNaFhG5I={=@;{jTem9H!X%ytoO}lSPey{K`p&q}DfLW0l z7Pj$NBr#-q#BXtCq`mmnZEwLX|D`MUJJbFZL;48<#URN?8+6}zX!2fryfKUD-%ewBo!Ia0h2{5^h{Yv#x{c9Z|g_UEt ztmGKX+k?*73$S zKl)aFpxD+x#e)M%hD8wE#_{E>{Iw_u&p2mHkK1AUE6uQ(YJRN{_~@mBdsX8tuLOi5 zD0;0pn%2_CP$FoiEZPJ~9LcC4%mKedW<`ZdBuW9pyvL}UT>VHmnm zb|5NJlbD2ZG~ofuA+@{X9tgV7$p|6GJ_pARj}XDU3Q+PWlSdh_4PuD$p`|LIRVqFB z8fgkm=`So$N|N`yr9t%@*MP*i&Md-!*TXp8LXiI*s#>UAWxy3)J0_g8g~G2?BGTsL_LisUC|T1Y(*pYiGFQV+x9m?w{$wOg^9jR&dgCcth|ig zU|i5S%@oS?{G?&LVUWjb4TBZE(mc0wW=8mSnu|##T0F`H1D2!ucMZSj3pb({(SLG` zyZexi;PvGFDmyOySO~8U`fx<(;se$`UBC|{PU9A2z@wa8o`T)yg(hA<`pT#u-G4t4 zuPxiXl!&F>P*ojRWejN5)M8%CxP@Zi8XG%aoSKQ^)F7j2&ScYhy&9LajNVk`Ld3$c z&w25IS7k545?+;kM}gNyP3Oh%&MXnKc+>5u5sZNE`JH|9>&kGSpVexk0<#lZvH~d93&Lpa40oIY;Y{Gn1Zog3uluKr_>cSHTl3tXyhA+SS^p>H zIrT@Z+^jUU!Q0)X4lWHZ$Ajy$tMNaBu+V^aP0=Ii7!)A0Kz@EfxF=Ry|LC3yd>P!83@*RY~nfuxUQ zC1uY!=RT?yhmcev@Zi2FnUqP4(mN&!I%Mb!nb2gZ0=x#bg)kB(Xa zl~X`@jr+wF#V@W@gLoMvHMdS!8VQ@OdpGDO9_rjoho91gnOB14dD-}bCX+DOh8H*7 z$FF1+)mBsGhA>}$|M$x3j)!TekVvak?em+ZCaYMp#7f!~zh7ls@0-ij$2n8dNP>+M zLM1pnnSci#Kfwdlp-{d$2~UKFtn0Y`0)Eq(!1^|R*^jP^$;%87n3w>Fw)DGdVlXrL EKTbEQsQ>@~ diff --git a/test/e2e/pao/testdata/ppc-expected-info/must-gather.bare-metal.json b/test/e2e/pao/testdata/ppc-expected-info/must-gather.bare-metal.json deleted file mode 100644 index 1470dc99c..000000000 --- a/test/e2e/pao/testdata/ppc-expected-info/must-gather.bare-metal.json +++ /dev/null @@ -1 +0,0 @@ -[{"name":"worker","nodes":[{"name":"worker2","smt_enabled":false,"numa_cells_count":1,"cpus_count":24,"numa_cells":[{"id":0,"cores":[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23]}]}]},{"name":"master","nodes":null},{"name":"worker-cnf","nodes":[{"name":"worker1","smt_enabled":true,"numa_cells_count":2,"cpus_count":80,"numa_cells":[{"id":0,"cores":[0,40,2,42,4,44,6,46,8,48,10,50,12,52,14,54,16,56,18,58,20,60,22,62,24,64,26,66,28,68,30,70,32,72,34,74,36,76,38,78]},{"id":1,"cores":[1,41,3,43,5,45,7,47,9,49,11,51,13,53,15,55,17,57,19,59,21,61,23,63,25,65,27,67,29,69,31,71,33,73,35,75,37,77,39,79]}]}]}] diff --git a/test/e2e/pao/testdata/ppc-expected-info/must-gather.sno.json b/test/e2e/pao/testdata/ppc-expected-info/must-gather.sno.json deleted file mode 100644 index 63d743aad..000000000 --- a/test/e2e/pao/testdata/ppc-expected-info/must-gather.sno.json +++ /dev/null @@ -1 +0,0 @@ -[{"name":"master","nodes":[{"name":"ocp47sno-master-0.demo.lab","smt_enabled":false,"numa_cells_count":1,"cpus_count":12,"numa_cells":[{"id":0,"cores":[0,1,2,3,4,5,6,7,8,9,10,11]}]}]},{"name":"worker","nodes":null}] diff --git a/test/e2e/pao/testdata/ppc-expected-profiles/profile1.json b/test/e2e/pao/testdata/ppc-expected-profiles/profile1.json deleted file mode 100644 index d0529c064..000000000 --- 
a/test/e2e/pao/testdata/ppc-expected-profiles/profile1.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "must-gather-dir-path": "must-gather.bare-metal", - "mcp-name": "worker-cnf", - "reserved-cpu-count": 4, - "rt-kernel": true -} diff --git a/test/e2e/pao/testdata/ppc-expected-profiles/profile1.yaml b/test/e2e/pao/testdata/ppc-expected-profiles/profile1.yaml deleted file mode 100644 index e8ceaa35c..000000000 --- a/test/e2e/pao/testdata/ppc-expected-profiles/profile1.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: performance.openshift.io/v2 -kind: PerformanceProfile -metadata: - name: performance -spec: - cpu: - isolated: 1,3-39,41,43-79 - reserved: 0,2,40,42 - machineConfigPoolSelector: - machineconfiguration.openshift.io/role: worker-cnf - nodeSelector: - node-role.kubernetes.io/worker-cnf: "" - numa: - topologyPolicy: restricted - realTimeKernel: - enabled: true diff --git a/test/e2e/pao/testdata/ppc-expected-profiles/profile2.json b/test/e2e/pao/testdata/ppc-expected-profiles/profile2.json deleted file mode 100644 index 9cce2d675..000000000 --- a/test/e2e/pao/testdata/ppc-expected-profiles/profile2.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "must-gather-dir-path": "must-gather.sno", - "mcp-name": "master", - "reserved-cpu-count": 1, - "rt-kernel": false, - "user-level-networking": false -} diff --git a/test/e2e/pao/testdata/ppc-expected-profiles/profile2.yaml b/test/e2e/pao/testdata/ppc-expected-profiles/profile2.yaml deleted file mode 100644 index 9c477c717..000000000 --- a/test/e2e/pao/testdata/ppc-expected-profiles/profile2.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: performance.openshift.io/v2 -kind: PerformanceProfile -metadata: - name: performance -spec: - cpu: - isolated: 1-11 - reserved: 0 - machineConfigPoolSelector: - pools.operator.machineconfiguration.openshift.io/master: "" - net: - userLevelNetworking: false - nodeSelector: - node-role.kubernetes.io/master: "" - numa: - topologyPolicy: restricted - realTimeKernel: - enabled: false diff --git a/test/e2e/pao/testdata/ppc-expected-profiles/profile3.json b/test/e2e/pao/testdata/ppc-expected-profiles/profile3.json deleted file mode 100644 index bbd0474d1..000000000 --- a/test/e2e/pao/testdata/ppc-expected-profiles/profile3.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "must-gather-dir-path": "must-gather.bare-metal", - "mcp-name": "worker-cnf", - "reserved-cpu-count": 5, - "disable-ht": true, - "rt-kernel": true, - "split-reserved-cpus-across-numa": true, - "user-level-networking": false -} - - diff --git a/test/e2e/pao/testdata/ppc-expected-profiles/profile3.yaml b/test/e2e/pao/testdata/ppc-expected-profiles/profile3.yaml deleted file mode 100644 index e1289b225..000000000 --- a/test/e2e/pao/testdata/ppc-expected-profiles/profile3.yaml +++ /dev/null @@ -1,21 +0,0 @@ ---- -apiVersion: performance.openshift.io/v2 -kind: PerformanceProfile -metadata: - name: performance -spec: - additionalKernelArgs: - - nosmt - cpu: - isolated: 5-39 - reserved: 0-4 - machineConfigPoolSelector: - machineconfiguration.openshift.io/role: worker-cnf - net: - userLevelNetworking: false - nodeSelector: - node-role.kubernetes.io/worker-cnf: "" - numa: - topologyPolicy: restricted - realTimeKernel: - enabled: true diff --git a/test/e2e/pao/testdata/ppc-expected-profiles/profile4.json b/test/e2e/pao/testdata/ppc-expected-profiles/profile4.json deleted file mode 100644 index 099e7aadc..000000000 --- a/test/e2e/pao/testdata/ppc-expected-profiles/profile4.json +++ /dev/null @@ -1,10 +0,0 @@ -{ - "must-gather-dir-path": "must-gather.bare-metal", - "mcp-name": 
"worker-cnf", - "reserved-cpu-count": 12, - "rt-kernel": true, - "disable-ht": true, - "user-level-networking": false -} - - diff --git a/test/e2e/pao/testdata/ppc-expected-profiles/profile4.yaml b/test/e2e/pao/testdata/ppc-expected-profiles/profile4.yaml deleted file mode 100644 index fa6ef0b0a..000000000 --- a/test/e2e/pao/testdata/ppc-expected-profiles/profile4.yaml +++ /dev/null @@ -1,21 +0,0 @@ ---- -apiVersion: performance.openshift.io/v2 -kind: PerformanceProfile -metadata: - name: performance -spec: - additionalKernelArgs: - - nosmt - cpu: - isolated: 1,3,5,7,9,11,13,15,17,19,21,23-39 - reserved: 0,2,4,6,8,10,12,14,16,18,20,22 - machineConfigPoolSelector: - machineconfiguration.openshift.io/role: worker-cnf - net: - userLevelNetworking: false - nodeSelector: - node-role.kubernetes.io/worker-cnf: "" - numa: - topologyPolicy: restricted - realTimeKernel: - enabled: true diff --git a/test/e2e/pao/testdata/ppc-expected-profiles/profile5.json b/test/e2e/pao/testdata/ppc-expected-profiles/profile5.json deleted file mode 100644 index c0b2c34da..000000000 --- a/test/e2e/pao/testdata/ppc-expected-profiles/profile5.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "must-gather-dir-path": "must-gather.bare-metal", - "mcp-name": "worker-cnf", - "reserved-cpu-count": 4, - "disable-ht": false, - "rt-kernel": true, - "split-reserved-cpus-across-numa": true, - "user-level-networking": false -} diff --git a/test/e2e/pao/testdata/ppc-expected-profiles/profile5.yaml b/test/e2e/pao/testdata/ppc-expected-profiles/profile5.yaml deleted file mode 100644 index 8f2a0fc4d..000000000 --- a/test/e2e/pao/testdata/ppc-expected-profiles/profile5.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- -apiVersion: performance.openshift.io/v2 -kind: PerformanceProfile -metadata: - name: performance -spec: - cpu: - isolated: 2-39,42-79 - reserved: 0-1,40-41 - machineConfigPoolSelector: - machineconfiguration.openshift.io/role: worker-cnf - net: - userLevelNetworking: false - nodeSelector: - node-role.kubernetes.io/worker-cnf: "" - numa: - topologyPolicy: restricted - realTimeKernel: - enabled: true diff --git a/test/e2e/pao/testdata/render-expected-output/manual_kubeletconfig.yaml b/test/e2e/pao/testdata/render-expected-output/manual_kubeletconfig.yaml deleted file mode 100644 index 8ab1685a9..000000000 --- a/test/e2e/pao/testdata/render-expected-output/manual_kubeletconfig.yaml +++ /dev/null @@ -1,64 +0,0 @@ -apiVersion: machineconfiguration.openshift.io/v1 -kind: KubeletConfig -metadata: - creationTimestamp: null - name: performance-manual - ownerReferences: - - apiVersion: "" - kind: PerformanceProfile - name: manual - uid: "" -spec: - kubeletConfig: - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: {} - webhook: - cacheTTL: 0s - x509: {} - authorization: - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cpuManagerPolicy: static - cpuManagerPolicyOptions: - full-pcpus-only: "true" - cpuManagerReconcilePeriod: 5s - evictionHard: - memory.available: 100Mi - evictionPressureTransitionPeriod: 0s - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - memory: 500Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memoryManagerPolicy: Static - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - reservedMemory: - - limits: - memory: 1100Mi - numaNode: 0 - reservedSystemCPUs: "0" - runtimeRequestTimeout: 0s - shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - 
streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - memory: 500Mi - topologyManagerPolicy: single-numa-node - volumeStatsAggPeriod: 0s - machineConfigPoolSelector: - matchLabels: - machineconfiguration.openshift.io/role: worker-cnf -status: - conditions: null diff --git a/test/e2e/pao/testdata/render-expected-output/manual_machineconfig.yaml b/test/e2e/pao/testdata/render-expected-output/manual_machineconfig.yaml deleted file mode 100644 index b924050d3..000000000 --- a/test/e2e/pao/testdata/render-expected-output/manual_machineconfig.yaml +++ /dev/null @@ -1,100 +0,0 @@ -apiVersion: machineconfiguration.openshift.io/v1 -kind: MachineConfig -metadata: - creationTimestamp: null - labels: - machineconfiguration.openshift.io/role: worker-cnf - name: 50-performance-manual - ownerReferences: - - apiVersion: "" - kind: PerformanceProfile - name: manual - uid: "" -spec: - config: - ignition: - config: - replace: - verification: {} - proxy: {} - security: - tls: {} - timeouts: {} - version: 3.2.0 - passwd: {} - storage: - files: - - contents: - source: data:text/plain;charset=utf-8;base64,IyEvdXNyL2Jpbi9lbnYgYmFzaAoKc2V0IC1ldW8gcGlwZWZhaWwKCm5vZGVzX3BhdGg9Ii9zeXMvZGV2aWNlcy9zeXN0ZW0vbm9kZSIKaHVnZXBhZ2VzX2ZpbGU9IiR7bm9kZXNfcGF0aH0vbm9kZSR7TlVNQV9OT0RFfS9odWdlcGFnZXMvaHVnZXBhZ2VzLSR7SFVHRVBBR0VTX1NJWkV9a0IvbnJfaHVnZXBhZ2VzIgoKaWYgWyAhIC1mICIke2h1Z2VwYWdlc19maWxlfSIgXTsgdGhlbgogIGVjaG8gIkVSUk9SOiAke2h1Z2VwYWdlc19maWxlfSBkb2VzIG5vdCBleGlzdCIKICBleGl0IDEKZmkKCnRpbWVvdXQ9NjAKc2FtcGxlPTEKY3VycmVudF90aW1lPTAKd2hpbGUgWyAiJChjYXQgIiR7aHVnZXBhZ2VzX2ZpbGV9IikiIC1uZSAiJHtIVUdFUEFHRVNfQ09VTlR9IiBdOyBkbwogIGVjaG8gIiR7SFVHRVBBR0VTX0NPVU5UfSIgPiIke2h1Z2VwYWdlc19maWxlfSIKCiAgY3VycmVudF90aW1lPSQoKGN1cnJlbnRfdGltZSArIHNhbXBsZSkpCiAgaWYgWyAkY3VycmVudF90aW1lIC1ndCAkdGltZW91dCBdOyB0aGVuCiAgICBlY2hvICJFUlJPUjogJHtodWdlcGFnZXNfZmlsZX0gZG9lcyBub3QgaGF2ZSB0aGUgZXhwZWN0ZWQgbnVtYmVyIG9mIGh1Z2VwYWdlcyAke0hVR0VQQUdFU19DT1VOVH0iCiAgICBleGl0IDEKICBmaQoKICBzbGVlcCAkc2FtcGxlCmRvbmUK - verification: {} - group: {} - mode: 448 - path: /usr/local/bin/hugepages-allocation.sh - user: {} - - contents: - source: 
data:text/plain;charset=utf-8;base64,IyEvdXNyL2Jpbi9lbnYgYmFzaAoKbWFzaz0iJHsxfSIKWyAtbiAiJHttYXNrfSIgXSB8fCB7IGxvZ2dlciAiJHswfTogVGhlIHJwcy1tYXNrIHBhcmFtZXRlciBpcyBtaXNzaW5nIiA7IGV4aXQgMDsgfQoKcGlkPSQoanEgJy5waWQnIC9kZXYvc3RkaW4gMj4mMSkKW1sgJD8gLWVxIDAgJiYgLW4gIiR7cGlkfSIgXV0gfHwgeyBsb2dnZXIgIiR7MH06IEZhaWxlZCB0byBleHRyYWN0IHRoZSBwaWQ6ICR7cGlkfSI7IGV4aXQgMDsgfQoKbnM9JChpcCBuZXRucyBpZGVudGlmeSAiJHtwaWR9IiAyPiYxKQpbWyAkPyAtZXEgMCAmJiAtbiAiJHtuc30iIF1dIHx8IHsgbG9nZ2VyICIkezB9IEZhaWxlZCB0byBpZGVudGlmeSB0aGUgbmFtZXNwYWNlOiAke25zfSI7IGV4aXQgMDsgfQoKIyBVcGRhdGVzIHRoZSBjb250YWluZXIgdmV0aCBSUFMgbWFzayBvbiB0aGUgbm9kZQpuZXRuc19saW5rX2luZGV4ZXM9JChpcCBuZXRucyBleGVjICIke25zfSIgaXAgLWogbGluayB8IGpxICIuW10gfCBzZWxlY3QoLmxpbmtfaW5kZXggIT0gbnVsbCkgfCAubGlua19pbmRleCIpCmZvciBsaW5rX2luZGV4IGluICR7bmV0bnNfbGlua19pbmRleGVzfTsgZG8KICBjb250YWluZXJfdmV0aD0kKGlwIC1qIGxpbmsgfCBqcSAiLltdIHwgc2VsZWN0KC5pZmluZGV4ID09ICR7bGlua19pbmRleH0pIHwgLmlmbmFtZSIgfCB0ciAtZCAnIicpCiAgZWNobyAke21hc2t9ID4gL3N5cy9kZXZpY2VzL3ZpcnR1YWwvbmV0LyR7Y29udGFpbmVyX3ZldGh9L3F1ZXVlcy9yeC0wL3Jwc19jcHVzCmRvbmUKCiMgVXBkYXRlcyB0aGUgUlBTIG1hc2sgZm9yIHRoZSBpbnRlcmZhY2UgaW5zaWRlIG9mIHRoZSBjb250YWluZXIgbmV0d29yayBuYW1lc3BhY2UKbW9kZT0kKGlwIG5ldG5zIGV4ZWMgIiR7bnN9IiBbIC13IC9zeXMgXSAmJiBlY2hvICJydyIgfHwgZWNobyAicm8iIDI+JjEpClsgJD8gLWVxIDAgXSB8fCB7IGxvZ2dlciAiJHswfSBGYWlsZWQgdG8gZGV0ZXJtaW5lIGlmIHRoZSAvc3lzIGlzIHdyaXRhYmxlOiAke21vZGV9IjsgZXhpdCAwOyB9CgppZiBbICIke21vZGV9IiA9ICJybyIgXTsgdGhlbgogICAgcmVzPSQoaXAgbmV0bnMgZXhlYyAiJHtuc30iIG1vdW50IC1vIHJlbW91bnQscncgL3N5cyAyPiYxKQogICAgWyAkPyAtZXEgMCBdIHx8IHsgbG9nZ2VyICIkezB9OiBGYWlsZWQgdG8gcmVtb3VudCAvc3lzIGFzIHJ3OiAke3Jlc30iOyBleGl0IDA7IH0KZmkKCiMgL3N5cy9jbGFzcy9uZXQgY2FuJ3QgYmUgdXNlZCByZWN1cnNpdmVseSB0byBmaW5kIHRoZSBycHNfY3B1cyBmaWxlLCB1c2UgL3N5cy9kZXZpY2VzIGluc3RlYWQKcmVzPSQoaXAgbmV0bnMgZXhlYyAiJHtuc30iIGZpbmQgL3N5cy9kZXZpY2VzIC10eXBlIGYgLW5hbWUgcnBzX2NwdXMgLWV4ZWMgc2ggLWMgImVjaG8gJHttYXNrfSB8IGNhdCA+IHt9IiBcOyAyPiYxKQpbWyAkPyAtZXEgMCAmJiAteiAiJHtyZXN9IiBdXSB8fCBsb2dnZXIgIiR7MH06IEZhaWxlZCB0byBhcHBseSB0aGUgUlBTIG1hc2s6ICR7cmVzfSIKCmlmIFsgIiR7bW9kZX0iID0gInJvIiBdOyB0aGVuCiAgICBpcCBuZXRucyBleGVjICIke25zfSIgbW91bnQgLW8gcmVtb3VudCxybyAvc3lzCiAgICBbICQ/IC1lcSAwIF0gfHwgZXhpdCAxICMgRXJyb3Igb3V0IHNvIHRoZSBwb2Qgd2lsbCBub3Qgc3RhcnQgd2l0aCBhIHdyaXRhYmxlIC9zeXMKZmkK - verification: {} - group: {} - mode: 448 - path: /usr/local/bin/low-latency-hooks.sh - user: {} - - contents: - source: 
data:text/plain;charset=utf-8;base64,IyEvdXNyL2Jpbi9lbnYgYmFzaAoKZGV2PSQxClsgLW4gIiR7ZGV2fSIgXSB8fCB7IGVjaG8gIlRoZSBkZXZpY2UgYXJndW1lbnQgaXMgbWlzc2luZyIgPiYyIDsgZXhpdCAxOyB9CgptYXNrPSQyClsgLW4gIiR7bWFza30iIF0gfHwgeyBlY2hvICJUaGUgbWFzayBhcmd1bWVudCBpcyBtaXNzaW5nIiA+JjIgOyBleGl0IDE7IH0KCmRldl9kaXI9Ii9zeXMvY2xhc3MvbmV0LyR7ZGV2fSIKCmZ1bmN0aW9uIGZpbmRfZGV2X2RpciB7CiAgc3lzdGVtZF9kZXZzPSQoc3lzdGVtY3RsIGxpc3QtdW5pdHMgLXQgZGV2aWNlIHwgZ3JlcCBzeXMtc3Vic3lzdGVtLW5ldC1kZXZpY2VzIHwgY3V0IC1kJyAnIC1mMSkKCiAgZm9yIHN5c3RlbWRfZGV2IGluICR7c3lzdGVtZF9kZXZzfTsgZG8KICAgIGRldl9zeXNmcz0kKHN5c3RlbWN0bCBzaG93ICIke3N5c3RlbWRfZGV2fSIgLXAgU3lzRlNQYXRoIC0tdmFsdWUpCgogICAgZGV2X29yaWdfbmFtZT0iJHtkZXZfc3lzZnMjIyovfSIKICAgIGlmIFsgIiR7ZGV2X29yaWdfbmFtZX0iID0gIiR7ZGV2fSIgXTsgdGhlbgogICAgICBkZXZfbmFtZT0iJHtzeXN0ZW1kX2RldiMjKi19IgogICAgICBkZXZfbmFtZT0iJHtkZXZfbmFtZSUlLmRldmljZX0iCiAgICAgIGlmIFsgIiR7ZGV2X25hbWV9IiA9ICIke2Rldn0iIF07IHRoZW4gIyBkaXNyZWdhcmQgdGhlIG9yaWdpbmFsIGRldmljZSB1bml0CiAgICAgICAgICAgICAgY29udGludWUKICAgICAgZmkKCiAgICAgIGVjaG8gIiR7ZGV2fSBkZXZpY2Ugd2FzIHJlbmFtZWQgdG8gJGRldl9uYW1lIgogICAgICBkZXZfZGlyPSIvc3lzL2NsYXNzL25ldC8ke2Rldl9uYW1lfSIKICAgICAgYnJlYWsKICAgIGZpCiAgZG9uZQp9CgpbIC1kICIke2Rldl9kaXJ9IiBdIHx8IGZpbmRfZGV2X2RpciAgICAgICAgICAgICAgICAjIHRoZSBuZXQgZGV2aWNlIHdhcyByZW5hbWVkLCBmaW5kIHRoZSBuZXcgbmFtZQpbIC1kICIke2Rldl9kaXJ9IiBdIHx8IHsgc2xlZXAgNTsgZmluZF9kZXZfZGlyOyB9ICAjIHNlYXJjaCBmYWlsZWQsIHdhaXQgYSBsaXR0bGUgYW5kIHRyeSBhZ2FpbgpbIC1kICIke2Rldl9kaXJ9IiBdIHx8IHsgZWNobyAiJHtkZXZfZGlyfSIgZGlyZWN0b3J5IG5vdCBmb3VuZCA+JjIgOyBleGl0IDA7IH0gIyB0aGUgaW50ZXJmYWNlIGRpc2FwcGVhcmVkLCBub3QgYW4gZXJyb3IKCmZpbmQgIiR7ZGV2X2Rpcn0iL3F1ZXVlcyAtdHlwZSBmIC1uYW1lIHJwc19jcHVzIC1leGVjIHNoIC1jICJlY2hvICR7bWFza30gfCBjYXQgPiB7fSIgXDs= - verification: {} - group: {} - mode: 448 - path: /usr/local/bin/set-rps-mask.sh - user: {} - - contents: - source: data:text/plain;charset=utf-8;base64,CltjcmlvLnJ1bnRpbWVdCmluZnJhX2N0cl9jcHVzZXQgPSAiMCIKCgojIFdlIHNob3VsZCBjb3B5IHBhc3RlIHRoZSBkZWZhdWx0IHJ1bnRpbWUgYmVjYXVzZSB0aGlzIHNuaXBwZXQgd2lsbCBvdmVycmlkZSB0aGUgd2hvbGUgcnVudGltZXMgc2VjdGlvbgpbY3Jpby5ydW50aW1lLnJ1bnRpbWVzLnJ1bmNdCnJ1bnRpbWVfcGF0aCA9ICIiCnJ1bnRpbWVfdHlwZSA9ICJvY2kiCnJ1bnRpbWVfcm9vdCA9ICIvcnVuL3J1bmMiCgojIFRoZSBDUkktTyB3aWxsIGNoZWNrIHRoZSBhbGxvd2VkX2Fubm90YXRpb25zIHVuZGVyIHRoZSBydW50aW1lIGhhbmRsZXIgYW5kIGFwcGx5IGhpZ2gtcGVyZm9ybWFuY2UgaG9va3Mgd2hlbiBvbmUgb2YKIyBoaWdoLXBlcmZvcm1hbmNlIGFubm90YXRpb25zIHByZXNlbnRzIHVuZGVyIGl0LgojIFdlIHNob3VsZCBwcm92aWRlIHRoZSBydW50aW1lX3BhdGggYmVjYXVzZSB3ZSBuZWVkIHRvIGluZm9ybSB0aGF0IHdlIHdhbnQgdG8gcmUtdXNlIHJ1bmMgYmluYXJ5IGFuZCB3ZQojIGRvIG5vdCBoYXZlIGhpZ2gtcGVyZm9ybWFuY2UgYmluYXJ5IHVuZGVyIHRoZSAkUEFUSCB0aGF0IHdpbGwgcG9pbnQgdG8gaXQuCltjcmlvLnJ1bnRpbWUucnVudGltZXMuaGlnaC1wZXJmb3JtYW5jZV0KcnVudGltZV9wYXRoID0gIi9iaW4vcnVuYyIKcnVudGltZV90eXBlID0gIm9jaSIKcnVudGltZV9yb290ID0gIi9ydW4vcnVuYyIKYWxsb3dlZF9hbm5vdGF0aW9ucyA9IFsiY3B1LWxvYWQtYmFsYW5jaW5nLmNyaW8uaW8iLCAiY3B1LXF1b3RhLmNyaW8uaW8iLCAiaXJxLWxvYWQtYmFsYW5jaW5nLmNyaW8uaW8iXQo= - verification: {} - group: {} - mode: 420 - path: /etc/crio/crio.conf.d/99-runtimes.conf - user: {} - - contents: - source: data:text/plain;charset=utf-8;base64,ewogICJ2ZXJzaW9uIjogIjEuMC4wIiwKICAiaG9vayI6IHsKICAgICJwYXRoIjogIi91c3IvbG9jYWwvYmluL2xvdy1sYXRlbmN5LWhvb2tzLnNoIiwKICAgICJhcmdzIjogWyJsb3ctbGF0ZW5jeS1ob29rcy5zaCIsICIwMDAwMDAwMSJdCiAgfSwKICAid2hlbiI6IHsKICAgICJhbHdheXMiOiB0cnVlCiAgfSwKICAic3RhZ2VzIjogWyJwcmVzdGFydCJdCn0K - verification: {} - group: {} - mode: 420 - path: /etc/containers/oci/hooks.d/99-low-latency-hooks.json - user: {} - - contents: - source: 
data:text/plain;charset=utf-8;base64,U1VCU1lTVEVNPT0ibmV0IiwgQUNUSU9OPT0iYWRkIiwgRU5We0RFVlBBVEh9IT0iL2RldmljZXMvdmlydHVhbC9uZXQvdmV0aCoiLCBUQUcrPSJzeXN0ZW1kIiwgRU5We1NZU1RFTURfV0FOVFN9PSJ1cGRhdGUtcnBzQCVrLnNlcnZpY2UiCg== - verification: {} - group: {} - mode: 420 - path: /etc/udev/rules.d/99-netdev-rps.rules - user: {} - systemd: - units: - - contents: | - [Unit] - Description=Hugepages-1048576kB allocation on the node 0 - Before=kubelet.service - - [Service] - Environment=HUGEPAGES_COUNT=1 - Environment=HUGEPAGES_SIZE=1048576 - Environment=NUMA_NODE=0 - Type=oneshot - RemainAfterExit=true - ExecStart=/usr/local/bin/hugepages-allocation.sh - - [Install] - WantedBy=multi-user.target - enabled: true - name: hugepages-allocation-1048576kB-NUMA0.service - - contents: | - [Unit] - Description=Sets network devices RPS mask - - [Service] - Type=oneshot - ExecStart=/usr/local/bin/set-rps-mask.sh %i 00000001 - name: update-rps@.service - extensions: null - fips: false - kernelArguments: null - kernelType: realtime - osImageURL: "" diff --git a/test/e2e/pao/testdata/render-expected-output/manual_runtimeclass.yaml b/test/e2e/pao/testdata/render-expected-output/manual_runtimeclass.yaml deleted file mode 100644 index 3116419f9..000000000 --- a/test/e2e/pao/testdata/render-expected-output/manual_runtimeclass.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: node.k8s.io/v1beta1 -handler: high-performance -kind: RuntimeClass -metadata: - creationTimestamp: null - name: performance-manual - ownerReferences: - - apiVersion: "" - kind: PerformanceProfile - name: manual - uid: "" -scheduling: - nodeSelector: - node-role.kubernetes.io/worker-cnf: "" diff --git a/test/e2e/pao/testdata/render-expected-output/manual_tuned.yaml b/test/e2e/pao/testdata/render-expected-output/manual_tuned.yaml deleted file mode 100644 index a8a94163b..000000000 --- a/test/e2e/pao/testdata/render-expected-output/manual_tuned.yaml +++ /dev/null @@ -1,67 +0,0 @@ -apiVersion: tuned.openshift.io/v1 -kind: Tuned -metadata: - creationTimestamp: null - name: openshift-node-performance-manual - namespace: openshift-cluster-node-tuning-operator - ownerReferences: - - apiVersion: "" - kind: PerformanceProfile - name: manual - uid: "" -spec: - profile: - - data: "[main]\nsummary=Openshift node optimized for deterministic performance - at the cost of increased power consumption, focused on low latency network performance. - Based on Tuned 2.11 and Cluster node tuning (oc 4.5)\ninclude=openshift-node,cpu-partitioning\n\n# - Inheritance of base profiles legend:\n# cpu-partitioning -> network-latency - -> latency-performance\n# https://github.com/redhat-performance/tuned/blob/master/profiles/latency-performance/tuned.conf\n# - https://github.com/redhat-performance/tuned/blob/master/profiles/network-latency/tuned.conf\n# - https://github.com/redhat-performance/tuned/blob/master/profiles/cpu-partitioning/tuned.conf\n\n# - All values are mapped with a comment where a parent profile contains them.\n# - Different values will override the original values in parent profiles.\n\n[variables]\n#> - isolated_cores take a list of ranges; e.g. 
isolated_cores=2,4-7\n\nisolated_cores=1-3\n\n\nnot_isolated_cores_expanded=${f:cpulist_invert:${isolated_cores_expanded}}\n\n[cpu]\n#> - latency-performance\n#> (override)\nforce_latency=cstate.id:1|3\ngovernor=performance\nenergy_perf_bias=performance\nmin_perf_pct=100\n\n[service]\nservice.stalld=start,enable\n\n[vm]\n#> - network-latency\ntransparent_hugepages=never\n\n\n[irqbalance]\n#> Override - the value set by cpu-partitioning with an empty one\nbanned_cpus=\"\"\n\n\n[scheduler]\nruntime=0\ngroup.ksoftirqd=0:f:11:*:ksoftirqd.*\ngroup.rcuc=0:f:11:*:rcuc.*\nsched_rt_runtime_us=-1\nsched_min_granularity_ns=10000000\nsched_migration_cost_ns=5000000\nnuma_balancing=0\n\ndefault_irq_smp_affinity - = ignore\n\n\n[sysctl]\n#> cpu-partitioning #realtime\nkernel.hung_task_timeout_secs - = 600\n#> cpu-partitioning #realtime\nkernel.nmi_watchdog = 0\n#> realtime\nkernel.sched_rt_runtime_us - = -1\n# cpu-partitioning and realtime for RHEL disable it (= 0)\n# OCP is too dynamic when partitioning and needs to - evacuate\n#> scheduled timers when starting a guaranteed workload (= 1)\nkernel.timer_migration = 1\n#> - network-latency\nkernel.numa_balancing=0\nnet.core.busy_read=50\nnet.core.busy_poll=50\nnet.ipv4.tcp_fastopen=3\n#> - cpu-partitioning #realtime\nvm.stat_interval = 10\n\n# ktune sysctl settings - for rhel6 servers, maximizing i/o throughput\n#\n# Minimal preemption granularity - for CPU-bound tasks:\n# (default: 1 msec# (1 + ilog(ncpus)), units: nanoseconds)\n#> - latency-performance\nkernel.sched_min_granularity_ns=10000000\n\n# If a workload - mostly uses anonymous memory and it hits this limit, the entire\n# working set - is buffered for I/O, and any more write buffering would require\n# swapping, - so it's time to throttle writes until I/O can catch up. Workloads\n# that mostly - use file mappings may be able to use even higher values.\n#\n# The generator - of dirty data starts writeback at this percentage (system default\n# is 20%)\n#> - latency-performance\nvm.dirty_ratio=10\n\n# Start background writeback (via - writeback threads) at this percentage (system\n# default is 10%)\n#> latency-performance\nvm.dirty_background_ratio=3\n\n# - The swappiness parameter controls the tendency of the kernel to move\n# processes - out of physical memory and onto the swap disk.\n# 0 tells the kernel to avoid - swapping processes out of physical memory\n# for as long as possible\n# 100 - tells the kernel to aggressively swap processes out of physical memory\n# and - move them to swap cache\n#> latency-performance\nvm.swappiness=10\n\n# The total - time the scheduler will consider a migrated process\n# \"cache hot\" and thus - less likely to be re-migrated\n# (system default is 500000, i.e. 
0.5 ms)\n#> - latency-performance\nkernel.sched_migration_cost_ns=5000000\n\n[selinux]\n#> - Custom (atomic host)\navc_cache_threshold=8192\n\n\n[net]\nnf_conntrack_hashsize=131072\n\n\n[bootloader]\n# - set empty values to disable RHEL initrd setting in cpu-partitioning\ninitrd_remove_dir=\ninitrd_dst_img=\ninitrd_add_dir=\n# - overrides cpu-partitioning cmdline\ncmdline_cpu_part=+nohz=on rcu_nocbs=${isolated_cores} - tuned.non_isolcpus=${not_isolated_cpumask} intel_pstate=disable nosoftlockup\n\ncmdline_realtime=+tsc=nowatchdog - intel_iommu=on iommu=pt isolcpus=managed_irq,${isolated_cores} systemd.cpu_affinity=${not_isolated_cores_expanded}\n\ncmdline_hugepages=+ - default_hugepagesz=1G hugepagesz=2M hugepages=128 \ncmdline_additionalArg=+ - nmi_watchdog=0 audit=0 mce=off processor.max_cstate=1 idle=poll intel_idle.max_cstate=0 - \n" - name: openshift-node-performance-manual - recommend: - - machineConfigLabels: - machineconfiguration.openshift.io/role: worker-cnf - operand: - debug: false - priority: 20 - profile: openshift-node-performance-manual -status: {} diff --git a/tools/docs-generator/docs-generator.go b/tools/docs-generator/docs-generator.go deleted file mode 100644 index aa2d7efa1..000000000 --- a/tools/docs-generator/docs-generator.go +++ /dev/null @@ -1,263 +0,0 @@ -// This generator is based on the api doc generator of the Prometheus Operator -// see https://github.com/coreos/prometheus-operator/tree/master/cmd/po-docgen -// Changes: -// - also print string types -package main - -import ( - "bytes" - "fmt" - "go/ast" - "go/doc" - "go/parser" - "go/token" - "os" - "reflect" - "strings" -) - -func main() { - printAPIDocs(os.Args[2:]) -} - -const ( - firstParagraph = ` -# Performance Profile - -This document documents the PerformanceProfile API introduced by the Performance Operator. - -> This document is generated from code comments on the ` + "`PerformanceProfile`" + ` struct. 
-> When contributing a change to this document please do so by changing those code comments.` -) - -var ( - links = map[string]string{ - "metav1.ObjectMeta": "https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#objectmeta-v1-meta", - "metav1.ListMeta": "https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#listmeta-v1-meta", - "metav1.LabelSelector": "https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#labelselector-v1-meta", - } - - selfLinks = map[string]string{} -) - -func toSectionLink(name string) string { - name = strings.ToLower(name) - name = strings.Replace(name, " ", "-", -1) - return name -} - -func printTOC(types []KubeTypes) { - fmt.Printf("\n## Table of Contents\n") - for _, t := range types { - strukt := t[0] - fmt.Printf("* [%s](#%s)\n", strukt.Name, toSectionLink(strukt.Name)) - } -} - -func printAPIDocs(paths []string) { - fmt.Println(firstParagraph) - - types := ParseDocumentationFrom(paths) - for _, t := range types { - strukt := t[0] - selfLinks[strukt.Name] = "#" + strings.ToLower(strukt.Name) - } - - // we need to parse once more to now add the self links - types = ParseDocumentationFrom(paths) - - printTOC(types) - - for _, t := range types { - strukt := t[0] - fmt.Printf("\n## %s\n\n%s\n\n", strukt.Name, strukt.Doc) - - if len(t) > 1 { - fmt.Println("| Field | Description | Scheme | Required |") - fmt.Println("| ----- | ----------- | ------ | -------- |") - fields := t[1:(len(t))] - for _, f := range fields { - fmt.Println("|", f.Name, "|", f.Doc, "|", f.Type, "|", f.Mandatory, "|") - } - } else { - fmt.Printf("%s is of type `string`.\n", strukt.Name) - } - - fmt.Println("") - fmt.Println("[Back to TOC](#table-of-contents)") - } -} - -// Pair of strings. We keed the name of fields and the doc -type Pair struct { - Name, Doc, Type string - Mandatory bool -} - -// KubeTypes is an array to represent all available types in a parsed file. [0] is for the type itself -type KubeTypes []Pair - -// ParseDocumentationFrom gets all types' documentation and returns them as an -// array. Each type is again represented as an array (we have to use arrays as we -// need to be sure for the order of the fields). This function returns fields and -// struct definitions that have no documentation as {name, ""}. 
-func ParseDocumentationFrom(srcs []string) []KubeTypes { - var docForTypes []KubeTypes - - for _, src := range srcs { - pkg := astFrom(src) - - for _, kubType := range pkg.Types { - if structType, ok := kubType.Decl.Specs[0].(*ast.TypeSpec).Type.(*ast.StructType); ok { - var ks KubeTypes - ks = append(ks, Pair{kubType.Name, fmtRawDoc(kubType.Doc), "", false}) - - for _, field := range structType.Fields.List { - typeString := fieldType(field.Type) - fieldMandatory := fieldRequired(field) - if n := fieldName(field); n != "-" { - fieldDoc := fmtRawDoc(field.Doc.Text()) - ks = append(ks, Pair{n, fieldDoc, typeString, fieldMandatory}) - } - } - docForTypes = append(docForTypes, ks) - } else { - var ks KubeTypes - ks = append(ks, Pair{kubType.Name, fmtRawDoc(kubType.Doc), "string", false}) - docForTypes = append(docForTypes, ks) - } - } - } - - return docForTypes -} - -func astFrom(filePath string) *doc.Package { - fset := token.NewFileSet() - m := make(map[string]*ast.File) - - f, err := parser.ParseFile(fset, filePath, nil, parser.ParseComments) - if err != nil { - fmt.Println(err) - return nil - } - - m[filePath] = f - apkg, _ := ast.NewPackage(fset, m, nil, nil) - - return doc.New(apkg, "", 0) -} - -func fmtRawDoc(rawDoc string) string { - var buffer bytes.Buffer - delPrevChar := func() { - if buffer.Len() > 0 { - buffer.Truncate(buffer.Len() - 1) // Delete the last " " or "\n" - } - } - - // Ignore all lines after --- - rawDoc = strings.Split(rawDoc, "---")[0] - - for _, line := range strings.Split(rawDoc, "\n") { - line = strings.TrimRight(line, " ") - leading := strings.TrimLeft(line, " ") - switch { - case len(line) == 0: // Keep paragraphs - delPrevChar() - buffer.WriteString("\n\n") - case strings.HasPrefix(leading, "TODO"): // Ignore one line TODOs - case strings.HasPrefix(leading, "+"): // Ignore instructions to go2idl - default: - if strings.HasPrefix(line, " ") || strings.HasPrefix(line, "\t") { - delPrevChar() - line = "\n" + line + "\n" // Replace it with newline. This is useful when we have a line with: "Example:\n\tJSON-someting..." 
- } else { - line += " " - } - buffer.WriteString(line) - } - } - - postDoc := strings.TrimRight(buffer.String(), "\n") - postDoc = strings.Replace(postDoc, "\\\"", "\"", -1) // replace user's \" to " - postDoc = strings.Replace(postDoc, "\"", "\\\"", -1) // Escape " - postDoc = strings.Replace(postDoc, "\n", "\\n", -1) - postDoc = strings.Replace(postDoc, "\t", "\\t", -1) - postDoc = strings.Replace(postDoc, "|", "\\|", -1) - postDoc = strings.Replace(postDoc, "<", "&lt;", -1) - postDoc = strings.Replace(postDoc, ">", "&gt;", -1) - - return postDoc -} - -func toLink(typeName string) string { - selfLink, hasSelfLink := selfLinks[typeName] - if hasSelfLink { - return wrapInLink(typeName, selfLink) - } - - link, hasLink := links[typeName] - if hasLink { - return wrapInLink(typeName, link) - } - - return typeName -} - -func wrapInLink(text, link string) string { - return fmt.Sprintf("[%s](%s)", text, link) -} - -// fieldName returns the name of the field as it should appear in JSON format -// "-" indicates that this field is not part of the JSON representation -func fieldName(field *ast.Field) string { - jsonTag := "" - if field.Tag != nil { - jsonTag = reflect.StructTag(field.Tag.Value[1 : len(field.Tag.Value)-1]).Get("json") // Delete first and last quotation - if strings.Contains(jsonTag, "inline") { - return "-" - } - } - - jsonTag = strings.Split(jsonTag, ",")[0] // This can return "-" - if jsonTag == "" { - if field.Names != nil { - return field.Names[0].Name - } - return field.Type.(*ast.Ident).Name - } - return jsonTag -} - -// fieldRequired returns whether a field is a required field. -func fieldRequired(field *ast.Field) bool { - jsonTag := "" - if field.Tag != nil { - jsonTag = reflect.StructTag(field.Tag.Value[1 : len(field.Tag.Value)-1]).Get("json") // Delete first and last quotation - return !strings.Contains(jsonTag, "omitempty") - } - - return false -} - -func fieldType(typ ast.Expr) string { - switch typ.(type) { - case *ast.Ident: - return toLink(typ.(*ast.Ident).Name) - case *ast.StarExpr: - return "*" + toLink(fieldType(typ.(*ast.StarExpr).X)) - case *ast.SelectorExpr: - e := typ.(*ast.SelectorExpr) - pkg := e.X.(*ast.Ident) - t := e.Sel - return toLink(pkg.Name + "." + t.Name) - case *ast.ArrayType: - return "[]" + toLink(fieldType(typ.(*ast.ArrayType).Elt)) - case *ast.MapType: - mapType := typ.(*ast.MapType) - return "map[" + toLink(fieldType(mapType.Key)) + "]" + toLink(fieldType(mapType.Value)) - default: - return "" - } -} diff --git a/vendor/github.com/RHsyseng/operator-utils/LICENSE b/vendor/github.com/RHsyseng/operator-utils/LICENSE deleted file mode 100644 index 261eeb9e9..000000000 --- a/vendor/github.com/RHsyseng/operator-utils/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity.
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/RHsyseng/operator-utils/pkg/validation/schema.go b/vendor/github.com/RHsyseng/operator-utils/pkg/validation/schema.go deleted file mode 100644 index 0fb9c48a4..000000000 --- a/vendor/github.com/RHsyseng/operator-utils/pkg/validation/schema.go +++ /dev/null @@ -1,68 +0,0 @@ -package validation - -import ( - "fmt" - - "github.com/ghodss/yaml" - "github.com/go-openapi/spec" - "github.com/go-openapi/strfmt" - "github.com/go-openapi/validate" -) - -type Schema interface { - GetMissingEntries(crInstance interface{}) []SchemaEntry - Validate(data interface{}) error -} - -func New(crd []byte) (Schema, error) { - object := &customResourceDefinition{} - err := yaml.Unmarshal(crd, object) - if err != nil { - return nil, err - } - return &openAPIV3Schema{&object.Spec.Validation.OpenAPIV3Schema}, nil -} - -func NewVersioned(crd []byte, version string) (Schema, error) { - object := &customResourceDefinition{} - err := yaml.Unmarshal(crd, object) - if err != nil { - return nil, err - } - for _, v := range object.Spec.Versions { - if v.Name == version { - return &openAPIV3Schema{&v.Schema.OpenAPIV3Schema}, nil - } - } - return &openAPIV3Schema{}, fmt.Errorf("no version %s detected in crd", version) -} - -type openAPIV3Schema struct { - schema *spec.Schema -} - -func (schema *openAPIV3Schema) GetMissingEntries(crInstance interface{}) []SchemaEntry { - return getMissingEntries(schema.schema, crInstance) -} - -func (schema *openAPIV3Schema) Validate(data interface{}) error { - return validate.AgainstSchema(schema.schema, data, strfmt.Default) -} - -type customResourceDefinition struct { - Spec customResourceDefinitionSpec `json:"spec,omitempty"` -} - -type customResourceDefinitionSpec struct { - Versions []customResourceDefinitionVersion `json:"versions,omitempty"` - Validation customResourceDefinitionValidation `json:"validation,omitempty"` -} - -type customResourceDefinitionVersion struct { - Name string `json:"Name,omitempty"` - Schema customResourceDefinitionValidation `json:"schema,omitempty"` -} - -type customResourceDefinitionValidation struct { - OpenAPIV3Schema spec.Schema `json:"openAPIV3Schema,omitempty"` -} diff --git a/vendor/github.com/RHsyseng/operator-utils/pkg/validation/schema_sync.go b/vendor/github.com/RHsyseng/operator-utils/pkg/validation/schema_sync.go deleted file mode 100644 index 4c9d9072f..000000000 --- a/vendor/github.com/RHsyseng/operator-utils/pkg/validation/schema_sync.go +++ /dev/null @@ -1,136 +0,0 @@ -package validation - -import ( - "fmt" - "github.com/go-openapi/spec" - "reflect" - "strings" -) - -type SchemaEntry struct { - Path string - Type string -} - -func getMissingEntries(schema *spec.Schema, crInstance interface{}) []SchemaEntry { - var entries []SchemaEntry - crStruct := reflect.ValueOf(crInstance).Elem().Type() - if field, found := crStruct.FieldByName("Spec"); found { - entries = validateField(entries, *schema, "", field) - } - if field, found := crStruct.FieldByName("Status"); found { - entries = validateField(entries, *schema, "", field) - } - return entries -} - -func validateField(entries []SchemaEntry, schema spec.Schema, context string, field reflect.StructField) []SchemaEntry { - reflectType := getActualType(field) - if !field.Anonymous { - name := getFieldName(field) - context = fmt.Sprintf("%s/%s", context, name) - schema = schema.Properties[name] - expectedType := equivalentSchemaType(reflectType.Kind()) - if !schema.Type.Contains(expectedType) { - entries = append(entries, SchemaEntry{context, expectedType}) - } - } - if 
isArray(reflectType) { - reflectType = reflectType.Elem() - if schema.Items != nil { - schema = *schema.Items.Schema - } - } - for _, field := range getChildren(field) { - entries = validateField(entries, schema, context, field) - } - return entries -} - -func getChildren(field reflect.StructField) []reflect.StructField { - reflectType := getActualType(field) - if reflectType.Kind() == reflect.Struct { - return getFields(reflectType) - } else if isArray(reflectType) { - elem := reflectType.Elem() - if elem.Kind() == reflect.Struct { - return getFields(elem) - } - } - return nil -} - -func isArray(fieldType reflect.Type) bool { - switch fieldType.Kind() { - case reflect.Slice: - return true - case reflect.Array: - return true - default: - return false - } -} - -func getFields(fieldType reflect.Type) []reflect.StructField { - var children []reflect.StructField - for index := 0; index < fieldType.NumField(); index++ { - children = append(children, fieldType.Field(index)) - } - return children -} - -func getActualType(field reflect.StructField) reflect.Type { - reflectType := field.Type - if reflectType.Kind() == reflect.Ptr { - reflectType = reflectType.Elem() - } - return reflectType -} - -func equivalentSchemaType(kind reflect.Kind) string { - switch kind { - case reflect.String: - return "string" - case reflect.Float32: - return "number" - case reflect.Float64: - return "number" - case reflect.Int: - return "integer" - case reflect.Int8: - return "integer" - case reflect.Int16: - return "integer" - case reflect.Int32: - return "integer" - case reflect.Int64: - return "integer" - case reflect.Bool: - return "boolean" - case reflect.Struct: - return "object" - case reflect.Ptr: - return "object" - case reflect.Map: - return "object" - case reflect.Array: - return "array" - case reflect.Slice: - return "array" - default: - return "" - } -} - -func getFieldName(field reflect.StructField) string { - tag := string(field.Tag) - parts := strings.Split(tag, ":") - if len(parts) == 1 || parts[0] != "json" { - return field.Name - } else { - quotesRemoved := strings.Replace(parts[1], "\"", "", -1) - commaDelimited := strings.Split(quotesRemoved, ",") - spaceDelimited := strings.Split(commaDelimited[0], " ") - return spaceDelimited[0] - } -} diff --git a/vendor/github.com/StackExchange/wmi/LICENSE b/vendor/github.com/StackExchange/wmi/LICENSE deleted file mode 100644 index ae80b6720..000000000 --- a/vendor/github.com/StackExchange/wmi/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Stack Exchange - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/StackExchange/wmi/README.md b/vendor/github.com/StackExchange/wmi/README.md deleted file mode 100644 index 426d1a46b..000000000 --- a/vendor/github.com/StackExchange/wmi/README.md +++ /dev/null @@ -1,6 +0,0 @@ -wmi -=== - -Package wmi provides a WQL interface to Windows WMI. - -Note: It interfaces with WMI on the local machine, therefore it only runs on Windows. diff --git a/vendor/github.com/StackExchange/wmi/swbemservices.go b/vendor/github.com/StackExchange/wmi/swbemservices.go deleted file mode 100644 index 3ff875630..000000000 --- a/vendor/github.com/StackExchange/wmi/swbemservices.go +++ /dev/null @@ -1,260 +0,0 @@ -// +build windows - -package wmi - -import ( - "fmt" - "reflect" - "runtime" - "sync" - - "github.com/go-ole/go-ole" - "github.com/go-ole/go-ole/oleutil" -) - -// SWbemServices is used to access wmi. See https://msdn.microsoft.com/en-us/library/aa393719(v=vs.85).aspx -type SWbemServices struct { - //TODO: track namespace. Not sure if we can re connect to a different namespace using the same instance - cWMIClient *Client //This could also be an embedded struct, but then we would need to branch on Client vs SWbemServices in the Query method - sWbemLocatorIUnknown *ole.IUnknown - sWbemLocatorIDispatch *ole.IDispatch - queries chan *queryRequest - closeError chan error - lQueryorClose sync.Mutex -} - -type queryRequest struct { - query string - dst interface{} - args []interface{} - finished chan error -} - -// InitializeSWbemServices will return a new SWbemServices object that can be used to query WMI -func InitializeSWbemServices(c *Client, connectServerArgs ...interface{}) (*SWbemServices, error) { - //fmt.Println("InitializeSWbemServices: Starting") - //TODO: implement connectServerArgs as optional argument for init with connectServer call - s := new(SWbemServices) - s.cWMIClient = c - s.queries = make(chan *queryRequest) - initError := make(chan error) - go s.process(initError) - - err, ok := <-initError - if ok { - return nil, err //Send error to caller - } - //fmt.Println("InitializeSWbemServices: Finished") - return s, nil -} - -// Close will clear and release all of the SWbemServices resources -func (s *SWbemServices) Close() error { - s.lQueryorClose.Lock() - if s == nil || s.sWbemLocatorIDispatch == nil { - s.lQueryorClose.Unlock() - return fmt.Errorf("SWbemServices is not Initialized") - } - if s.queries == nil { - s.lQueryorClose.Unlock() - return fmt.Errorf("SWbemServices has been closed") - } - //fmt.Println("Close: sending close request") - var result error - ce := make(chan error) - s.closeError = ce //Race condition if multiple callers to close. 
May need to lock here - close(s.queries) //Tell background to shut things down - s.lQueryorClose.Unlock() - err, ok := <-ce - if ok { - result = err - } - //fmt.Println("Close: finished") - return result -} - -func (s *SWbemServices) process(initError chan error) { - //fmt.Println("process: starting background thread initialization") - //All OLE/WMI calls must happen on the same initialized thead, so lock this goroutine - runtime.LockOSThread() - defer runtime.UnlockOSThread() - - err := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED) - if err != nil { - oleCode := err.(*ole.OleError).Code() - if oleCode != ole.S_OK && oleCode != S_FALSE { - initError <- fmt.Errorf("ole.CoInitializeEx error: %v", err) - return - } - } - defer ole.CoUninitialize() - - unknown, err := oleutil.CreateObject("WbemScripting.SWbemLocator") - if err != nil { - initError <- fmt.Errorf("CreateObject SWbemLocator error: %v", err) - return - } else if unknown == nil { - initError <- ErrNilCreateObject - return - } - defer unknown.Release() - s.sWbemLocatorIUnknown = unknown - - dispatch, err := s.sWbemLocatorIUnknown.QueryInterface(ole.IID_IDispatch) - if err != nil { - initError <- fmt.Errorf("SWbemLocator QueryInterface error: %v", err) - return - } - defer dispatch.Release() - s.sWbemLocatorIDispatch = dispatch - - // we can't do the ConnectServer call outside the loop unless we find a way to track and re-init the connectServerArgs - //fmt.Println("process: initialized. closing initError") - close(initError) - //fmt.Println("process: waiting for queries") - for q := range s.queries { - //fmt.Printf("process: new query: len(query)=%d\n", len(q.query)) - errQuery := s.queryBackground(q) - //fmt.Println("process: s.queryBackground finished") - if errQuery != nil { - q.finished <- errQuery - } - close(q.finished) - } - //fmt.Println("process: queries channel closed") - s.queries = nil //set channel to nil so we know it is closed - //TODO: I think the Release/Clear calls can panic if things are in a bad state. - //TODO: May need to recover from panics and send error to method caller instead. - close(s.closeError) -} - -// Query runs the WQL query using a SWbemServices instance and appends the values to dst. -// -// dst must have type *[]S or *[]*S, for some struct type S. Fields selected in -// the query must have the same name in dst. Supported types are all signed and -// unsigned integers, time.Time, string, bool, or a pointer to one of those. -// Array types are not supported. -// -// By default, the local machine and default namespace are used. These can be -// changed using connectServerArgs. See -// http://msdn.microsoft.com/en-us/library/aa393720.aspx for details. 
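
For reference, the SWbemServices.Query method documented above was typically driven as in the minimal sketch below; the Win32_OperatingSystem struct and its Caption/Version fields are illustrative assumptions, not part of the vendored package.

```go
// +build windows

package main

import (
	"log"

	"github.com/StackExchange/wmi"
)

// Win32_OperatingSystem mirrors the WMI class of the same name; only the
// columns selected by the query need matching struct fields.
type Win32_OperatingSystem struct {
	Caption string
	Version string
}

func main() {
	// Reuse one SWbemServices connection across queries instead of the
	// per-call COM initialization done by the package-level Query helper.
	svc, err := wmi.InitializeSWbemServices(wmi.DefaultClient)
	if err != nil {
		log.Fatal(err)
	}
	defer svc.Close()

	var dst []Win32_OperatingSystem
	q := wmi.CreateQuery(&dst, "") // SELECT Caption, Version FROM Win32_OperatingSystem
	if err := svc.Query(q, &dst); err != nil {
		log.Fatal(err)
	}
	for _, os := range dst {
		log.Printf("%s (%s)", os.Caption, os.Version)
	}
}
```
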
-func (s *SWbemServices) Query(query string, dst interface{}, connectServerArgs ...interface{}) error { - s.lQueryorClose.Lock() - if s == nil || s.sWbemLocatorIDispatch == nil { - s.lQueryorClose.Unlock() - return fmt.Errorf("SWbemServices is not Initialized") - } - if s.queries == nil { - s.lQueryorClose.Unlock() - return fmt.Errorf("SWbemServices has been closed") - } - - //fmt.Println("Query: Sending query request") - qr := queryRequest{ - query: query, - dst: dst, - args: connectServerArgs, - finished: make(chan error), - } - s.queries <- &qr - s.lQueryorClose.Unlock() - err, ok := <-qr.finished - if ok { - //fmt.Println("Query: Finished with error") - return err //Send error to caller - } - //fmt.Println("Query: Finished") - return nil -} - -func (s *SWbemServices) queryBackground(q *queryRequest) error { - if s == nil || s.sWbemLocatorIDispatch == nil { - return fmt.Errorf("SWbemServices is not Initialized") - } - wmi := s.sWbemLocatorIDispatch //Should just rename in the code, but this will help as we break things apart - //fmt.Println("queryBackground: Starting") - - dv := reflect.ValueOf(q.dst) - if dv.Kind() != reflect.Ptr || dv.IsNil() { - return ErrInvalidEntityType - } - dv = dv.Elem() - mat, elemType := checkMultiArg(dv) - if mat == multiArgTypeInvalid { - return ErrInvalidEntityType - } - - // service is a SWbemServices - serviceRaw, err := oleutil.CallMethod(wmi, "ConnectServer", q.args...) - if err != nil { - return err - } - service := serviceRaw.ToIDispatch() - defer serviceRaw.Clear() - - // result is a SWBemObjectSet - resultRaw, err := oleutil.CallMethod(service, "ExecQuery", q.query) - if err != nil { - return err - } - result := resultRaw.ToIDispatch() - defer resultRaw.Clear() - - count, err := oleInt64(result, "Count") - if err != nil { - return err - } - - enumProperty, err := result.GetProperty("_NewEnum") - if err != nil { - return err - } - defer enumProperty.Clear() - - enum, err := enumProperty.ToIUnknown().IEnumVARIANT(ole.IID_IEnumVariant) - if err != nil { - return err - } - if enum == nil { - return fmt.Errorf("can't get IEnumVARIANT, enum is nil") - } - defer enum.Release() - - // Initialize a slice with Count capacity - dv.Set(reflect.MakeSlice(dv.Type(), 0, int(count))) - - var errFieldMismatch error - for itemRaw, length, err := enum.Next(1); length > 0; itemRaw, length, err = enum.Next(1) { - if err != nil { - return err - } - - err := func() error { - // item is a SWbemObject, but really a Win32_Process - item := itemRaw.ToIDispatch() - defer item.Release() - - ev := reflect.New(elemType) - if err = s.cWMIClient.loadEntity(ev.Interface(), item); err != nil { - if _, ok := err.(*ErrFieldMismatch); ok { - // We continue loading entities even in the face of field mismatch errors. - // If we encounter any other error, that other error is returned. Otherwise, - // an ErrFieldMismatch is returned. - errFieldMismatch = err - } else { - return err - } - } - if mat != multiArgTypeStructPtr { - ev = ev.Elem() - } - dv.Set(reflect.Append(dv, ev)) - return nil - }() - if err != nil { - return err - } - } - //fmt.Println("queryBackground: Finished") - return errFieldMismatch -} diff --git a/vendor/github.com/StackExchange/wmi/wmi.go b/vendor/github.com/StackExchange/wmi/wmi.go deleted file mode 100644 index eab18cbfe..000000000 --- a/vendor/github.com/StackExchange/wmi/wmi.go +++ /dev/null @@ -1,501 +0,0 @@ -// +build windows - -/* -Package wmi provides a WQL interface for WMI on Windows. 
- -Example code to print names of running processes: - - type Win32_Process struct { - Name string - } - - func main() { - var dst []Win32_Process - q := wmi.CreateQuery(&dst, "") - err := wmi.Query(q, &dst) - if err != nil { - log.Fatal(err) - } - for i, v := range dst { - println(i, v.Name) - } - } - -*/ -package wmi - -import ( - "bytes" - "errors" - "fmt" - "log" - "os" - "reflect" - "runtime" - "strconv" - "strings" - "sync" - "time" - - "github.com/go-ole/go-ole" - "github.com/go-ole/go-ole/oleutil" -) - -var l = log.New(os.Stdout, "", log.LstdFlags) - -var ( - ErrInvalidEntityType = errors.New("wmi: invalid entity type") - // ErrNilCreateObject is the error returned if CreateObject returns nil even - // if the error was nil. - ErrNilCreateObject = errors.New("wmi: create object returned nil") - lock sync.Mutex -) - -// S_FALSE is returned by CoInitializeEx if it was already called on this thread. -const S_FALSE = 0x00000001 - -// QueryNamespace invokes Query with the given namespace on the local machine. -func QueryNamespace(query string, dst interface{}, namespace string) error { - return Query(query, dst, nil, namespace) -} - -// Query runs the WQL query and appends the values to dst. -// -// dst must have type *[]S or *[]*S, for some struct type S. Fields selected in -// the query must have the same name in dst. Supported types are all signed and -// unsigned integers, time.Time, string, bool, or a pointer to one of those. -// Array types are not supported. -// -// By default, the local machine and default namespace are used. These can be -// changed using connectServerArgs. See -// http://msdn.microsoft.com/en-us/library/aa393720.aspx for details. -// -// Query is a wrapper around DefaultClient.Query. -func Query(query string, dst interface{}, connectServerArgs ...interface{}) error { - if DefaultClient.SWbemServicesClient == nil { - return DefaultClient.Query(query, dst, connectServerArgs...) - } - return DefaultClient.SWbemServicesClient.Query(query, dst, connectServerArgs...) -} - -// A Client is an WMI query client. -// -// Its zero value (DefaultClient) is a usable client. -type Client struct { - // NonePtrZero specifies if nil values for fields which aren't pointers - // should be returned as the field types zero value. - // - // Setting this to true allows stucts without pointer fields to be used - // without the risk failure should a nil value returned from WMI. - NonePtrZero bool - - // PtrNil specifies if nil values for pointer fields should be returned - // as nil. - // - // Setting this to true will set pointer fields to nil where WMI - // returned nil, otherwise the types zero value will be returned. - PtrNil bool - - // AllowMissingFields specifies that struct fields not present in the - // query result should not result in an error. - // - // Setting this to true allows custom queries to be used with full - // struct definitions instead of having to define multiple structs. - AllowMissingFields bool - - // SWbemServiceClient is an optional SWbemServices object that can be - // initialized and then reused across multiple queries. If it is null - // then the method will initialize a new temporary client each time. - SWbemServicesClient *SWbemServices -} - -// DefaultClient is the default Client and is used by Query, QueryNamespace -var DefaultClient = &Client{} - -// Query runs the WQL query and appends the values to dst. -// -// dst must have type *[]S or *[]*S, for some struct type S. Fields selected in -// the query must have the same name in dst. 
Supported types are all signed and -// unsigned integers, time.Time, string, bool, or a pointer to one of those. -// Array types are not supported. -// -// By default, the local machine and default namespace are used. These can be -// changed using connectServerArgs. See -// http://msdn.microsoft.com/en-us/library/aa393720.aspx for details. -func (c *Client) Query(query string, dst interface{}, connectServerArgs ...interface{}) error { - dv := reflect.ValueOf(dst) - if dv.Kind() != reflect.Ptr || dv.IsNil() { - return ErrInvalidEntityType - } - dv = dv.Elem() - mat, elemType := checkMultiArg(dv) - if mat == multiArgTypeInvalid { - return ErrInvalidEntityType - } - - lock.Lock() - defer lock.Unlock() - runtime.LockOSThread() - defer runtime.UnlockOSThread() - - err := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED) - if err != nil { - oleCode := err.(*ole.OleError).Code() - if oleCode != ole.S_OK && oleCode != S_FALSE { - return err - } - } - defer ole.CoUninitialize() - - unknown, err := oleutil.CreateObject("WbemScripting.SWbemLocator") - if err != nil { - return err - } else if unknown == nil { - return ErrNilCreateObject - } - defer unknown.Release() - - wmi, err := unknown.QueryInterface(ole.IID_IDispatch) - if err != nil { - return err - } - defer wmi.Release() - - // service is a SWbemServices - serviceRaw, err := oleutil.CallMethod(wmi, "ConnectServer", connectServerArgs...) - if err != nil { - return err - } - service := serviceRaw.ToIDispatch() - defer serviceRaw.Clear() - - // result is a SWBemObjectSet - resultRaw, err := oleutil.CallMethod(service, "ExecQuery", query) - if err != nil { - return err - } - result := resultRaw.ToIDispatch() - defer resultRaw.Clear() - - count, err := oleInt64(result, "Count") - if err != nil { - return err - } - - enumProperty, err := result.GetProperty("_NewEnum") - if err != nil { - return err - } - defer enumProperty.Clear() - - enum, err := enumProperty.ToIUnknown().IEnumVARIANT(ole.IID_IEnumVariant) - if err != nil { - return err - } - if enum == nil { - return fmt.Errorf("can't get IEnumVARIANT, enum is nil") - } - defer enum.Release() - - // Initialize a slice with Count capacity - dv.Set(reflect.MakeSlice(dv.Type(), 0, int(count))) - - var errFieldMismatch error - for itemRaw, length, err := enum.Next(1); length > 0; itemRaw, length, err = enum.Next(1) { - if err != nil { - return err - } - - err := func() error { - // item is a SWbemObject, but really a Win32_Process - item := itemRaw.ToIDispatch() - defer item.Release() - - ev := reflect.New(elemType) - if err = c.loadEntity(ev.Interface(), item); err != nil { - if _, ok := err.(*ErrFieldMismatch); ok { - // We continue loading entities even in the face of field mismatch errors. - // If we encounter any other error, that other error is returned. Otherwise, - // an ErrFieldMismatch is returned. - errFieldMismatch = err - } else { - return err - } - } - if mat != multiArgTypeStructPtr { - ev = ev.Elem() - } - dv.Set(reflect.Append(dv, ev)) - return nil - }() - if err != nil { - return err - } - } - return errFieldMismatch -} - -// ErrFieldMismatch is returned when a field is to be loaded into a different -// type than the one it was stored from, or when a field is missing or -// unexported in the destination struct. -// StructType is the type of the struct pointed to by the destination argument. 
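
As the comment above describes, Query keeps loading rows after a field mismatch and only reports the last *ErrFieldMismatch at the end, so a caller may choose to downgrade that error to a warning. A minimal sketch of that pattern follows; the Win32_Process struct is just an illustrative class.

```go
// +build windows

package main

import (
	"errors"
	"log"

	"github.com/StackExchange/wmi"
)

type Win32_Process struct {
	Name string
}

func main() {
	var dst []Win32_Process
	err := wmi.Query("SELECT Name FROM Win32_Process", &dst)

	// Rows keep loading past field mismatches; only the last mismatch is
	// reported, so it can be treated as partial success while any other
	// error stays fatal.
	var mismatch *wmi.ErrFieldMismatch
	switch {
	case errors.As(err, &mismatch):
		log.Printf("partial load, field %q skipped: %v", mismatch.FieldName, err)
	case err != nil:
		log.Fatal(err)
	}
	log.Printf("loaded %d processes", len(dst))
}
```
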
-type ErrFieldMismatch struct { - StructType reflect.Type - FieldName string - Reason string -} - -func (e *ErrFieldMismatch) Error() string { - return fmt.Sprintf("wmi: cannot load field %q into a %q: %s", - e.FieldName, e.StructType, e.Reason) -} - -var timeType = reflect.TypeOf(time.Time{}) - -// loadEntity loads a SWbemObject into a struct pointer. -func (c *Client) loadEntity(dst interface{}, src *ole.IDispatch) (errFieldMismatch error) { - v := reflect.ValueOf(dst).Elem() - for i := 0; i < v.NumField(); i++ { - f := v.Field(i) - of := f - isPtr := f.Kind() == reflect.Ptr - if isPtr { - ptr := reflect.New(f.Type().Elem()) - f.Set(ptr) - f = f.Elem() - } - n := v.Type().Field(i).Name - if !f.CanSet() { - return &ErrFieldMismatch{ - StructType: of.Type(), - FieldName: n, - Reason: "CanSet() is false", - } - } - prop, err := oleutil.GetProperty(src, n) - if err != nil { - if !c.AllowMissingFields { - errFieldMismatch = &ErrFieldMismatch{ - StructType: of.Type(), - FieldName: n, - Reason: "no such struct field", - } - } - continue - } - defer prop.Clear() - - if prop.VT == 0x1 { //VT_NULL - continue - } - - switch val := prop.Value().(type) { - case int8, int16, int32, int64, int: - v := reflect.ValueOf(val).Int() - switch f.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - f.SetInt(v) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - f.SetUint(uint64(v)) - default: - return &ErrFieldMismatch{ - StructType: of.Type(), - FieldName: n, - Reason: "not an integer class", - } - } - case uint8, uint16, uint32, uint64: - v := reflect.ValueOf(val).Uint() - switch f.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - f.SetInt(int64(v)) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - f.SetUint(v) - default: - return &ErrFieldMismatch{ - StructType: of.Type(), - FieldName: n, - Reason: "not an integer class", - } - } - case string: - switch f.Kind() { - case reflect.String: - f.SetString(val) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - iv, err := strconv.ParseInt(val, 10, 64) - if err != nil { - return err - } - f.SetInt(iv) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - uv, err := strconv.ParseUint(val, 10, 64) - if err != nil { - return err - } - f.SetUint(uv) - case reflect.Struct: - switch f.Type() { - case timeType: - if len(val) == 25 { - mins, err := strconv.Atoi(val[22:]) - if err != nil { - return err - } - val = val[:22] + fmt.Sprintf("%02d%02d", mins/60, mins%60) - } - t, err := time.Parse("20060102150405.000000-0700", val) - if err != nil { - return err - } - f.Set(reflect.ValueOf(t)) - } - } - case bool: - switch f.Kind() { - case reflect.Bool: - f.SetBool(val) - default: - return &ErrFieldMismatch{ - StructType: of.Type(), - FieldName: n, - Reason: "not a bool", - } - } - case float32: - switch f.Kind() { - case reflect.Float32: - f.SetFloat(float64(val)) - default: - return &ErrFieldMismatch{ - StructType: of.Type(), - FieldName: n, - Reason: "not a Float32", - } - } - default: - if f.Kind() == reflect.Slice { - switch f.Type().Elem().Kind() { - case reflect.String: - safeArray := prop.ToArray() - if safeArray != nil { - arr := safeArray.ToValueArray() - fArr := reflect.MakeSlice(f.Type(), len(arr), len(arr)) - for i, v := range arr { - s := fArr.Index(i) - s.SetString(v.(string)) - } - f.Set(fArr) - } - case reflect.Uint8, reflect.Uint16, reflect.Uint32, 
reflect.Uint64, reflect.Uint: - safeArray := prop.ToArray() - if safeArray != nil { - arr := safeArray.ToValueArray() - fArr := reflect.MakeSlice(f.Type(), len(arr), len(arr)) - for i, v := range arr { - s := fArr.Index(i) - s.SetUint(reflect.ValueOf(v).Uint()) - } - f.Set(fArr) - } - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - safeArray := prop.ToArray() - if safeArray != nil { - arr := safeArray.ToValueArray() - fArr := reflect.MakeSlice(f.Type(), len(arr), len(arr)) - for i, v := range arr { - s := fArr.Index(i) - s.SetInt(reflect.ValueOf(v).Int()) - } - f.Set(fArr) - } - default: - return &ErrFieldMismatch{ - StructType: of.Type(), - FieldName: n, - Reason: fmt.Sprintf("unsupported slice type (%T)", val), - } - } - } else { - typeof := reflect.TypeOf(val) - if typeof == nil && (isPtr || c.NonePtrZero) { - if (isPtr && c.PtrNil) || (!isPtr && c.NonePtrZero) { - of.Set(reflect.Zero(of.Type())) - } - break - } - return &ErrFieldMismatch{ - StructType: of.Type(), - FieldName: n, - Reason: fmt.Sprintf("unsupported type (%T)", val), - } - } - } - } - return errFieldMismatch -} - -type multiArgType int - -const ( - multiArgTypeInvalid multiArgType = iota - multiArgTypeStruct - multiArgTypeStructPtr -) - -// checkMultiArg checks that v has type []S, []*S for some struct type S. -// -// It returns what category the slice's elements are, and the reflect.Type -// that represents S. -func checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) { - if v.Kind() != reflect.Slice { - return multiArgTypeInvalid, nil - } - elemType = v.Type().Elem() - switch elemType.Kind() { - case reflect.Struct: - return multiArgTypeStruct, elemType - case reflect.Ptr: - elemType = elemType.Elem() - if elemType.Kind() == reflect.Struct { - return multiArgTypeStructPtr, elemType - } - } - return multiArgTypeInvalid, nil -} - -func oleInt64(item *ole.IDispatch, prop string) (int64, error) { - v, err := oleutil.GetProperty(item, prop) - if err != nil { - return 0, err - } - defer v.Clear() - - i := int64(v.Val) - return i, nil -} - -// CreateQuery returns a WQL query string that queries all columns of src. where -// is an optional string that is appended to the query, to be used with WHERE -// clauses. In such a case, the "WHERE" string should appear at the beginning. 
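
The CreateQuery helper documented above derives the WQL statement from the destination type: field names become the selected columns and the element type's name becomes the class after FROM. A small illustrative sketch, assuming a Win32_Service struct and a running-state filter:

```go
// +build windows

package main

import (
	"fmt"

	"github.com/StackExchange/wmi"
)

// Field names become the selected WQL columns; the element type's name
// becomes the class after FROM.
type Win32_Service struct {
	Name  string
	State string
}

func main() {
	var dst []Win32_Service
	// The where argument is appended verbatim, so it must carry its own
	// "WHERE" keyword.
	q := wmi.CreateQuery(&dst, "WHERE State = 'Running'")
	fmt.Println(q)
	// Prints: SELECT Name, State FROM Win32_Service WHERE State = 'Running'
}
```
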
-func CreateQuery(src interface{}, where string) string { - var b bytes.Buffer - b.WriteString("SELECT ") - s := reflect.Indirect(reflect.ValueOf(src)) - t := s.Type() - if s.Kind() == reflect.Slice { - t = t.Elem() - } - if t.Kind() != reflect.Struct { - return "" - } - var fields []string - for i := 0; i < t.NumField(); i++ { - fields = append(fields, t.Field(i).Name) - } - b.WriteString(strings.Join(fields, ", ")) - b.WriteString(" FROM ") - b.WriteString(t.Name()) - b.WriteString(" " + where) - return b.String() -} diff --git a/vendor/github.com/coreos/go-json/README b/vendor/github.com/ajeddeloh/go-json/README similarity index 100% rename from vendor/github.com/coreos/go-json/README rename to vendor/github.com/ajeddeloh/go-json/README diff --git a/vendor/github.com/coreos/go-json/decode.go b/vendor/github.com/ajeddeloh/go-json/decode.go similarity index 100% rename from vendor/github.com/coreos/go-json/decode.go rename to vendor/github.com/ajeddeloh/go-json/decode.go diff --git a/vendor/github.com/coreos/go-json/encode.go b/vendor/github.com/ajeddeloh/go-json/encode.go similarity index 100% rename from vendor/github.com/coreos/go-json/encode.go rename to vendor/github.com/ajeddeloh/go-json/encode.go diff --git a/vendor/github.com/coreos/go-json/fold.go b/vendor/github.com/ajeddeloh/go-json/fold.go similarity index 100% rename from vendor/github.com/coreos/go-json/fold.go rename to vendor/github.com/ajeddeloh/go-json/fold.go diff --git a/vendor/github.com/coreos/go-json/indent.go b/vendor/github.com/ajeddeloh/go-json/indent.go similarity index 100% rename from vendor/github.com/coreos/go-json/indent.go rename to vendor/github.com/ajeddeloh/go-json/indent.go diff --git a/vendor/github.com/coreos/go-json/scanner.go b/vendor/github.com/ajeddeloh/go-json/scanner.go similarity index 100% rename from vendor/github.com/coreos/go-json/scanner.go rename to vendor/github.com/ajeddeloh/go-json/scanner.go diff --git a/vendor/github.com/coreos/go-json/stream.go b/vendor/github.com/ajeddeloh/go-json/stream.go similarity index 100% rename from vendor/github.com/coreos/go-json/stream.go rename to vendor/github.com/ajeddeloh/go-json/stream.go diff --git a/vendor/github.com/coreos/go-json/tags.go b/vendor/github.com/ajeddeloh/go-json/tags.go similarity index 100% rename from vendor/github.com/coreos/go-json/tags.go rename to vendor/github.com/ajeddeloh/go-json/tags.go diff --git a/vendor/github.com/asaskevich/govalidator/.gitignore b/vendor/github.com/asaskevich/govalidator/.gitignore deleted file mode 100644 index 8d69a9418..000000000 --- a/vendor/github.com/asaskevich/govalidator/.gitignore +++ /dev/null @@ -1,15 +0,0 @@ -bin/ -.idea/ -# Binaries for programs and plugins -*.exe -*.exe~ -*.dll -*.so -*.dylib - -# Test binary, built with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - diff --git a/vendor/github.com/asaskevich/govalidator/.travis.yml b/vendor/github.com/asaskevich/govalidator/.travis.yml deleted file mode 100644 index bb83c6670..000000000 --- a/vendor/github.com/asaskevich/govalidator/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go -dist: xenial -go: - - '1.10' - - '1.11' - - '1.12' - - '1.13' - - 'tip' - -script: - - go test -coverpkg=./... 
-coverprofile=coverage.info -timeout=5s - - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md b/vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md deleted file mode 100644 index 4b462b0d8..000000000 --- a/vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,43 +0,0 @@ -# Contributor Code of Conduct - -This project adheres to [The Code Manifesto](http://codemanifesto.com) -as its guidelines for contributor interactions. - -## The Code Manifesto - -We want to work in an ecosystem that empowers developers to reach their -potential — one that encourages growth and effective collaboration. A space -that is safe for all. - -A space such as this benefits everyone that participates in it. It encourages -new developers to enter our field. It is through discussion and collaboration -that we grow, and through growth that we improve. - -In the effort to create such a place, we hold to these values: - -1. **Discrimination limits us.** This includes discrimination on the basis of - race, gender, sexual orientation, gender identity, age, nationality, - technology and any other arbitrary exclusion of a group of people. -2. **Boundaries honor us.** Your comfort levels are not everyone’s comfort - levels. Remember that, and if brought to your attention, heed it. -3. **We are our biggest assets.** None of us were born masters of our trade. - Each of us has been helped along the way. Return that favor, when and where - you can. -4. **We are resources for the future.** As an extension of #3, share what you - know. Make yourself a resource to help those that come after you. -5. **Respect defines us.** Treat others as you wish to be treated. Make your - discussions, criticisms and debates from a position of respectfulness. Ask - yourself, is it true? Is it necessary? Is it constructive? Anything less is - unacceptable. -6. **Reactions require grace.** Angry responses are valid, but abusive language - and vindictive actions are toxic. When something happens that offends you, - handle it assertively, but be respectful. Escalate reasonably, and try to - allow the offender an opportunity to explain themselves, and possibly - correct the issue. -7. **Opinions are just that: opinions.** Each and every one of us, due to our - background and upbringing, have varying opinions. That is perfectly - acceptable. Remember this: if you respect your own opinions, you should - respect the opinions of others. -8. **To err is human.** You might not intend it, but mistakes do happen and - contribute to build experience. Tolerate honest mistakes, and don't - hesitate to apologize if you make one yourself. diff --git a/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md b/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md deleted file mode 100644 index 7ed268a1e..000000000 --- a/vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md +++ /dev/null @@ -1,63 +0,0 @@ -#### Support -If you do have a contribution to the package, feel free to create a Pull Request or an Issue. 
- -#### What to contribute -If you don't know what to do, there are some features and functions that need to be done - -- [ ] Refactor code -- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check -- [ ] Create actual list of contributors and projects that currently using this package -- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues) -- [ ] Update actual [list of functions](https://github.com/asaskevich/govalidator#list-of-functions) -- [ ] Update [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that available for `ValidateStruct` and add new -- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC` etc -- [x] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224) -- [ ] Implement fuzzing testing -- [ ] Implement some struct/map/array utilities -- [ ] Implement map/array validation -- [ ] Implement benchmarking -- [ ] Implement batch of examples -- [ ] Look at forks for new features and fixes - -#### Advice -Feel free to create what you want, but keep in mind when you implement new features: -- Code must be clear and readable, names of variables/constants clearly describes what they are doing -- Public functions must be documented and described in source file and added to README.md to the list of available functions -- There are must be unit-tests for any new functions and improvements - -## Financial contributions - -We also welcome financial contributions in full transparency on our [open collective](https://opencollective.com/govalidator). -Anyone can file an expense. If the expense makes sense for the development of the community, it will be "merged" in the ledger of our open collective by the core contributors and the person who filed the expense will be reimbursed. - - -## Credits - - -### Contributors - -Thank you to all the people who have already contributed to govalidator! - - - -### Backers - -Thank you to all our backers! [[Become a backer](https://opencollective.com/govalidator#backer)] - - - - -### Sponsors - -Thank you to all our sponsors! (please ask your company to also support this open source project by [becoming a sponsor](https://opencollective.com/govalidator#sponsor)) - - - - - - - - - - - \ No newline at end of file diff --git a/vendor/github.com/asaskevich/govalidator/LICENSE b/vendor/github.com/asaskevich/govalidator/LICENSE deleted file mode 100644 index cacba9102..000000000 --- a/vendor/github.com/asaskevich/govalidator/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014-2020 Alex Saskevich - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/asaskevich/govalidator/README.md b/vendor/github.com/asaskevich/govalidator/README.md deleted file mode 100644 index 39121ea8e..000000000 --- a/vendor/github.com/asaskevich/govalidator/README.md +++ /dev/null @@ -1,619 +0,0 @@ -govalidator -=========== -[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/asaskevich/govalidator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [![GoDoc](https://godoc.org/github.com/asaskevich/govalidator?status.png)](https://godoc.org/github.com/asaskevich/govalidator) -[![Build Status](https://travis-ci.org/asaskevich/govalidator.svg?branch=master)](https://travis-ci.org/asaskevich/govalidator) -[![Coverage](https://codecov.io/gh/asaskevich/govalidator/branch/master/graph/badge.svg)](https://codecov.io/gh/asaskevich/govalidator) [![Go Report Card](https://goreportcard.com/badge/github.com/asaskevich/govalidator)](https://goreportcard.com/report/github.com/asaskevich/govalidator) [![GoSearch](http://go-search.org/badge?id=github.com%2Fasaskevich%2Fgovalidator)](http://go-search.org/view?id=github.com%2Fasaskevich%2Fgovalidator) [![Backers on Open Collective](https://opencollective.com/govalidator/backers/badge.svg)](#backers) [![Sponsors on Open Collective](https://opencollective.com/govalidator/sponsors/badge.svg)](#sponsors) [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_shield) - -A package of validators and sanitizers for strings, structs and collections. Based on [validator.js](https://github.com/chriso/validator.js). - -#### Installation -Make sure that Go is installed on your computer. -Type the following command in your terminal: - - go get github.com/asaskevich/govalidator - -or you can get specified release of the package with `gopkg.in`: - - go get gopkg.in/asaskevich/govalidator.v10 - -After it the package is ready to use. - - -#### Import package in your project -Add following line in your `*.go` file: -```go -import "github.com/asaskevich/govalidator" -``` -If you are unhappy to use long `govalidator`, you can do something like this: -```go -import ( - valid "github.com/asaskevich/govalidator" -) -``` - -#### Activate behavior to require all fields have a validation tag by default -`SetFieldsRequiredByDefault` causes validation to fail when struct fields do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`). A good place to activate this is a package init function or the main() function. - -`SetNilPtrAllowedByRequired` causes validation to pass when struct fields marked by `required` are set to nil. This is disabled by default for consistency, but some packages that need to be able to determine between `nil` and `zero value` state can use this. If disabled, both `nil` and `zero` values cause validation errors. 
- -```go -import "github.com/asaskevich/govalidator" - -func init() { - govalidator.SetFieldsRequiredByDefault(true) -} -``` - -Here's some code to explain it: -```go -// this struct definition will fail govalidator.ValidateStruct() (and the field values do not matter): -type exampleStruct struct { - Name string `` - Email string `valid:"email"` -} - -// this, however, will only fail when Email is empty or an invalid email address: -type exampleStruct2 struct { - Name string `valid:"-"` - Email string `valid:"email"` -} - -// lastly, this will only fail when Email is an invalid email address but not when it's empty: -type exampleStruct2 struct { - Name string `valid:"-"` - Email string `valid:"email,optional"` -} -``` - -#### Recent breaking changes (see [#123](https://github.com/asaskevich/govalidator/pull/123)) -##### Custom validator function signature -A context was added as the second parameter, for structs this is the object being validated – this makes dependent validation possible. -```go -import "github.com/asaskevich/govalidator" - -// old signature -func(i interface{}) bool - -// new signature -func(i interface{}, o interface{}) bool -``` - -##### Adding a custom validator -This was changed to prevent data races when accessing custom validators. -```go -import "github.com/asaskevich/govalidator" - -// before -govalidator.CustomTypeTagMap["customByteArrayValidator"] = func(i interface{}, o interface{}) bool { - // ... -} - -// after -govalidator.CustomTypeTagMap.Set("customByteArrayValidator", func(i interface{}, o interface{}) bool { - // ... -}) -``` - -#### List of functions: -```go -func Abs(value float64) float64 -func BlackList(str, chars string) string -func ByteLength(str string, params ...string) bool -func CamelCaseToUnderscore(str string) string -func Contains(str, substring string) bool -func Count(array []interface{}, iterator ConditionIterator) int -func Each(array []interface{}, iterator Iterator) -func ErrorByField(e error, field string) string -func ErrorsByField(e error) map[string]string -func Filter(array []interface{}, iterator ConditionIterator) []interface{} -func Find(array []interface{}, iterator ConditionIterator) interface{} -func GetLine(s string, index int) (string, error) -func GetLines(s string) []string -func HasLowerCase(str string) bool -func HasUpperCase(str string) bool -func HasWhitespace(str string) bool -func HasWhitespaceOnly(str string) bool -func InRange(value interface{}, left interface{}, right interface{}) bool -func InRangeFloat32(value, left, right float32) bool -func InRangeFloat64(value, left, right float64) bool -func InRangeInt(value, left, right interface{}) bool -func IsASCII(str string) bool -func IsAlpha(str string) bool -func IsAlphanumeric(str string) bool -func IsBase64(str string) bool -func IsByteLength(str string, min, max int) bool -func IsCIDR(str string) bool -func IsCRC32(str string) bool -func IsCRC32b(str string) bool -func IsCreditCard(str string) bool -func IsDNSName(str string) bool -func IsDataURI(str string) bool -func IsDialString(str string) bool -func IsDivisibleBy(str, num string) bool -func IsEmail(str string) bool -func IsExistingEmail(email string) bool -func IsFilePath(str string) (bool, int) -func IsFloat(str string) bool -func IsFullWidth(str string) bool -func IsHalfWidth(str string) bool -func IsHash(str string, algorithm string) bool -func IsHexadecimal(str string) bool -func IsHexcolor(str string) bool -func IsHost(str string) bool -func IsIP(str string) bool -func IsIPv4(str string) bool -func 
IsIPv6(str string) bool -func IsISBN(str string, version int) bool -func IsISBN10(str string) bool -func IsISBN13(str string) bool -func IsISO3166Alpha2(str string) bool -func IsISO3166Alpha3(str string) bool -func IsISO4217(str string) bool -func IsISO693Alpha2(str string) bool -func IsISO693Alpha3b(str string) bool -func IsIn(str string, params ...string) bool -func IsInRaw(str string, params ...string) bool -func IsInt(str string) bool -func IsJSON(str string) bool -func IsLatitude(str string) bool -func IsLongitude(str string) bool -func IsLowerCase(str string) bool -func IsMAC(str string) bool -func IsMD4(str string) bool -func IsMD5(str string) bool -func IsMagnetURI(str string) bool -func IsMongoID(str string) bool -func IsMultibyte(str string) bool -func IsNatural(value float64) bool -func IsNegative(value float64) bool -func IsNonNegative(value float64) bool -func IsNonPositive(value float64) bool -func IsNotNull(str string) bool -func IsNull(str string) bool -func IsNumeric(str string) bool -func IsPort(str string) bool -func IsPositive(value float64) bool -func IsPrintableASCII(str string) bool -func IsRFC3339(str string) bool -func IsRFC3339WithoutZone(str string) bool -func IsRGBcolor(str string) bool -func IsRequestURI(rawurl string) bool -func IsRequestURL(rawurl string) bool -func IsRipeMD128(str string) bool -func IsRipeMD160(str string) bool -func IsRsaPub(str string, params ...string) bool -func IsRsaPublicKey(str string, keylen int) bool -func IsSHA1(str string) bool -func IsSHA256(str string) bool -func IsSHA384(str string) bool -func IsSHA512(str string) bool -func IsSSN(str string) bool -func IsSemver(str string) bool -func IsTiger128(str string) bool -func IsTiger160(str string) bool -func IsTiger192(str string) bool -func IsTime(str string, format string) bool -func IsType(v interface{}, params ...string) bool -func IsURL(str string) bool -func IsUTFDigit(str string) bool -func IsUTFLetter(str string) bool -func IsUTFLetterNumeric(str string) bool -func IsUTFNumeric(str string) bool -func IsUUID(str string) bool -func IsUUIDv3(str string) bool -func IsUUIDv4(str string) bool -func IsUUIDv5(str string) bool -func IsUnixTime(str string) bool -func IsUpperCase(str string) bool -func IsVariableWidth(str string) bool -func IsWhole(value float64) bool -func LeftTrim(str, chars string) string -func Map(array []interface{}, iterator ResultIterator) []interface{} -func Matches(str, pattern string) bool -func MaxStringLength(str string, params ...string) bool -func MinStringLength(str string, params ...string) bool -func NormalizeEmail(str string) (string, error) -func PadBoth(str string, padStr string, padLen int) string -func PadLeft(str string, padStr string, padLen int) string -func PadRight(str string, padStr string, padLen int) string -func PrependPathToErrors(err error, path string) error -func Range(str string, params ...string) bool -func RemoveTags(s string) string -func ReplacePattern(str, pattern, replace string) string -func Reverse(s string) string -func RightTrim(str, chars string) string -func RuneLength(str string, params ...string) bool -func SafeFileName(str string) string -func SetFieldsRequiredByDefault(value bool) -func SetNilPtrAllowedByRequired(value bool) -func Sign(value float64) float64 -func StringLength(str string, params ...string) bool -func StringMatches(s string, params ...string) bool -func StripLow(str string, keepNewLines bool) string -func ToBoolean(str string) (bool, error) -func ToFloat(str string) (float64, error) -func ToInt(value 
interface{}) (res int64, err error) -func ToJSON(obj interface{}) (string, error) -func ToString(obj interface{}) string -func Trim(str, chars string) string -func Truncate(str string, length int, ending string) string -func TruncatingErrorf(str string, args ...interface{}) error -func UnderscoreToCamelCase(s string) string -func ValidateMap(inputMap map[string]interface{}, validationMap map[string]interface{}) (bool, error) -func ValidateStruct(s interface{}) (bool, error) -func WhiteList(str, chars string) string -type ConditionIterator -type CustomTypeValidator -type Error -func (e Error) Error() string -type Errors -func (es Errors) Error() string -func (es Errors) Errors() []error -type ISO3166Entry -type ISO693Entry -type InterfaceParamValidator -type Iterator -type ParamValidator -type ResultIterator -type UnsupportedTypeError -func (e *UnsupportedTypeError) Error() string -type Validator -``` - -#### Examples -###### IsURL -```go -println(govalidator.IsURL(`http://user@pass:domain.com/path/page`)) -``` -###### IsType -```go -println(govalidator.IsType("Bob", "string")) -println(govalidator.IsType(1, "int")) -i := 1 -println(govalidator.IsType(&i, "*int")) -``` - -IsType can be used through the tag `type` which is essential for map validation: -```go -type User struct { - Name string `valid:"type(string)"` - Age int `valid:"type(int)"` - Meta interface{} `valid:"type(string)"` -} -result, err := govalidator.ValidateStruct(User{"Bob", 20, "meta"}) -if err != nil { - println("error: " + err.Error()) -} -println(result) -``` -###### ToString -```go -type User struct { - FirstName string - LastName string -} - -str := govalidator.ToString(&User{"John", "Juan"}) -println(str) -``` -###### Each, Map, Filter, Count for slices -Each iterates over the slice/array and calls Iterator for every item -```go -data := []interface{}{1, 2, 3, 4, 5} -var fn govalidator.Iterator = func(value interface{}, index int) { - println(value.(int)) -} -govalidator.Each(data, fn) -``` -```go -data := []interface{}{1, 2, 3, 4, 5} -var fn govalidator.ResultIterator = func(value interface{}, index int) interface{} { - return value.(int) * 3 -} -_ = govalidator.Map(data, fn) // result = []interface{}{1, 6, 9, 12, 15} -``` -```go -data := []interface{}{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} -var fn govalidator.ConditionIterator = func(value interface{}, index int) bool { - return value.(int)%2 == 0 -} -_ = govalidator.Filter(data, fn) // result = []interface{}{2, 4, 6, 8, 10} -_ = govalidator.Count(data, fn) // result = 5 -``` -###### ValidateStruct [#2](https://github.com/asaskevich/govalidator/pull/2) -If you want to validate structs, you can use tag `valid` for any field in your structure. All validators used with this field in one tag are separated by comma. If you want to skip validation, place `-` in your tag. If you need a validator that is not on the list below, you can add it like this: -```go -govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool { - return str == "duck" -}) -``` -For completely custom validators (interface-based), see below. 
-
-Here is a list of available validators for struct fields (validator - used function):
-```go
-"email": IsEmail,
-"url": IsURL,
-"dialstring": IsDialString,
-"requrl": IsRequestURL,
-"requri": IsRequestURI,
-"alpha": IsAlpha,
-"utfletter": IsUTFLetter,
-"alphanum": IsAlphanumeric,
-"utfletternum": IsUTFLetterNumeric,
-"numeric": IsNumeric,
-"utfnumeric": IsUTFNumeric,
-"utfdigit": IsUTFDigit,
-"hexadecimal": IsHexadecimal,
-"hexcolor": IsHexcolor,
-"rgbcolor": IsRGBcolor,
-"lowercase": IsLowerCase,
-"uppercase": IsUpperCase,
-"int": IsInt,
-"float": IsFloat,
-"null": IsNull,
-"uuid": IsUUID,
-"uuidv3": IsUUIDv3,
-"uuidv4": IsUUIDv4,
-"uuidv5": IsUUIDv5,
-"creditcard": IsCreditCard,
-"isbn10": IsISBN10,
-"isbn13": IsISBN13,
-"json": IsJSON,
-"multibyte": IsMultibyte,
-"ascii": IsASCII,
-"printableascii": IsPrintableASCII,
-"fullwidth": IsFullWidth,
-"halfwidth": IsHalfWidth,
-"variablewidth": IsVariableWidth,
-"base64": IsBase64,
-"datauri": IsDataURI,
-"ip": IsIP,
-"port": IsPort,
-"ipv4": IsIPv4,
-"ipv6": IsIPv6,
-"dns": IsDNSName,
-"host": IsHost,
-"mac": IsMAC,
-"latitude": IsLatitude,
-"longitude": IsLongitude,
-"ssn": IsSSN,
-"semver": IsSemver,
-"rfc3339": IsRFC3339,
-"rfc3339WithoutZone": IsRFC3339WithoutZone,
-"ISO3166Alpha2": IsISO3166Alpha2,
-"ISO3166Alpha3": IsISO3166Alpha3,
-```
-Validators with parameters
-
-```go
-"range(min|max)": Range,
-"length(min|max)": ByteLength,
-"runelength(min|max)": RuneLength,
-"stringlength(min|max)": StringLength,
-"matches(pattern)": StringMatches,
-"in(string1|string2|...|stringN)": IsIn,
-"rsapub(keylength)": IsRsaPub,
-"minstringlength(int)": MinStringLength,
-"maxstringlength(int)": MaxStringLength,
-```
-Validators with parameters for any type
-
-```go
-"type(type)": IsType,
-```
-
-And here is a small example of usage:
-```go
-type Post struct {
-	Title    string `valid:"alphanum,required"`
-	Message  string `valid:"duck,ascii"`
-	Message2 string `valid:"animal(dog)"`
-	AuthorIP string `valid:"ipv4"`
-	Date     string `valid:"-"`
-}
-post := &Post{
-	Title:    "My Example Post",
-	Message:  "duck",
-	Message2: "dog",
-	AuthorIP: "123.234.54.3",
-}
-
-// Add your own struct validation tags
-govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool {
-	return str == "duck"
-})
-
-// Add your own struct validation tags with parameter
-govalidator.ParamTagMap["animal"] = govalidator.ParamValidator(func(str string, params ...string) bool {
-	species := params[0]
-	return str == species
-})
-govalidator.ParamTagRegexMap["animal"] = regexp.MustCompile("^animal\\((\\w+)\\)$")
-
-result, err := govalidator.ValidateStruct(post)
-if err != nil {
-	println("error: " + err.Error())
-}
-println(result)
-```
-###### ValidateMap [#338](https://github.com/asaskevich/govalidator/pull/338)
-If you want to validate maps, you can use the map to be validated and a validation map that contains the same tags used in ValidateStruct; both maps have to be in the form `map[string]interface{}`.
-
-So here is a small example of usage:
-```go
-var mapTemplate = map[string]interface{}{
-	"name":"required,alpha",
-	"family":"required,alpha",
-	"email":"required,email",
-	"cell-phone":"numeric",
-	"address":map[string]interface{}{
-		"line1":"required,alphanum",
-		"line2":"alphanum",
-		"postal-code":"numeric",
-	},
-}
-
-var inputMap = map[string]interface{}{
-	"name":"Bob",
-	"family":"Smith",
-	"email":"foo@bar.baz",
-	"address":map[string]interface{}{
-		"line1":"",
-		"line2":"",
-		"postal-code":"",
-	},
-}
-
-result, err := govalidator.ValidateMap(inputMap, mapTemplate)
-if
err != nil { - println("error: " + err.Error()) -} -println(result) -``` - -###### WhiteList -```go -// Remove all characters from string ignoring characters between "a" and "z" -println(govalidator.WhiteList("a3a43a5a4a3a2a23a4a5a4a3a4", "a-z") == "aaaaaaaaaaaa") -``` - -###### Custom validation functions -Custom validation using your own domain specific validators is also available - here's an example of how to use it: -```go -import "github.com/asaskevich/govalidator" - -type CustomByteArray [6]byte // custom types are supported and can be validated - -type StructWithCustomByteArray struct { - ID CustomByteArray `valid:"customByteArrayValidator,customMinLengthValidator"` // multiple custom validators are possible as well and will be evaluated in sequence - Email string `valid:"email"` - CustomMinLength int `valid:"-"` -} - -govalidator.CustomTypeTagMap.Set("customByteArrayValidator", func(i interface{}, context interface{}) bool { - switch v := context.(type) { // you can type switch on the context interface being validated - case StructWithCustomByteArray: - // you can check and validate against some other field in the context, - // return early or not validate against the context at all – your choice - case SomeOtherType: - // ... - default: - // expecting some other type? Throw/panic here or continue - } - - switch v := i.(type) { // type switch on the struct field being validated - case CustomByteArray: - for _, e := range v { // this validator checks that the byte array is not empty, i.e. not all zeroes - if e != 0 { - return true - } - } - } - return false -}) -govalidator.CustomTypeTagMap.Set("customMinLengthValidator", func(i interface{}, context interface{}) bool { - switch v := context.(type) { // this validates a field against the value in another field, i.e. dependent validation - case StructWithCustomByteArray: - return len(v.ID) >= v.CustomMinLength - } - return false -}) -``` - -###### Loop over Error() -By default .Error() returns all errors in a single String. To access each error you can do this: -```go - if err != nil { - errs := err.(govalidator.Errors).Errors() - for _, e := range errs { - fmt.Println(e.Error()) - } - } -``` - -###### Custom error messages -Custom error messages are supported via annotations by adding the `~` separator - here's an example of how to use it: -```go -type Ticket struct { - Id int64 `json:"id"` - FirstName string `json:"firstname" valid:"required~First name is blank"` -} -``` - -#### Notes -Documentation is available here: [godoc.org](https://godoc.org/github.com/asaskevich/govalidator). -Full information about code coverage is also available here: [govalidator on gocover.io](http://gocover.io/github.com/asaskevich/govalidator). - -#### Support -If you do have a contribution to the package, feel free to create a Pull Request or an Issue. 
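###### Custom error messages with the error loop
The custom `~` messages and the `.Errors()` loop shown above can be combined. Below is a minimal, illustrative sketch (the `Ticket` struct and its input values are made up for demonstration, not taken from the package): it validates a struct that mixes a custom message with a regular `email` tag and prints each failure on its own line.
```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

type Ticket struct {
	Id        int64  `json:"id"`
	FirstName string `json:"firstname" valid:"required~First name is blank"`
	Email     string `json:"email" valid:"email"`
}

func main() {
	// FirstName is empty and Email is malformed, so both validators fail.
	ok, err := govalidator.ValidateStruct(Ticket{Id: 1, Email: "not-an-email"})
	fmt.Println("valid:", ok) // false

	if err != nil {
		// ValidateStruct returns a govalidator.Errors value; iterate it to
		// handle each failure separately instead of one concatenated string.
		for _, e := range err.(govalidator.Errors).Errors() {
			fmt.Println(e.Error()) // the FirstName entry prints the custom message
		}
	}
}
```
The `FirstName` failure prints only "First name is blank", because a custom message set with `~` replaces the default "field: reason" format.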
-
-#### What to contribute
-If you don't know what to do, here are some features and functions that still need to be done:
-
-- [ ] Refactor code
-- [ ] Edit the docs and the [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check
-- [ ] Create an up-to-date list of contributors and of projects that are currently using this package
-- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues)
-- [ ] Keep the [list of functions](https://github.com/asaskevich/govalidator#list-of-functions) up to date
-- [ ] Update the [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that are available for `ValidateStruct` and add new ones
-- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC`, etc.
-- [x] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224)
-- [ ] Implement fuzz testing
-- [ ] Implement some struct/map/array utilities
-- [ ] Implement map/array validation
-- [ ] Implement benchmarking
-- [ ] Implement a batch of examples
-- [ ] Look at forks for new features and fixes
-
-#### Advice
-Feel free to create what you want, but keep the following in mind when you implement new features:
-- Code must be clear and readable, and the names of variables/constants must clearly describe what they are for
-- Public functions must be documented in the source file and added to the list of available functions in README.md
-- There must be unit tests for any new functions and improvements
-
-## Credits
-### Contributors
-
-This project exists thanks to all the people who contribute. [[Contribute](CONTRIBUTING.md)].
-
-#### Special thanks to [contributors](https://github.com/asaskevich/govalidator/graphs/contributors)
-* [Daniel Lohse](https://github.com/annismckenzie)
-* [Attila Oláh](https://github.com/attilaolah)
-* [Daniel Korner](https://github.com/Dadie)
-* [Steven Wilkin](https://github.com/stevenwilkin)
-* [Deiwin Sarjas](https://github.com/deiwin)
-* [Noah Shibley](https://github.com/slugmobile)
-* [Nathan Davies](https://github.com/nathj07)
-* [Matt Sanford](https://github.com/mzsanford)
-* [Simon ccl1115](https://github.com/ccl1115)
-
-
-
-
-### Backers
-
-Thank you to all our backers! 🙏 [[Become a backer](https://opencollective.com/govalidator#backer)]
-
-
-
-
-### Sponsors
-
-Support this project by becoming a sponsor. Your logo will show up here with a link to your website.
[[Become a sponsor](https://opencollective.com/govalidator#sponsor)] - - - - - - - - - - - - - - - -## License -[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_large) diff --git a/vendor/github.com/asaskevich/govalidator/arrays.go b/vendor/github.com/asaskevich/govalidator/arrays.go deleted file mode 100644 index 3e1da7cb4..000000000 --- a/vendor/github.com/asaskevich/govalidator/arrays.go +++ /dev/null @@ -1,87 +0,0 @@ -package govalidator - -// Iterator is the function that accepts element of slice/array and its index -type Iterator func(interface{}, int) - -// ResultIterator is the function that accepts element of slice/array and its index and returns any result -type ResultIterator func(interface{}, int) interface{} - -// ConditionIterator is the function that accepts element of slice/array and its index and returns boolean -type ConditionIterator func(interface{}, int) bool - -// ReduceIterator is the function that accepts two element of slice/array and returns result of merging those values -type ReduceIterator func(interface{}, interface{}) interface{} - -// Some validates that any item of array corresponds to ConditionIterator. Returns boolean. -func Some(array []interface{}, iterator ConditionIterator) bool { - res := false - for index, data := range array { - res = res || iterator(data, index) - } - return res -} - -// Every validates that every item of array corresponds to ConditionIterator. Returns boolean. -func Every(array []interface{}, iterator ConditionIterator) bool { - res := true - for index, data := range array { - res = res && iterator(data, index) - } - return res -} - -// Reduce boils down a list of values into a single value by ReduceIterator -func Reduce(array []interface{}, iterator ReduceIterator, initialValue interface{}) interface{} { - for _, data := range array { - initialValue = iterator(initialValue, data) - } - return initialValue -} - -// Each iterates over the slice and apply Iterator to every item -func Each(array []interface{}, iterator Iterator) { - for index, data := range array { - iterator(data, index) - } -} - -// Map iterates over the slice and apply ResultIterator to every item. Returns new slice as a result. -func Map(array []interface{}, iterator ResultIterator) []interface{} { - var result = make([]interface{}, len(array)) - for index, data := range array { - result[index] = iterator(data, index) - } - return result -} - -// Find iterates over the slice and apply ConditionIterator to every item. Returns first item that meet ConditionIterator or nil otherwise. -func Find(array []interface{}, iterator ConditionIterator) interface{} { - for index, data := range array { - if iterator(data, index) { - return data - } - } - return nil -} - -// Filter iterates over the slice and apply ConditionIterator to every item. Returns new slice. -func Filter(array []interface{}, iterator ConditionIterator) []interface{} { - var result = make([]interface{}, 0) - for index, data := range array { - if iterator(data, index) { - result = append(result, data) - } - } - return result -} - -// Count iterates over the slice and apply ConditionIterator to every item. Returns count of items that meets ConditionIterator. 
-func Count(array []interface{}, iterator ConditionIterator) int { - count := 0 - for index, data := range array { - if iterator(data, index) { - count = count + 1 - } - } - return count -} diff --git a/vendor/github.com/asaskevich/govalidator/converter.go b/vendor/github.com/asaskevich/govalidator/converter.go deleted file mode 100644 index d68e990fc..000000000 --- a/vendor/github.com/asaskevich/govalidator/converter.go +++ /dev/null @@ -1,81 +0,0 @@ -package govalidator - -import ( - "encoding/json" - "fmt" - "reflect" - "strconv" -) - -// ToString convert the input to a string. -func ToString(obj interface{}) string { - res := fmt.Sprintf("%v", obj) - return res -} - -// ToJSON convert the input to a valid JSON string -func ToJSON(obj interface{}) (string, error) { - res, err := json.Marshal(obj) - if err != nil { - res = []byte("") - } - return string(res), err -} - -// ToFloat convert the input string to a float, or 0.0 if the input is not a float. -func ToFloat(value interface{}) (res float64, err error) { - val := reflect.ValueOf(value) - - switch value.(type) { - case int, int8, int16, int32, int64: - res = float64(val.Int()) - case uint, uint8, uint16, uint32, uint64: - res = float64(val.Uint()) - case float32, float64: - res = val.Float() - case string: - res, err = strconv.ParseFloat(val.String(), 64) - if err != nil { - res = 0 - } - default: - err = fmt.Errorf("ToInt: unknown interface type %T", value) - res = 0 - } - - return -} - -// ToInt convert the input string or any int type to an integer type 64, or 0 if the input is not an integer. -func ToInt(value interface{}) (res int64, err error) { - val := reflect.ValueOf(value) - - switch value.(type) { - case int, int8, int16, int32, int64: - res = val.Int() - case uint, uint8, uint16, uint32, uint64: - res = int64(val.Uint()) - case float32, float64: - res = int64(val.Float()) - case string: - if IsInt(val.String()) { - res, err = strconv.ParseInt(val.String(), 0, 64) - if err != nil { - res = 0 - } - } else { - err = fmt.Errorf("ToInt: invalid numeric format %g", value) - res = 0 - } - default: - err = fmt.Errorf("ToInt: unknown interface type %T", value) - res = 0 - } - - return -} - -// ToBoolean convert the input string to a boolean. -func ToBoolean(str string) (bool, error) { - return strconv.ParseBool(str) -} diff --git a/vendor/github.com/asaskevich/govalidator/doc.go b/vendor/github.com/asaskevich/govalidator/doc.go deleted file mode 100644 index 55dce62dc..000000000 --- a/vendor/github.com/asaskevich/govalidator/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -package govalidator - -// A package of validators and sanitizers for strings, structures and collections. diff --git a/vendor/github.com/asaskevich/govalidator/error.go b/vendor/github.com/asaskevich/govalidator/error.go deleted file mode 100644 index 1da2336f4..000000000 --- a/vendor/github.com/asaskevich/govalidator/error.go +++ /dev/null @@ -1,47 +0,0 @@ -package govalidator - -import ( - "sort" - "strings" -) - -// Errors is an array of multiple errors and conforms to the error interface. -type Errors []error - -// Errors returns itself. -func (es Errors) Errors() []error { - return es -} - -func (es Errors) Error() string { - var errs []string - for _, e := range es { - errs = append(errs, e.Error()) - } - sort.Strings(errs) - return strings.Join(errs, ";") -} - -// Error encapsulates a name, an error and whether there's a custom error message or not. 
-type Error struct { - Name string - Err error - CustomErrorMessageExists bool - - // Validator indicates the name of the validator that failed - Validator string - Path []string -} - -func (e Error) Error() string { - if e.CustomErrorMessageExists { - return e.Err.Error() - } - - errName := e.Name - if len(e.Path) > 0 { - errName = strings.Join(append(e.Path, e.Name), ".") - } - - return errName + ": " + e.Err.Error() -} diff --git a/vendor/github.com/asaskevich/govalidator/numerics.go b/vendor/github.com/asaskevich/govalidator/numerics.go deleted file mode 100644 index 5041d9e86..000000000 --- a/vendor/github.com/asaskevich/govalidator/numerics.go +++ /dev/null @@ -1,100 +0,0 @@ -package govalidator - -import ( - "math" -) - -// Abs returns absolute value of number -func Abs(value float64) float64 { - return math.Abs(value) -} - -// Sign returns signum of number: 1 in case of value > 0, -1 in case of value < 0, 0 otherwise -func Sign(value float64) float64 { - if value > 0 { - return 1 - } else if value < 0 { - return -1 - } else { - return 0 - } -} - -// IsNegative returns true if value < 0 -func IsNegative(value float64) bool { - return value < 0 -} - -// IsPositive returns true if value > 0 -func IsPositive(value float64) bool { - return value > 0 -} - -// IsNonNegative returns true if value >= 0 -func IsNonNegative(value float64) bool { - return value >= 0 -} - -// IsNonPositive returns true if value <= 0 -func IsNonPositive(value float64) bool { - return value <= 0 -} - -// InRangeInt returns true if value lies between left and right border -func InRangeInt(value, left, right interface{}) bool { - value64, _ := ToInt(value) - left64, _ := ToInt(left) - right64, _ := ToInt(right) - if left64 > right64 { - left64, right64 = right64, left64 - } - return value64 >= left64 && value64 <= right64 -} - -// InRangeFloat32 returns true if value lies between left and right border -func InRangeFloat32(value, left, right float32) bool { - if left > right { - left, right = right, left - } - return value >= left && value <= right -} - -// InRangeFloat64 returns true if value lies between left and right border -func InRangeFloat64(value, left, right float64) bool { - if left > right { - left, right = right, left - } - return value >= left && value <= right -} - -// InRange returns true if value lies between left and right border, generic type to handle int, float32, float64 and string. -// All types must the same type. 
-// False if value doesn't lie in range or if it incompatible or not comparable -func InRange(value interface{}, left interface{}, right interface{}) bool { - switch value.(type) { - case int: - intValue, _ := ToInt(value) - intLeft, _ := ToInt(left) - intRight, _ := ToInt(right) - return InRangeInt(intValue, intLeft, intRight) - case float32, float64: - intValue, _ := ToFloat(value) - intLeft, _ := ToFloat(left) - intRight, _ := ToFloat(right) - return InRangeFloat64(intValue, intLeft, intRight) - case string: - return value.(string) >= left.(string) && value.(string) <= right.(string) - default: - return false - } -} - -// IsWhole returns true if value is whole number -func IsWhole(value float64) bool { - return math.Remainder(value, 1) == 0 -} - -// IsNatural returns true if value is natural number (positive and whole) -func IsNatural(value float64) bool { - return IsWhole(value) && IsPositive(value) -} diff --git a/vendor/github.com/asaskevich/govalidator/patterns.go b/vendor/github.com/asaskevich/govalidator/patterns.go deleted file mode 100644 index 106ed94f8..000000000 --- a/vendor/github.com/asaskevich/govalidator/patterns.go +++ /dev/null @@ -1,107 +0,0 @@ -package govalidator - -import "regexp" - -// Basic regular expressions for validating strings -const ( - Email string = "^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$" - CreditCard string = "^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|(222[1-9]|22[3-9][0-9]|2[3-6][0-9]{2}|27[01][0-9]|2720)[0-9]{12}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11}|6[27][0-9]{14})$" - ISBN10 string = "^(?:[0-9]{9}X|[0-9]{10})$" - ISBN13 string = "^(?:[0-9]{13})$" - UUID3 string = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$" - UUID4 string = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" - UUID5 string = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" - UUID string = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$" - Alpha string = "^[a-zA-Z]+$" - Alphanumeric string = "^[a-zA-Z0-9]+$" - Numeric string = "^[0-9]+$" - Int string = "^(?:[-+]?(?:0|[1-9][0-9]*))$" - Float string = "^(?:[-+]?(?:[0-9]+))?(?:\\.[0-9]*)?(?:[eE][\\+\\-]?(?:[0-9]+))?$" - Hexadecimal string = "^[0-9a-fA-F]+$" - Hexcolor string = "^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$" - RGBcolor string = 
"^rgb\\(\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*\\)$" - ASCII string = "^[\x00-\x7F]+$" - Multibyte string = "[^\x00-\x7F]" - FullWidth string = "[^\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]" - HalfWidth string = "[\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]" - Base64 string = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$" - PrintableASCII string = "^[\x20-\x7E]+$" - DataURI string = "^data:.+\\/(.+);base64$" - MagnetURI string = "^magnet:\\?xt=urn:[a-zA-Z0-9]+:[a-zA-Z0-9]{32,40}&dn=.+&tr=.+$" - Latitude string = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$" - Longitude string = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$" - DNSName string = `^([a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62}){1}(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*[\._]?$` - IP string = `(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))` - URLSchema string = `((ftp|tcp|udp|wss?|https?):\/\/)` - URLUsername string = `(\S+(:\S*)?@)` - URLPath string = `((\/|\?|#)[^\s]*)` - URLPort string = `(:(\d{1,5}))` - URLIP string = `([1-9]\d?|1\d\d|2[01]\d|22[0-3]|24\d|25[0-5])(\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-5]))` - URLSubdomain string = `((www\.)|([a-zA-Z0-9]+([-_\.]?[a-zA-Z0-9])*[a-zA-Z0-9]\.[a-zA-Z0-9]+))` - URL = `^` + URLSchema + `?` + URLUsername + `?` + `((` + URLIP + `|(\[` + IP + `\])|(([a-zA-Z0-9]([a-zA-Z0-9-_]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|(` + URLSubdomain + `?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))\.?` + URLPort + `?` + URLPath + `?$` - SSN string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$` - WinPath string = `^[a-zA-Z]:\\(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$` - UnixPath string = `^(/[^/\x00]*)+/?$` - Semver string = "^v?(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)(-(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(\\.(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*)?(\\+[0-9a-zA-Z-]+(\\.[0-9a-zA-Z-]+)*)?$" - tagName string = "valid" - hasLowerCase string = ".*[[:lower:]]" - hasUpperCase string = ".*[[:upper:]]" - hasWhitespace string = ".*[[:space:]]" - hasWhitespaceOnly string = "^[[:space:]]+$" - IMEI string = "^[0-9a-f]{14}$|^\\d{15}$|^\\d{18}$" - IMSI string = "^\\d{14,15}$" -) - -// Used by IsFilePath func -const ( - // Unknown is unresolved OS type - Unknown = iota - // Win is Windows type - Win - // Unix is *nix OS types - Unix -) - -var ( - userRegexp = regexp.MustCompile("^[a-zA-Z0-9!#$%&'*+/=?^_`{|}~.-]+$") - hostRegexp = regexp.MustCompile("^[^\\s]+\\.[^\\s]+$") - userDotRegexp = regexp.MustCompile("(^[.]{1})|([.]{1}$)|([.]{2,})") - rxEmail = regexp.MustCompile(Email) - rxCreditCard = regexp.MustCompile(CreditCard) - rxISBN10 = regexp.MustCompile(ISBN10) - rxISBN13 = regexp.MustCompile(ISBN13) - rxUUID3 = 
regexp.MustCompile(UUID3) - rxUUID4 = regexp.MustCompile(UUID4) - rxUUID5 = regexp.MustCompile(UUID5) - rxUUID = regexp.MustCompile(UUID) - rxAlpha = regexp.MustCompile(Alpha) - rxAlphanumeric = regexp.MustCompile(Alphanumeric) - rxNumeric = regexp.MustCompile(Numeric) - rxInt = regexp.MustCompile(Int) - rxFloat = regexp.MustCompile(Float) - rxHexadecimal = regexp.MustCompile(Hexadecimal) - rxHexcolor = regexp.MustCompile(Hexcolor) - rxRGBcolor = regexp.MustCompile(RGBcolor) - rxASCII = regexp.MustCompile(ASCII) - rxPrintableASCII = regexp.MustCompile(PrintableASCII) - rxMultibyte = regexp.MustCompile(Multibyte) - rxFullWidth = regexp.MustCompile(FullWidth) - rxHalfWidth = regexp.MustCompile(HalfWidth) - rxBase64 = regexp.MustCompile(Base64) - rxDataURI = regexp.MustCompile(DataURI) - rxMagnetURI = regexp.MustCompile(MagnetURI) - rxLatitude = regexp.MustCompile(Latitude) - rxLongitude = regexp.MustCompile(Longitude) - rxDNSName = regexp.MustCompile(DNSName) - rxURL = regexp.MustCompile(URL) - rxSSN = regexp.MustCompile(SSN) - rxWinPath = regexp.MustCompile(WinPath) - rxUnixPath = regexp.MustCompile(UnixPath) - rxSemver = regexp.MustCompile(Semver) - rxHasLowerCase = regexp.MustCompile(hasLowerCase) - rxHasUpperCase = regexp.MustCompile(hasUpperCase) - rxHasWhitespace = regexp.MustCompile(hasWhitespace) - rxHasWhitespaceOnly = regexp.MustCompile(hasWhitespaceOnly) - rxIMEI = regexp.MustCompile(IMEI) - rxIMSI = regexp.MustCompile(IMSI) -) diff --git a/vendor/github.com/asaskevich/govalidator/types.go b/vendor/github.com/asaskevich/govalidator/types.go deleted file mode 100644 index 54218bf05..000000000 --- a/vendor/github.com/asaskevich/govalidator/types.go +++ /dev/null @@ -1,655 +0,0 @@ -package govalidator - -import ( - "reflect" - "regexp" - "sort" - "sync" -) - -// Validator is a wrapper for a validator function that returns bool and accepts string. -type Validator func(str string) bool - -// CustomTypeValidator is a wrapper for validator functions that returns bool and accepts any type. -// The second parameter should be the context (in the case of validating a struct: the whole object being validated). -type CustomTypeValidator func(i interface{}, o interface{}) bool - -// ParamValidator is a wrapper for validator functions that accept additional parameters. -type ParamValidator func(str string, params ...string) bool - -// InterfaceParamValidator is a wrapper for functions that accept variants parameters for an interface value -type InterfaceParamValidator func(in interface{}, params ...string) bool -type tagOptionsMap map[string]tagOption - -func (t tagOptionsMap) orderedKeys() []string { - var keys []string - for k := range t { - keys = append(keys, k) - } - - sort.Slice(keys, func(a, b int) bool { - return t[keys[a]].order < t[keys[b]].order - }) - - return keys -} - -type tagOption struct { - name string - customErrorMessage string - order int -} - -// UnsupportedTypeError is a wrapper for reflect.Type -type UnsupportedTypeError struct { - Type reflect.Type -} - -// stringValues is a slice of reflect.Value holding *reflect.StringValue. -// It implements the methods to sort by string. -type stringValues []reflect.Value - -// InterfaceParamTagMap is a map of functions accept variants parameters for an interface value -var InterfaceParamTagMap = map[string]InterfaceParamValidator{ - "type": IsType, -} - -// InterfaceParamTagRegexMap maps interface param tags to their respective regexes. 
-var InterfaceParamTagRegexMap = map[string]*regexp.Regexp{ - "type": regexp.MustCompile(`^type\((.*)\)$`), -} - -// ParamTagMap is a map of functions accept variants parameters -var ParamTagMap = map[string]ParamValidator{ - "length": ByteLength, - "range": Range, - "runelength": RuneLength, - "stringlength": StringLength, - "matches": StringMatches, - "in": IsInRaw, - "rsapub": IsRsaPub, - "minstringlength": MinStringLength, - "maxstringlength": MaxStringLength, -} - -// ParamTagRegexMap maps param tags to their respective regexes. -var ParamTagRegexMap = map[string]*regexp.Regexp{ - "range": regexp.MustCompile("^range\\((\\d+)\\|(\\d+)\\)$"), - "length": regexp.MustCompile("^length\\((\\d+)\\|(\\d+)\\)$"), - "runelength": regexp.MustCompile("^runelength\\((\\d+)\\|(\\d+)\\)$"), - "stringlength": regexp.MustCompile("^stringlength\\((\\d+)\\|(\\d+)\\)$"), - "in": regexp.MustCompile(`^in\((.*)\)`), - "matches": regexp.MustCompile(`^matches\((.+)\)$`), - "rsapub": regexp.MustCompile("^rsapub\\((\\d+)\\)$"), - "minstringlength": regexp.MustCompile("^minstringlength\\((\\d+)\\)$"), - "maxstringlength": regexp.MustCompile("^maxstringlength\\((\\d+)\\)$"), -} - -type customTypeTagMap struct { - validators map[string]CustomTypeValidator - - sync.RWMutex -} - -func (tm *customTypeTagMap) Get(name string) (CustomTypeValidator, bool) { - tm.RLock() - defer tm.RUnlock() - v, ok := tm.validators[name] - return v, ok -} - -func (tm *customTypeTagMap) Set(name string, ctv CustomTypeValidator) { - tm.Lock() - defer tm.Unlock() - tm.validators[name] = ctv -} - -// CustomTypeTagMap is a map of functions that can be used as tags for ValidateStruct function. -// Use this to validate compound or custom types that need to be handled as a whole, e.g. -// `type UUID [16]byte` (this would be handled as an array of bytes). -var CustomTypeTagMap = &customTypeTagMap{validators: make(map[string]CustomTypeValidator)} - -// TagMap is a map of functions, that can be used as tags for ValidateStruct function. 
-var TagMap = map[string]Validator{ - "email": IsEmail, - "url": IsURL, - "dialstring": IsDialString, - "requrl": IsRequestURL, - "requri": IsRequestURI, - "alpha": IsAlpha, - "utfletter": IsUTFLetter, - "alphanum": IsAlphanumeric, - "utfletternum": IsUTFLetterNumeric, - "numeric": IsNumeric, - "utfnumeric": IsUTFNumeric, - "utfdigit": IsUTFDigit, - "hexadecimal": IsHexadecimal, - "hexcolor": IsHexcolor, - "rgbcolor": IsRGBcolor, - "lowercase": IsLowerCase, - "uppercase": IsUpperCase, - "int": IsInt, - "float": IsFloat, - "null": IsNull, - "notnull": IsNotNull, - "uuid": IsUUID, - "uuidv3": IsUUIDv3, - "uuidv4": IsUUIDv4, - "uuidv5": IsUUIDv5, - "creditcard": IsCreditCard, - "isbn10": IsISBN10, - "isbn13": IsISBN13, - "json": IsJSON, - "multibyte": IsMultibyte, - "ascii": IsASCII, - "printableascii": IsPrintableASCII, - "fullwidth": IsFullWidth, - "halfwidth": IsHalfWidth, - "variablewidth": IsVariableWidth, - "base64": IsBase64, - "datauri": IsDataURI, - "ip": IsIP, - "port": IsPort, - "ipv4": IsIPv4, - "ipv6": IsIPv6, - "dns": IsDNSName, - "host": IsHost, - "mac": IsMAC, - "latitude": IsLatitude, - "longitude": IsLongitude, - "ssn": IsSSN, - "semver": IsSemver, - "rfc3339": IsRFC3339, - "rfc3339WithoutZone": IsRFC3339WithoutZone, - "ISO3166Alpha2": IsISO3166Alpha2, - "ISO3166Alpha3": IsISO3166Alpha3, - "ISO4217": IsISO4217, - "IMEI": IsIMEI, -} - -// ISO3166Entry stores country codes -type ISO3166Entry struct { - EnglishShortName string - FrenchShortName string - Alpha2Code string - Alpha3Code string - Numeric string -} - -//ISO3166List based on https://www.iso.org/obp/ui/#search/code/ Code Type "Officially Assigned Codes" -var ISO3166List = []ISO3166Entry{ - {"Afghanistan", "Afghanistan (l')", "AF", "AFG", "004"}, - {"Albania", "Albanie (l')", "AL", "ALB", "008"}, - {"Antarctica", "Antarctique (l')", "AQ", "ATA", "010"}, - {"Algeria", "Algérie (l')", "DZ", "DZA", "012"}, - {"American Samoa", "Samoa américaines (les)", "AS", "ASM", "016"}, - {"Andorra", "Andorre (l')", "AD", "AND", "020"}, - {"Angola", "Angola (l')", "AO", "AGO", "024"}, - {"Antigua and Barbuda", "Antigua-et-Barbuda", "AG", "ATG", "028"}, - {"Azerbaijan", "Azerbaïdjan (l')", "AZ", "AZE", "031"}, - {"Argentina", "Argentine (l')", "AR", "ARG", "032"}, - {"Australia", "Australie (l')", "AU", "AUS", "036"}, - {"Austria", "Autriche (l')", "AT", "AUT", "040"}, - {"Bahamas (the)", "Bahamas (les)", "BS", "BHS", "044"}, - {"Bahrain", "Bahreïn", "BH", "BHR", "048"}, - {"Bangladesh", "Bangladesh (le)", "BD", "BGD", "050"}, - {"Armenia", "Arménie (l')", "AM", "ARM", "051"}, - {"Barbados", "Barbade (la)", "BB", "BRB", "052"}, - {"Belgium", "Belgique (la)", "BE", "BEL", "056"}, - {"Bermuda", "Bermudes (les)", "BM", "BMU", "060"}, - {"Bhutan", "Bhoutan (le)", "BT", "BTN", "064"}, - {"Bolivia (Plurinational State of)", "Bolivie (État plurinational de)", "BO", "BOL", "068"}, - {"Bosnia and Herzegovina", "Bosnie-Herzégovine (la)", "BA", "BIH", "070"}, - {"Botswana", "Botswana (le)", "BW", "BWA", "072"}, - {"Bouvet Island", "Bouvet (l'Île)", "BV", "BVT", "074"}, - {"Brazil", "Brésil (le)", "BR", "BRA", "076"}, - {"Belize", "Belize (le)", "BZ", "BLZ", "084"}, - {"British Indian Ocean Territory (the)", "Indien (le Territoire britannique de l'océan)", "IO", "IOT", "086"}, - {"Solomon Islands", "Salomon (Îles)", "SB", "SLB", "090"}, - {"Virgin Islands (British)", "Vierges britanniques (les Îles)", "VG", "VGB", "092"}, - {"Brunei Darussalam", "Brunéi Darussalam (le)", "BN", "BRN", "096"}, - {"Bulgaria", "Bulgarie (la)", "BG", "BGR", "100"}, - 
{"Myanmar", "Myanmar (le)", "MM", "MMR", "104"}, - {"Burundi", "Burundi (le)", "BI", "BDI", "108"}, - {"Belarus", "Bélarus (le)", "BY", "BLR", "112"}, - {"Cambodia", "Cambodge (le)", "KH", "KHM", "116"}, - {"Cameroon", "Cameroun (le)", "CM", "CMR", "120"}, - {"Canada", "Canada (le)", "CA", "CAN", "124"}, - {"Cabo Verde", "Cabo Verde", "CV", "CPV", "132"}, - {"Cayman Islands (the)", "Caïmans (les Îles)", "KY", "CYM", "136"}, - {"Central African Republic (the)", "République centrafricaine (la)", "CF", "CAF", "140"}, - {"Sri Lanka", "Sri Lanka", "LK", "LKA", "144"}, - {"Chad", "Tchad (le)", "TD", "TCD", "148"}, - {"Chile", "Chili (le)", "CL", "CHL", "152"}, - {"China", "Chine (la)", "CN", "CHN", "156"}, - {"Taiwan (Province of China)", "Taïwan (Province de Chine)", "TW", "TWN", "158"}, - {"Christmas Island", "Christmas (l'Île)", "CX", "CXR", "162"}, - {"Cocos (Keeling) Islands (the)", "Cocos (les Îles)/ Keeling (les Îles)", "CC", "CCK", "166"}, - {"Colombia", "Colombie (la)", "CO", "COL", "170"}, - {"Comoros (the)", "Comores (les)", "KM", "COM", "174"}, - {"Mayotte", "Mayotte", "YT", "MYT", "175"}, - {"Congo (the)", "Congo (le)", "CG", "COG", "178"}, - {"Congo (the Democratic Republic of the)", "Congo (la République démocratique du)", "CD", "COD", "180"}, - {"Cook Islands (the)", "Cook (les Îles)", "CK", "COK", "184"}, - {"Costa Rica", "Costa Rica (le)", "CR", "CRI", "188"}, - {"Croatia", "Croatie (la)", "HR", "HRV", "191"}, - {"Cuba", "Cuba", "CU", "CUB", "192"}, - {"Cyprus", "Chypre", "CY", "CYP", "196"}, - {"Czech Republic (the)", "tchèque (la République)", "CZ", "CZE", "203"}, - {"Benin", "Bénin (le)", "BJ", "BEN", "204"}, - {"Denmark", "Danemark (le)", "DK", "DNK", "208"}, - {"Dominica", "Dominique (la)", "DM", "DMA", "212"}, - {"Dominican Republic (the)", "dominicaine (la République)", "DO", "DOM", "214"}, - {"Ecuador", "Équateur (l')", "EC", "ECU", "218"}, - {"El Salvador", "El Salvador", "SV", "SLV", "222"}, - {"Equatorial Guinea", "Guinée équatoriale (la)", "GQ", "GNQ", "226"}, - {"Ethiopia", "Éthiopie (l')", "ET", "ETH", "231"}, - {"Eritrea", "Érythrée (l')", "ER", "ERI", "232"}, - {"Estonia", "Estonie (l')", "EE", "EST", "233"}, - {"Faroe Islands (the)", "Féroé (les Îles)", "FO", "FRO", "234"}, - {"Falkland Islands (the) [Malvinas]", "Falkland (les Îles)/Malouines (les Îles)", "FK", "FLK", "238"}, - {"South Georgia and the South Sandwich Islands", "Géorgie du Sud-et-les Îles Sandwich du Sud (la)", "GS", "SGS", "239"}, - {"Fiji", "Fidji (les)", "FJ", "FJI", "242"}, - {"Finland", "Finlande (la)", "FI", "FIN", "246"}, - {"Åland Islands", "Åland(les Îles)", "AX", "ALA", "248"}, - {"France", "France (la)", "FR", "FRA", "250"}, - {"French Guiana", "Guyane française (la )", "GF", "GUF", "254"}, - {"French Polynesia", "Polynésie française (la)", "PF", "PYF", "258"}, - {"French Southern Territories (the)", "Terres australes françaises (les)", "TF", "ATF", "260"}, - {"Djibouti", "Djibouti", "DJ", "DJI", "262"}, - {"Gabon", "Gabon (le)", "GA", "GAB", "266"}, - {"Georgia", "Géorgie (la)", "GE", "GEO", "268"}, - {"Gambia (the)", "Gambie (la)", "GM", "GMB", "270"}, - {"Palestine, State of", "Palestine, État de", "PS", "PSE", "275"}, - {"Germany", "Allemagne (l')", "DE", "DEU", "276"}, - {"Ghana", "Ghana (le)", "GH", "GHA", "288"}, - {"Gibraltar", "Gibraltar", "GI", "GIB", "292"}, - {"Kiribati", "Kiribati", "KI", "KIR", "296"}, - {"Greece", "Grèce (la)", "GR", "GRC", "300"}, - {"Greenland", "Groenland (le)", "GL", "GRL", "304"}, - {"Grenada", "Grenade (la)", "GD", "GRD", "308"}, - {"Guadeloupe", 
"Guadeloupe (la)", "GP", "GLP", "312"}, - {"Guam", "Guam", "GU", "GUM", "316"}, - {"Guatemala", "Guatemala (le)", "GT", "GTM", "320"}, - {"Guinea", "Guinée (la)", "GN", "GIN", "324"}, - {"Guyana", "Guyana (le)", "GY", "GUY", "328"}, - {"Haiti", "Haïti", "HT", "HTI", "332"}, - {"Heard Island and McDonald Islands", "Heard-et-Îles MacDonald (l'Île)", "HM", "HMD", "334"}, - {"Holy See (the)", "Saint-Siège (le)", "VA", "VAT", "336"}, - {"Honduras", "Honduras (le)", "HN", "HND", "340"}, - {"Hong Kong", "Hong Kong", "HK", "HKG", "344"}, - {"Hungary", "Hongrie (la)", "HU", "HUN", "348"}, - {"Iceland", "Islande (l')", "IS", "ISL", "352"}, - {"India", "Inde (l')", "IN", "IND", "356"}, - {"Indonesia", "Indonésie (l')", "ID", "IDN", "360"}, - {"Iran (Islamic Republic of)", "Iran (République Islamique d')", "IR", "IRN", "364"}, - {"Iraq", "Iraq (l')", "IQ", "IRQ", "368"}, - {"Ireland", "Irlande (l')", "IE", "IRL", "372"}, - {"Israel", "Israël", "IL", "ISR", "376"}, - {"Italy", "Italie (l')", "IT", "ITA", "380"}, - {"Côte d'Ivoire", "Côte d'Ivoire (la)", "CI", "CIV", "384"}, - {"Jamaica", "Jamaïque (la)", "JM", "JAM", "388"}, - {"Japan", "Japon (le)", "JP", "JPN", "392"}, - {"Kazakhstan", "Kazakhstan (le)", "KZ", "KAZ", "398"}, - {"Jordan", "Jordanie (la)", "JO", "JOR", "400"}, - {"Kenya", "Kenya (le)", "KE", "KEN", "404"}, - {"Korea (the Democratic People's Republic of)", "Corée (la République populaire démocratique de)", "KP", "PRK", "408"}, - {"Korea (the Republic of)", "Corée (la République de)", "KR", "KOR", "410"}, - {"Kuwait", "Koweït (le)", "KW", "KWT", "414"}, - {"Kyrgyzstan", "Kirghizistan (le)", "KG", "KGZ", "417"}, - {"Lao People's Democratic Republic (the)", "Lao, République démocratique populaire", "LA", "LAO", "418"}, - {"Lebanon", "Liban (le)", "LB", "LBN", "422"}, - {"Lesotho", "Lesotho (le)", "LS", "LSO", "426"}, - {"Latvia", "Lettonie (la)", "LV", "LVA", "428"}, - {"Liberia", "Libéria (le)", "LR", "LBR", "430"}, - {"Libya", "Libye (la)", "LY", "LBY", "434"}, - {"Liechtenstein", "Liechtenstein (le)", "LI", "LIE", "438"}, - {"Lithuania", "Lituanie (la)", "LT", "LTU", "440"}, - {"Luxembourg", "Luxembourg (le)", "LU", "LUX", "442"}, - {"Macao", "Macao", "MO", "MAC", "446"}, - {"Madagascar", "Madagascar", "MG", "MDG", "450"}, - {"Malawi", "Malawi (le)", "MW", "MWI", "454"}, - {"Malaysia", "Malaisie (la)", "MY", "MYS", "458"}, - {"Maldives", "Maldives (les)", "MV", "MDV", "462"}, - {"Mali", "Mali (le)", "ML", "MLI", "466"}, - {"Malta", "Malte", "MT", "MLT", "470"}, - {"Martinique", "Martinique (la)", "MQ", "MTQ", "474"}, - {"Mauritania", "Mauritanie (la)", "MR", "MRT", "478"}, - {"Mauritius", "Maurice", "MU", "MUS", "480"}, - {"Mexico", "Mexique (le)", "MX", "MEX", "484"}, - {"Monaco", "Monaco", "MC", "MCO", "492"}, - {"Mongolia", "Mongolie (la)", "MN", "MNG", "496"}, - {"Moldova (the Republic of)", "Moldova , République de", "MD", "MDA", "498"}, - {"Montenegro", "Monténégro (le)", "ME", "MNE", "499"}, - {"Montserrat", "Montserrat", "MS", "MSR", "500"}, - {"Morocco", "Maroc (le)", "MA", "MAR", "504"}, - {"Mozambique", "Mozambique (le)", "MZ", "MOZ", "508"}, - {"Oman", "Oman", "OM", "OMN", "512"}, - {"Namibia", "Namibie (la)", "NA", "NAM", "516"}, - {"Nauru", "Nauru", "NR", "NRU", "520"}, - {"Nepal", "Népal (le)", "NP", "NPL", "524"}, - {"Netherlands (the)", "Pays-Bas (les)", "NL", "NLD", "528"}, - {"Curaçao", "Curaçao", "CW", "CUW", "531"}, - {"Aruba", "Aruba", "AW", "ABW", "533"}, - {"Sint Maarten (Dutch part)", "Saint-Martin (partie néerlandaise)", "SX", "SXM", "534"}, - {"Bonaire, Sint 
Eustatius and Saba", "Bonaire, Saint-Eustache et Saba", "BQ", "BES", "535"}, - {"New Caledonia", "Nouvelle-Calédonie (la)", "NC", "NCL", "540"}, - {"Vanuatu", "Vanuatu (le)", "VU", "VUT", "548"}, - {"New Zealand", "Nouvelle-Zélande (la)", "NZ", "NZL", "554"}, - {"Nicaragua", "Nicaragua (le)", "NI", "NIC", "558"}, - {"Niger (the)", "Niger (le)", "NE", "NER", "562"}, - {"Nigeria", "Nigéria (le)", "NG", "NGA", "566"}, - {"Niue", "Niue", "NU", "NIU", "570"}, - {"Norfolk Island", "Norfolk (l'Île)", "NF", "NFK", "574"}, - {"Norway", "Norvège (la)", "NO", "NOR", "578"}, - {"Northern Mariana Islands (the)", "Mariannes du Nord (les Îles)", "MP", "MNP", "580"}, - {"United States Minor Outlying Islands (the)", "Îles mineures éloignées des États-Unis (les)", "UM", "UMI", "581"}, - {"Micronesia (Federated States of)", "Micronésie (États fédérés de)", "FM", "FSM", "583"}, - {"Marshall Islands (the)", "Marshall (Îles)", "MH", "MHL", "584"}, - {"Palau", "Palaos (les)", "PW", "PLW", "585"}, - {"Pakistan", "Pakistan (le)", "PK", "PAK", "586"}, - {"Panama", "Panama (le)", "PA", "PAN", "591"}, - {"Papua New Guinea", "Papouasie-Nouvelle-Guinée (la)", "PG", "PNG", "598"}, - {"Paraguay", "Paraguay (le)", "PY", "PRY", "600"}, - {"Peru", "Pérou (le)", "PE", "PER", "604"}, - {"Philippines (the)", "Philippines (les)", "PH", "PHL", "608"}, - {"Pitcairn", "Pitcairn", "PN", "PCN", "612"}, - {"Poland", "Pologne (la)", "PL", "POL", "616"}, - {"Portugal", "Portugal (le)", "PT", "PRT", "620"}, - {"Guinea-Bissau", "Guinée-Bissau (la)", "GW", "GNB", "624"}, - {"Timor-Leste", "Timor-Leste (le)", "TL", "TLS", "626"}, - {"Puerto Rico", "Porto Rico", "PR", "PRI", "630"}, - {"Qatar", "Qatar (le)", "QA", "QAT", "634"}, - {"Réunion", "Réunion (La)", "RE", "REU", "638"}, - {"Romania", "Roumanie (la)", "RO", "ROU", "642"}, - {"Russian Federation (the)", "Russie (la Fédération de)", "RU", "RUS", "643"}, - {"Rwanda", "Rwanda (le)", "RW", "RWA", "646"}, - {"Saint Barthélemy", "Saint-Barthélemy", "BL", "BLM", "652"}, - {"Saint Helena, Ascension and Tristan da Cunha", "Sainte-Hélène, Ascension et Tristan da Cunha", "SH", "SHN", "654"}, - {"Saint Kitts and Nevis", "Saint-Kitts-et-Nevis", "KN", "KNA", "659"}, - {"Anguilla", "Anguilla", "AI", "AIA", "660"}, - {"Saint Lucia", "Sainte-Lucie", "LC", "LCA", "662"}, - {"Saint Martin (French part)", "Saint-Martin (partie française)", "MF", "MAF", "663"}, - {"Saint Pierre and Miquelon", "Saint-Pierre-et-Miquelon", "PM", "SPM", "666"}, - {"Saint Vincent and the Grenadines", "Saint-Vincent-et-les Grenadines", "VC", "VCT", "670"}, - {"San Marino", "Saint-Marin", "SM", "SMR", "674"}, - {"Sao Tome and Principe", "Sao Tomé-et-Principe", "ST", "STP", "678"}, - {"Saudi Arabia", "Arabie saoudite (l')", "SA", "SAU", "682"}, - {"Senegal", "Sénégal (le)", "SN", "SEN", "686"}, - {"Serbia", "Serbie (la)", "RS", "SRB", "688"}, - {"Seychelles", "Seychelles (les)", "SC", "SYC", "690"}, - {"Sierra Leone", "Sierra Leone (la)", "SL", "SLE", "694"}, - {"Singapore", "Singapour", "SG", "SGP", "702"}, - {"Slovakia", "Slovaquie (la)", "SK", "SVK", "703"}, - {"Viet Nam", "Viet Nam (le)", "VN", "VNM", "704"}, - {"Slovenia", "Slovénie (la)", "SI", "SVN", "705"}, - {"Somalia", "Somalie (la)", "SO", "SOM", "706"}, - {"South Africa", "Afrique du Sud (l')", "ZA", "ZAF", "710"}, - {"Zimbabwe", "Zimbabwe (le)", "ZW", "ZWE", "716"}, - {"Spain", "Espagne (l')", "ES", "ESP", "724"}, - {"South Sudan", "Soudan du Sud (le)", "SS", "SSD", "728"}, - {"Sudan (the)", "Soudan (le)", "SD", "SDN", "729"}, - {"Western Sahara*", "Sahara 
occidental (le)*", "EH", "ESH", "732"}, - {"Suriname", "Suriname (le)", "SR", "SUR", "740"}, - {"Svalbard and Jan Mayen", "Svalbard et l'Île Jan Mayen (le)", "SJ", "SJM", "744"}, - {"Swaziland", "Swaziland (le)", "SZ", "SWZ", "748"}, - {"Sweden", "Suède (la)", "SE", "SWE", "752"}, - {"Switzerland", "Suisse (la)", "CH", "CHE", "756"}, - {"Syrian Arab Republic", "République arabe syrienne (la)", "SY", "SYR", "760"}, - {"Tajikistan", "Tadjikistan (le)", "TJ", "TJK", "762"}, - {"Thailand", "Thaïlande (la)", "TH", "THA", "764"}, - {"Togo", "Togo (le)", "TG", "TGO", "768"}, - {"Tokelau", "Tokelau (les)", "TK", "TKL", "772"}, - {"Tonga", "Tonga (les)", "TO", "TON", "776"}, - {"Trinidad and Tobago", "Trinité-et-Tobago (la)", "TT", "TTO", "780"}, - {"United Arab Emirates (the)", "Émirats arabes unis (les)", "AE", "ARE", "784"}, - {"Tunisia", "Tunisie (la)", "TN", "TUN", "788"}, - {"Turkey", "Turquie (la)", "TR", "TUR", "792"}, - {"Turkmenistan", "Turkménistan (le)", "TM", "TKM", "795"}, - {"Turks and Caicos Islands (the)", "Turks-et-Caïcos (les Îles)", "TC", "TCA", "796"}, - {"Tuvalu", "Tuvalu (les)", "TV", "TUV", "798"}, - {"Uganda", "Ouganda (l')", "UG", "UGA", "800"}, - {"Ukraine", "Ukraine (l')", "UA", "UKR", "804"}, - {"Macedonia (the former Yugoslav Republic of)", "Macédoine (l'ex‑République yougoslave de)", "MK", "MKD", "807"}, - {"Egypt", "Égypte (l')", "EG", "EGY", "818"}, - {"United Kingdom of Great Britain and Northern Ireland (the)", "Royaume-Uni de Grande-Bretagne et d'Irlande du Nord (le)", "GB", "GBR", "826"}, - {"Guernsey", "Guernesey", "GG", "GGY", "831"}, - {"Jersey", "Jersey", "JE", "JEY", "832"}, - {"Isle of Man", "Île de Man", "IM", "IMN", "833"}, - {"Tanzania, United Republic of", "Tanzanie, République-Unie de", "TZ", "TZA", "834"}, - {"United States of America (the)", "États-Unis d'Amérique (les)", "US", "USA", "840"}, - {"Virgin Islands (U.S.)", "Vierges des États-Unis (les Îles)", "VI", "VIR", "850"}, - {"Burkina Faso", "Burkina Faso (le)", "BF", "BFA", "854"}, - {"Uruguay", "Uruguay (l')", "UY", "URY", "858"}, - {"Uzbekistan", "Ouzbékistan (l')", "UZ", "UZB", "860"}, - {"Venezuela (Bolivarian Republic of)", "Venezuela (République bolivarienne du)", "VE", "VEN", "862"}, - {"Wallis and Futuna", "Wallis-et-Futuna", "WF", "WLF", "876"}, - {"Samoa", "Samoa (le)", "WS", "WSM", "882"}, - {"Yemen", "Yémen (le)", "YE", "YEM", "887"}, - {"Zambia", "Zambie (la)", "ZM", "ZMB", "894"}, -} - -// ISO4217List is the list of ISO currency codes -var ISO4217List = []string{ - "AED", "AFN", "ALL", "AMD", "ANG", "AOA", "ARS", "AUD", "AWG", "AZN", - "BAM", "BBD", "BDT", "BGN", "BHD", "BIF", "BMD", "BND", "BOB", "BOV", "BRL", "BSD", "BTN", "BWP", "BYN", "BZD", - "CAD", "CDF", "CHE", "CHF", "CHW", "CLF", "CLP", "CNY", "COP", "COU", "CRC", "CUC", "CUP", "CVE", "CZK", - "DJF", "DKK", "DOP", "DZD", - "EGP", "ERN", "ETB", "EUR", - "FJD", "FKP", - "GBP", "GEL", "GHS", "GIP", "GMD", "GNF", "GTQ", "GYD", - "HKD", "HNL", "HRK", "HTG", "HUF", - "IDR", "ILS", "INR", "IQD", "IRR", "ISK", - "JMD", "JOD", "JPY", - "KES", "KGS", "KHR", "KMF", "KPW", "KRW", "KWD", "KYD", "KZT", - "LAK", "LBP", "LKR", "LRD", "LSL", "LYD", - "MAD", "MDL", "MGA", "MKD", "MMK", "MNT", "MOP", "MRO", "MUR", "MVR", "MWK", "MXN", "MXV", "MYR", "MZN", - "NAD", "NGN", "NIO", "NOK", "NPR", "NZD", - "OMR", - "PAB", "PEN", "PGK", "PHP", "PKR", "PLN", "PYG", - "QAR", - "RON", "RSD", "RUB", "RWF", - "SAR", "SBD", "SCR", "SDG", "SEK", "SGD", "SHP", "SLL", "SOS", "SRD", "SSP", "STD", "STN", "SVC", "SYP", "SZL", - "THB", "TJS", "TMT", "TND", 
"TOP", "TRY", "TTD", "TWD", "TZS", - "UAH", "UGX", "USD", "USN", "UYI", "UYU", "UYW", "UZS", - "VEF", "VES", "VND", "VUV", - "WST", - "XAF", "XAG", "XAU", "XBA", "XBB", "XBC", "XBD", "XCD", "XDR", "XOF", "XPD", "XPF", "XPT", "XSU", "XTS", "XUA", "XXX", - "YER", - "ZAR", "ZMW", "ZWL", -} - -// ISO693Entry stores ISO language codes -type ISO693Entry struct { - Alpha3bCode string - Alpha2Code string - English string -} - -//ISO693List based on http://data.okfn.org/data/core/language-codes/r/language-codes-3b2.json -var ISO693List = []ISO693Entry{ - {Alpha3bCode: "aar", Alpha2Code: "aa", English: "Afar"}, - {Alpha3bCode: "abk", Alpha2Code: "ab", English: "Abkhazian"}, - {Alpha3bCode: "afr", Alpha2Code: "af", English: "Afrikaans"}, - {Alpha3bCode: "aka", Alpha2Code: "ak", English: "Akan"}, - {Alpha3bCode: "alb", Alpha2Code: "sq", English: "Albanian"}, - {Alpha3bCode: "amh", Alpha2Code: "am", English: "Amharic"}, - {Alpha3bCode: "ara", Alpha2Code: "ar", English: "Arabic"}, - {Alpha3bCode: "arg", Alpha2Code: "an", English: "Aragonese"}, - {Alpha3bCode: "arm", Alpha2Code: "hy", English: "Armenian"}, - {Alpha3bCode: "asm", Alpha2Code: "as", English: "Assamese"}, - {Alpha3bCode: "ava", Alpha2Code: "av", English: "Avaric"}, - {Alpha3bCode: "ave", Alpha2Code: "ae", English: "Avestan"}, - {Alpha3bCode: "aym", Alpha2Code: "ay", English: "Aymara"}, - {Alpha3bCode: "aze", Alpha2Code: "az", English: "Azerbaijani"}, - {Alpha3bCode: "bak", Alpha2Code: "ba", English: "Bashkir"}, - {Alpha3bCode: "bam", Alpha2Code: "bm", English: "Bambara"}, - {Alpha3bCode: "baq", Alpha2Code: "eu", English: "Basque"}, - {Alpha3bCode: "bel", Alpha2Code: "be", English: "Belarusian"}, - {Alpha3bCode: "ben", Alpha2Code: "bn", English: "Bengali"}, - {Alpha3bCode: "bih", Alpha2Code: "bh", English: "Bihari languages"}, - {Alpha3bCode: "bis", Alpha2Code: "bi", English: "Bislama"}, - {Alpha3bCode: "bos", Alpha2Code: "bs", English: "Bosnian"}, - {Alpha3bCode: "bre", Alpha2Code: "br", English: "Breton"}, - {Alpha3bCode: "bul", Alpha2Code: "bg", English: "Bulgarian"}, - {Alpha3bCode: "bur", Alpha2Code: "my", English: "Burmese"}, - {Alpha3bCode: "cat", Alpha2Code: "ca", English: "Catalan; Valencian"}, - {Alpha3bCode: "cha", Alpha2Code: "ch", English: "Chamorro"}, - {Alpha3bCode: "che", Alpha2Code: "ce", English: "Chechen"}, - {Alpha3bCode: "chi", Alpha2Code: "zh", English: "Chinese"}, - {Alpha3bCode: "chu", Alpha2Code: "cu", English: "Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic"}, - {Alpha3bCode: "chv", Alpha2Code: "cv", English: "Chuvash"}, - {Alpha3bCode: "cor", Alpha2Code: "kw", English: "Cornish"}, - {Alpha3bCode: "cos", Alpha2Code: "co", English: "Corsican"}, - {Alpha3bCode: "cre", Alpha2Code: "cr", English: "Cree"}, - {Alpha3bCode: "cze", Alpha2Code: "cs", English: "Czech"}, - {Alpha3bCode: "dan", Alpha2Code: "da", English: "Danish"}, - {Alpha3bCode: "div", Alpha2Code: "dv", English: "Divehi; Dhivehi; Maldivian"}, - {Alpha3bCode: "dut", Alpha2Code: "nl", English: "Dutch; Flemish"}, - {Alpha3bCode: "dzo", Alpha2Code: "dz", English: "Dzongkha"}, - {Alpha3bCode: "eng", Alpha2Code: "en", English: "English"}, - {Alpha3bCode: "epo", Alpha2Code: "eo", English: "Esperanto"}, - {Alpha3bCode: "est", Alpha2Code: "et", English: "Estonian"}, - {Alpha3bCode: "ewe", Alpha2Code: "ee", English: "Ewe"}, - {Alpha3bCode: "fao", Alpha2Code: "fo", English: "Faroese"}, - {Alpha3bCode: "fij", Alpha2Code: "fj", English: "Fijian"}, - {Alpha3bCode: "fin", Alpha2Code: "fi", English: "Finnish"}, - {Alpha3bCode: "fre", 
Alpha2Code: "fr", English: "French"}, - {Alpha3bCode: "fry", Alpha2Code: "fy", English: "Western Frisian"}, - {Alpha3bCode: "ful", Alpha2Code: "ff", English: "Fulah"}, - {Alpha3bCode: "geo", Alpha2Code: "ka", English: "Georgian"}, - {Alpha3bCode: "ger", Alpha2Code: "de", English: "German"}, - {Alpha3bCode: "gla", Alpha2Code: "gd", English: "Gaelic; Scottish Gaelic"}, - {Alpha3bCode: "gle", Alpha2Code: "ga", English: "Irish"}, - {Alpha3bCode: "glg", Alpha2Code: "gl", English: "Galician"}, - {Alpha3bCode: "glv", Alpha2Code: "gv", English: "Manx"}, - {Alpha3bCode: "gre", Alpha2Code: "el", English: "Greek, Modern (1453-)"}, - {Alpha3bCode: "grn", Alpha2Code: "gn", English: "Guarani"}, - {Alpha3bCode: "guj", Alpha2Code: "gu", English: "Gujarati"}, - {Alpha3bCode: "hat", Alpha2Code: "ht", English: "Haitian; Haitian Creole"}, - {Alpha3bCode: "hau", Alpha2Code: "ha", English: "Hausa"}, - {Alpha3bCode: "heb", Alpha2Code: "he", English: "Hebrew"}, - {Alpha3bCode: "her", Alpha2Code: "hz", English: "Herero"}, - {Alpha3bCode: "hin", Alpha2Code: "hi", English: "Hindi"}, - {Alpha3bCode: "hmo", Alpha2Code: "ho", English: "Hiri Motu"}, - {Alpha3bCode: "hrv", Alpha2Code: "hr", English: "Croatian"}, - {Alpha3bCode: "hun", Alpha2Code: "hu", English: "Hungarian"}, - {Alpha3bCode: "ibo", Alpha2Code: "ig", English: "Igbo"}, - {Alpha3bCode: "ice", Alpha2Code: "is", English: "Icelandic"}, - {Alpha3bCode: "ido", Alpha2Code: "io", English: "Ido"}, - {Alpha3bCode: "iii", Alpha2Code: "ii", English: "Sichuan Yi; Nuosu"}, - {Alpha3bCode: "iku", Alpha2Code: "iu", English: "Inuktitut"}, - {Alpha3bCode: "ile", Alpha2Code: "ie", English: "Interlingue; Occidental"}, - {Alpha3bCode: "ina", Alpha2Code: "ia", English: "Interlingua (International Auxiliary Language Association)"}, - {Alpha3bCode: "ind", Alpha2Code: "id", English: "Indonesian"}, - {Alpha3bCode: "ipk", Alpha2Code: "ik", English: "Inupiaq"}, - {Alpha3bCode: "ita", Alpha2Code: "it", English: "Italian"}, - {Alpha3bCode: "jav", Alpha2Code: "jv", English: "Javanese"}, - {Alpha3bCode: "jpn", Alpha2Code: "ja", English: "Japanese"}, - {Alpha3bCode: "kal", Alpha2Code: "kl", English: "Kalaallisut; Greenlandic"}, - {Alpha3bCode: "kan", Alpha2Code: "kn", English: "Kannada"}, - {Alpha3bCode: "kas", Alpha2Code: "ks", English: "Kashmiri"}, - {Alpha3bCode: "kau", Alpha2Code: "kr", English: "Kanuri"}, - {Alpha3bCode: "kaz", Alpha2Code: "kk", English: "Kazakh"}, - {Alpha3bCode: "khm", Alpha2Code: "km", English: "Central Khmer"}, - {Alpha3bCode: "kik", Alpha2Code: "ki", English: "Kikuyu; Gikuyu"}, - {Alpha3bCode: "kin", Alpha2Code: "rw", English: "Kinyarwanda"}, - {Alpha3bCode: "kir", Alpha2Code: "ky", English: "Kirghiz; Kyrgyz"}, - {Alpha3bCode: "kom", Alpha2Code: "kv", English: "Komi"}, - {Alpha3bCode: "kon", Alpha2Code: "kg", English: "Kongo"}, - {Alpha3bCode: "kor", Alpha2Code: "ko", English: "Korean"}, - {Alpha3bCode: "kua", Alpha2Code: "kj", English: "Kuanyama; Kwanyama"}, - {Alpha3bCode: "kur", Alpha2Code: "ku", English: "Kurdish"}, - {Alpha3bCode: "lao", Alpha2Code: "lo", English: "Lao"}, - {Alpha3bCode: "lat", Alpha2Code: "la", English: "Latin"}, - {Alpha3bCode: "lav", Alpha2Code: "lv", English: "Latvian"}, - {Alpha3bCode: "lim", Alpha2Code: "li", English: "Limburgan; Limburger; Limburgish"}, - {Alpha3bCode: "lin", Alpha2Code: "ln", English: "Lingala"}, - {Alpha3bCode: "lit", Alpha2Code: "lt", English: "Lithuanian"}, - {Alpha3bCode: "ltz", Alpha2Code: "lb", English: "Luxembourgish; Letzeburgesch"}, - {Alpha3bCode: "lub", Alpha2Code: "lu", English: "Luba-Katanga"}, - 
{Alpha3bCode: "lug", Alpha2Code: "lg", English: "Ganda"}, - {Alpha3bCode: "mac", Alpha2Code: "mk", English: "Macedonian"}, - {Alpha3bCode: "mah", Alpha2Code: "mh", English: "Marshallese"}, - {Alpha3bCode: "mal", Alpha2Code: "ml", English: "Malayalam"}, - {Alpha3bCode: "mao", Alpha2Code: "mi", English: "Maori"}, - {Alpha3bCode: "mar", Alpha2Code: "mr", English: "Marathi"}, - {Alpha3bCode: "may", Alpha2Code: "ms", English: "Malay"}, - {Alpha3bCode: "mlg", Alpha2Code: "mg", English: "Malagasy"}, - {Alpha3bCode: "mlt", Alpha2Code: "mt", English: "Maltese"}, - {Alpha3bCode: "mon", Alpha2Code: "mn", English: "Mongolian"}, - {Alpha3bCode: "nau", Alpha2Code: "na", English: "Nauru"}, - {Alpha3bCode: "nav", Alpha2Code: "nv", English: "Navajo; Navaho"}, - {Alpha3bCode: "nbl", Alpha2Code: "nr", English: "Ndebele, South; South Ndebele"}, - {Alpha3bCode: "nde", Alpha2Code: "nd", English: "Ndebele, North; North Ndebele"}, - {Alpha3bCode: "ndo", Alpha2Code: "ng", English: "Ndonga"}, - {Alpha3bCode: "nep", Alpha2Code: "ne", English: "Nepali"}, - {Alpha3bCode: "nno", Alpha2Code: "nn", English: "Norwegian Nynorsk; Nynorsk, Norwegian"}, - {Alpha3bCode: "nob", Alpha2Code: "nb", English: "Bokmål, Norwegian; Norwegian Bokmål"}, - {Alpha3bCode: "nor", Alpha2Code: "no", English: "Norwegian"}, - {Alpha3bCode: "nya", Alpha2Code: "ny", English: "Chichewa; Chewa; Nyanja"}, - {Alpha3bCode: "oci", Alpha2Code: "oc", English: "Occitan (post 1500); Provençal"}, - {Alpha3bCode: "oji", Alpha2Code: "oj", English: "Ojibwa"}, - {Alpha3bCode: "ori", Alpha2Code: "or", English: "Oriya"}, - {Alpha3bCode: "orm", Alpha2Code: "om", English: "Oromo"}, - {Alpha3bCode: "oss", Alpha2Code: "os", English: "Ossetian; Ossetic"}, - {Alpha3bCode: "pan", Alpha2Code: "pa", English: "Panjabi; Punjabi"}, - {Alpha3bCode: "per", Alpha2Code: "fa", English: "Persian"}, - {Alpha3bCode: "pli", Alpha2Code: "pi", English: "Pali"}, - {Alpha3bCode: "pol", Alpha2Code: "pl", English: "Polish"}, - {Alpha3bCode: "por", Alpha2Code: "pt", English: "Portuguese"}, - {Alpha3bCode: "pus", Alpha2Code: "ps", English: "Pushto; Pashto"}, - {Alpha3bCode: "que", Alpha2Code: "qu", English: "Quechua"}, - {Alpha3bCode: "roh", Alpha2Code: "rm", English: "Romansh"}, - {Alpha3bCode: "rum", Alpha2Code: "ro", English: "Romanian; Moldavian; Moldovan"}, - {Alpha3bCode: "run", Alpha2Code: "rn", English: "Rundi"}, - {Alpha3bCode: "rus", Alpha2Code: "ru", English: "Russian"}, - {Alpha3bCode: "sag", Alpha2Code: "sg", English: "Sango"}, - {Alpha3bCode: "san", Alpha2Code: "sa", English: "Sanskrit"}, - {Alpha3bCode: "sin", Alpha2Code: "si", English: "Sinhala; Sinhalese"}, - {Alpha3bCode: "slo", Alpha2Code: "sk", English: "Slovak"}, - {Alpha3bCode: "slv", Alpha2Code: "sl", English: "Slovenian"}, - {Alpha3bCode: "sme", Alpha2Code: "se", English: "Northern Sami"}, - {Alpha3bCode: "smo", Alpha2Code: "sm", English: "Samoan"}, - {Alpha3bCode: "sna", Alpha2Code: "sn", English: "Shona"}, - {Alpha3bCode: "snd", Alpha2Code: "sd", English: "Sindhi"}, - {Alpha3bCode: "som", Alpha2Code: "so", English: "Somali"}, - {Alpha3bCode: "sot", Alpha2Code: "st", English: "Sotho, Southern"}, - {Alpha3bCode: "spa", Alpha2Code: "es", English: "Spanish; Castilian"}, - {Alpha3bCode: "srd", Alpha2Code: "sc", English: "Sardinian"}, - {Alpha3bCode: "srp", Alpha2Code: "sr", English: "Serbian"}, - {Alpha3bCode: "ssw", Alpha2Code: "ss", English: "Swati"}, - {Alpha3bCode: "sun", Alpha2Code: "su", English: "Sundanese"}, - {Alpha3bCode: "swa", Alpha2Code: "sw", English: "Swahili"}, - {Alpha3bCode: "swe", Alpha2Code: "sv", 
English: "Swedish"}, - {Alpha3bCode: "tah", Alpha2Code: "ty", English: "Tahitian"}, - {Alpha3bCode: "tam", Alpha2Code: "ta", English: "Tamil"}, - {Alpha3bCode: "tat", Alpha2Code: "tt", English: "Tatar"}, - {Alpha3bCode: "tel", Alpha2Code: "te", English: "Telugu"}, - {Alpha3bCode: "tgk", Alpha2Code: "tg", English: "Tajik"}, - {Alpha3bCode: "tgl", Alpha2Code: "tl", English: "Tagalog"}, - {Alpha3bCode: "tha", Alpha2Code: "th", English: "Thai"}, - {Alpha3bCode: "tib", Alpha2Code: "bo", English: "Tibetan"}, - {Alpha3bCode: "tir", Alpha2Code: "ti", English: "Tigrinya"}, - {Alpha3bCode: "ton", Alpha2Code: "to", English: "Tonga (Tonga Islands)"}, - {Alpha3bCode: "tsn", Alpha2Code: "tn", English: "Tswana"}, - {Alpha3bCode: "tso", Alpha2Code: "ts", English: "Tsonga"}, - {Alpha3bCode: "tuk", Alpha2Code: "tk", English: "Turkmen"}, - {Alpha3bCode: "tur", Alpha2Code: "tr", English: "Turkish"}, - {Alpha3bCode: "twi", Alpha2Code: "tw", English: "Twi"}, - {Alpha3bCode: "uig", Alpha2Code: "ug", English: "Uighur; Uyghur"}, - {Alpha3bCode: "ukr", Alpha2Code: "uk", English: "Ukrainian"}, - {Alpha3bCode: "urd", Alpha2Code: "ur", English: "Urdu"}, - {Alpha3bCode: "uzb", Alpha2Code: "uz", English: "Uzbek"}, - {Alpha3bCode: "ven", Alpha2Code: "ve", English: "Venda"}, - {Alpha3bCode: "vie", Alpha2Code: "vi", English: "Vietnamese"}, - {Alpha3bCode: "vol", Alpha2Code: "vo", English: "Volapük"}, - {Alpha3bCode: "wel", Alpha2Code: "cy", English: "Welsh"}, - {Alpha3bCode: "wln", Alpha2Code: "wa", English: "Walloon"}, - {Alpha3bCode: "wol", Alpha2Code: "wo", English: "Wolof"}, - {Alpha3bCode: "xho", Alpha2Code: "xh", English: "Xhosa"}, - {Alpha3bCode: "yid", Alpha2Code: "yi", English: "Yiddish"}, - {Alpha3bCode: "yor", Alpha2Code: "yo", English: "Yoruba"}, - {Alpha3bCode: "zha", Alpha2Code: "za", English: "Zhuang; Chuang"}, - {Alpha3bCode: "zul", Alpha2Code: "zu", English: "Zulu"}, -} diff --git a/vendor/github.com/asaskevich/govalidator/utils.go b/vendor/github.com/asaskevich/govalidator/utils.go deleted file mode 100644 index f4c30f824..000000000 --- a/vendor/github.com/asaskevich/govalidator/utils.go +++ /dev/null @@ -1,270 +0,0 @@ -package govalidator - -import ( - "errors" - "fmt" - "html" - "math" - "path" - "regexp" - "strings" - "unicode" - "unicode/utf8" -) - -// Contains checks if the string contains the substring. -func Contains(str, substring string) bool { - return strings.Contains(str, substring) -} - -// Matches checks if string matches the pattern (pattern is regular expression) -// In case of error return false -func Matches(str, pattern string) bool { - match, _ := regexp.MatchString(pattern, str) - return match -} - -// LeftTrim trims characters from the left side of the input. -// If second argument is empty, it will remove leading spaces. -func LeftTrim(str, chars string) string { - if chars == "" { - return strings.TrimLeftFunc(str, unicode.IsSpace) - } - r, _ := regexp.Compile("^[" + chars + "]+") - return r.ReplaceAllString(str, "") -} - -// RightTrim trims characters from the right side of the input. -// If second argument is empty, it will remove trailing spaces. -func RightTrim(str, chars string) string { - if chars == "" { - return strings.TrimRightFunc(str, unicode.IsSpace) - } - r, _ := regexp.Compile("[" + chars + "]+$") - return r.ReplaceAllString(str, "") -} - -// Trim trims characters from both sides of the input. -// If second argument is empty, it will remove spaces. 
-func Trim(str, chars string) string { - return LeftTrim(RightTrim(str, chars), chars) -} - -// WhiteList removes characters that do not appear in the whitelist. -func WhiteList(str, chars string) string { - pattern := "[^" + chars + "]+" - r, _ := regexp.Compile(pattern) - return r.ReplaceAllString(str, "") -} - -// BlackList removes characters that appear in the blacklist. -func BlackList(str, chars string) string { - pattern := "[" + chars + "]+" - r, _ := regexp.Compile(pattern) - return r.ReplaceAllString(str, "") -} - -// StripLow removes characters with a numerical value < 32 and 127, mostly control characters. -// If keep_new_lines is true, newline characters are preserved (\n and \r, hex 0xA and 0xD). -func StripLow(str string, keepNewLines bool) string { - chars := "" - if keepNewLines { - chars = "\x00-\x09\x0B\x0C\x0E-\x1F\x7F" - } else { - chars = "\x00-\x1F\x7F" - } - return BlackList(str, chars) -} - -// ReplacePattern replaces regular expression pattern in string -func ReplacePattern(str, pattern, replace string) string { - r, _ := regexp.Compile(pattern) - return r.ReplaceAllString(str, replace) -} - -// Escape replaces <, >, & and " with HTML entities. -var Escape = html.EscapeString - -func addSegment(inrune, segment []rune) []rune { - if len(segment) == 0 { - return inrune - } - if len(inrune) != 0 { - inrune = append(inrune, '_') - } - inrune = append(inrune, segment...) - return inrune -} - -// UnderscoreToCamelCase converts from underscore separated form to camel case form. -// Ex.: my_func => MyFunc -func UnderscoreToCamelCase(s string) string { - return strings.Replace(strings.Title(strings.Replace(strings.ToLower(s), "_", " ", -1)), " ", "", -1) -} - -// CamelCaseToUnderscore converts from camel case form to underscore separated form. -// Ex.: MyFunc => my_func -func CamelCaseToUnderscore(str string) string { - var output []rune - var segment []rune - for _, r := range str { - - // not treat number as separate segment - if !unicode.IsLower(r) && string(r) != "_" && !unicode.IsNumber(r) { - output = addSegment(output, segment) - segment = nil - } - segment = append(segment, unicode.ToLower(r)) - } - output = addSegment(output, segment) - return string(output) -} - -// Reverse returns reversed string -func Reverse(s string) string { - r := []rune(s) - for i, j := 0, len(r)-1; i < j; i, j = i+1, j-1 { - r[i], r[j] = r[j], r[i] - } - return string(r) -} - -// GetLines splits string by "\n" and return array of lines -func GetLines(s string) []string { - return strings.Split(s, "\n") -} - -// GetLine returns specified line of multiline string -func GetLine(s string, index int) (string, error) { - lines := GetLines(s) - if index < 0 || index >= len(lines) { - return "", errors.New("line index out of bounds") - } - return lines[index], nil -} - -// RemoveTags removes all tags from HTML string -func RemoveTags(s string) string { - return ReplacePattern(s, "<[^>]*>", "") -} - -// SafeFileName returns safe string that can be used in file names -func SafeFileName(str string) string { - name := strings.ToLower(str) - name = path.Clean(path.Base(name)) - name = strings.Trim(name, " ") - separators, err := regexp.Compile(`[ &_=+:]`) - if err == nil { - name = separators.ReplaceAllString(name, "-") - } - legal, err := regexp.Compile(`[^[:alnum:]-.]`) - if err == nil { - name = legal.ReplaceAllString(name, "") - } - for strings.Contains(name, "--") { - name = strings.Replace(name, "--", "-", -1) - } - return name -} - -// NormalizeEmail canonicalize an email address. 
-// The local part of the email address is lowercased for all domains; the hostname is always lowercased and -// the local part of the email address is always lowercased for hosts that are known to be case-insensitive (currently only GMail). -// Normalization follows special rules for known providers: currently, GMail addresses have dots removed in the local part and -// are stripped of tags (e.g. some.one+tag@gmail.com becomes someone@gmail.com) and all @googlemail.com addresses are -// normalized to @gmail.com. -func NormalizeEmail(str string) (string, error) { - if !IsEmail(str) { - return "", fmt.Errorf("%s is not an email", str) - } - parts := strings.Split(str, "@") - parts[0] = strings.ToLower(parts[0]) - parts[1] = strings.ToLower(parts[1]) - if parts[1] == "gmail.com" || parts[1] == "googlemail.com" { - parts[1] = "gmail.com" - parts[0] = strings.Split(ReplacePattern(parts[0], `\.`, ""), "+")[0] - } - return strings.Join(parts, "@"), nil -} - -// Truncate a string to the closest length without breaking words. -func Truncate(str string, length int, ending string) string { - var aftstr, befstr string - if len(str) > length { - words := strings.Fields(str) - before, present := 0, 0 - for i := range words { - befstr = aftstr - before = present - aftstr = aftstr + words[i] + " " - present = len(aftstr) - if present > length && i != 0 { - if (length - before) < (present - length) { - return Trim(befstr, " /\\.,\"'#!?&@+-") + ending - } - return Trim(aftstr, " /\\.,\"'#!?&@+-") + ending - } - } - } - - return str -} - -// PadLeft pads left side of a string if size of string is less then indicated pad length -func PadLeft(str string, padStr string, padLen int) string { - return buildPadStr(str, padStr, padLen, true, false) -} - -// PadRight pads right side of a string if size of string is less then indicated pad length -func PadRight(str string, padStr string, padLen int) string { - return buildPadStr(str, padStr, padLen, false, true) -} - -// PadBoth pads both sides of a string if size of string is less then indicated pad length -func PadBoth(str string, padStr string, padLen int) string { - return buildPadStr(str, padStr, padLen, true, true) -} - -// PadString either left, right or both sides. -// Note that padding string can be unicode and more then one character -func buildPadStr(str string, padStr string, padLen int, padLeft bool, padRight bool) string { - - // When padded length is less then the current string size - if padLen < utf8.RuneCountInString(str) { - return str - } - - padLen -= utf8.RuneCountInString(str) - - targetLen := padLen - - targetLenLeft := targetLen - targetLenRight := targetLen - if padLeft && padRight { - targetLenLeft = padLen / 2 - targetLenRight = padLen - targetLenLeft - } - - strToRepeatLen := utf8.RuneCountInString(padStr) - - repeatTimes := int(math.Ceil(float64(targetLen) / float64(strToRepeatLen))) - repeatedString := strings.Repeat(padStr, repeatTimes) - - leftSide := "" - if padLeft { - leftSide = repeatedString[0:targetLenLeft] - } - - rightSide := "" - if padRight { - rightSide = repeatedString[0:targetLenRight] - } - - return leftSide + str + rightSide -} - -// TruncatingErrorf removes extra args from fmt.Errorf if not formatted in the str object -func TruncatingErrorf(str string, args ...interface{}) error { - n := strings.Count(str, "%s") - return fmt.Errorf(str, args[:n]...) 
-} diff --git a/vendor/github.com/asaskevich/govalidator/validator.go b/vendor/github.com/asaskevich/govalidator/validator.go deleted file mode 100644 index 5c918fc4b..000000000 --- a/vendor/github.com/asaskevich/govalidator/validator.go +++ /dev/null @@ -1,1627 +0,0 @@ -// Package govalidator is package of validators and sanitizers for strings, structs and collections. -package govalidator - -import ( - "bytes" - "crypto/rsa" - "crypto/x509" - "encoding/base64" - "encoding/json" - "encoding/pem" - "fmt" - "io/ioutil" - "net" - "net/url" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - "time" - "unicode" - "unicode/utf8" -) - -var ( - fieldsRequiredByDefault bool - nilPtrAllowedByRequired = false - notNumberRegexp = regexp.MustCompile("[^0-9]+") - whiteSpacesAndMinus = regexp.MustCompile(`[\s-]+`) - paramsRegexp = regexp.MustCompile(`\(.*\)$`) -) - -const maxURLRuneCount = 2083 -const minURLRuneCount = 3 -const rfc3339WithoutZone = "2006-01-02T15:04:05" - -// SetFieldsRequiredByDefault causes validation to fail when struct fields -// do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`). -// This struct definition will fail govalidator.ValidateStruct() (and the field values do not matter): -// type exampleStruct struct { -// Name string `` -// Email string `valid:"email"` -// This, however, will only fail when Email is empty or an invalid email address: -// type exampleStruct2 struct { -// Name string `valid:"-"` -// Email string `valid:"email"` -// Lastly, this will only fail when Email is an invalid email address but not when it's empty: -// type exampleStruct2 struct { -// Name string `valid:"-"` -// Email string `valid:"email,optional"` -func SetFieldsRequiredByDefault(value bool) { - fieldsRequiredByDefault = value -} - -// SetNilPtrAllowedByRequired causes validation to pass for nil ptrs when a field is set to required. -// The validation will still reject ptr fields in their zero value state. Example with this enabled: -// type exampleStruct struct { -// Name *string `valid:"required"` -// With `Name` set to "", this will be considered invalid input and will cause a validation error. -// With `Name` set to nil, this will be considered valid by validation. -// By default this is disabled. -func SetNilPtrAllowedByRequired(value bool) { - nilPtrAllowedByRequired = value -} - -// IsEmail checks if the string is an email. -func IsEmail(str string) bool { - // TODO uppercase letters are not supported - return rxEmail.MatchString(str) -} - -// IsExistingEmail checks if the string is an email of existing domain -func IsExistingEmail(email string) bool { - - if len(email) < 6 || len(email) > 254 { - return false - } - at := strings.LastIndex(email, "@") - if at <= 0 || at > len(email)-3 { - return false - } - user := email[:at] - host := email[at+1:] - if len(user) > 64 { - return false - } - switch host { - case "localhost", "example.com": - return true - } - if userDotRegexp.MatchString(user) || !userRegexp.MatchString(user) || !hostRegexp.MatchString(host) { - return false - } - if _, err := net.LookupMX(host); err != nil { - if _, err := net.LookupIP(host); err != nil { - return false - } - } - - return true -} - -// IsURL checks if the string is an URL. 
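For the email helpers whose vendored copies are removed above (IsEmail here, NormalizeEmail in utils.go), a short hedged sketch; the expected results follow the GMail-specific rules quoted in the deleted doc comment, and the import path is the upstream one this vendor copy mirrors:

    package main

    import (
        "fmt"

        "github.com/asaskevich/govalidator"
    )

    func main() {
        fmt.Println(govalidator.IsEmail("some.one+tag@googlemail.com")) // true

        // NormalizeEmail lowercases both parts and, for GMail, strips dots
        // and "+tag" and rewrites googlemail.com to gmail.com.
        normalized, err := govalidator.NormalizeEmail("some.one+tag@googlemail.com")
        if err != nil {
            panic(err)
        }
        fmt.Println(normalized) // someone@gmail.com
    }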
-func IsURL(str string) bool { - if str == "" || utf8.RuneCountInString(str) >= maxURLRuneCount || len(str) <= minURLRuneCount || strings.HasPrefix(str, ".") { - return false - } - strTemp := str - if strings.Contains(str, ":") && !strings.Contains(str, "://") { - // support no indicated urlscheme but with colon for port number - // http:// is appended so url.Parse will succeed, strTemp used so it does not impact rxURL.MatchString - strTemp = "http://" + str - } - u, err := url.Parse(strTemp) - if err != nil { - return false - } - if strings.HasPrefix(u.Host, ".") { - return false - } - if u.Host == "" && (u.Path != "" && !strings.Contains(u.Path, ".")) { - return false - } - return rxURL.MatchString(str) -} - -// IsRequestURL checks if the string rawurl, assuming -// it was received in an HTTP request, is a valid -// URL confirm to RFC 3986 -func IsRequestURL(rawurl string) bool { - url, err := url.ParseRequestURI(rawurl) - if err != nil { - return false //Couldn't even parse the rawurl - } - if len(url.Scheme) == 0 { - return false //No Scheme found - } - return true -} - -// IsRequestURI checks if the string rawurl, assuming -// it was received in an HTTP request, is an -// absolute URI or an absolute path. -func IsRequestURI(rawurl string) bool { - _, err := url.ParseRequestURI(rawurl) - return err == nil -} - -// IsAlpha checks if the string contains only letters (a-zA-Z). Empty string is valid. -func IsAlpha(str string) bool { - if IsNull(str) { - return true - } - return rxAlpha.MatchString(str) -} - -//IsUTFLetter checks if the string contains only unicode letter characters. -//Similar to IsAlpha but for all languages. Empty string is valid. -func IsUTFLetter(str string) bool { - if IsNull(str) { - return true - } - - for _, c := range str { - if !unicode.IsLetter(c) { - return false - } - } - return true - -} - -// IsAlphanumeric checks if the string contains only letters and numbers. Empty string is valid. -func IsAlphanumeric(str string) bool { - if IsNull(str) { - return true - } - return rxAlphanumeric.MatchString(str) -} - -// IsUTFLetterNumeric checks if the string contains only unicode letters and numbers. Empty string is valid. -func IsUTFLetterNumeric(str string) bool { - if IsNull(str) { - return true - } - for _, c := range str { - if !unicode.IsLetter(c) && !unicode.IsNumber(c) { //letters && numbers are ok - return false - } - } - return true - -} - -// IsNumeric checks if the string contains only numbers. Empty string is valid. -func IsNumeric(str string) bool { - if IsNull(str) { - return true - } - return rxNumeric.MatchString(str) -} - -// IsUTFNumeric checks if the string contains only unicode numbers of any kind. -// Numbers can be 0-9 but also Fractions ¾,Roman Ⅸ and Hangzhou 〩. Empty string is valid. -func IsUTFNumeric(str string) bool { - if IsNull(str) { - return true - } - if strings.IndexAny(str, "+-") > 0 { - return false - } - if len(str) > 1 { - str = strings.TrimPrefix(str, "-") - str = strings.TrimPrefix(str, "+") - } - for _, c := range str { - if !unicode.IsNumber(c) { //numbers && minus sign are ok - return false - } - } - return true - -} - -// IsUTFDigit checks if the string contains only unicode radix-10 decimal digits. Empty string is valid. 
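The URL predicates deleted in this hunk differ in strictness: IsURL pattern-matches a general URL, IsRequestURL additionally requires a scheme, and IsRequestURI also accepts absolute paths. A small sketch of that difference (outputs are indicative, based on the code above):

    package main

    import (
        "fmt"

        "github.com/asaskevich/govalidator"
    )

    func main() {
        fmt.Println(govalidator.IsURL("https://example.com/a?b=c")) // true
        fmt.Println(govalidator.IsRequestURL("/relative/path"))     // false: no scheme
        fmt.Println(govalidator.IsRequestURI("/relative/path"))     // true: absolute path is enough
    }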
-func IsUTFDigit(str string) bool { - if IsNull(str) { - return true - } - if strings.IndexAny(str, "+-") > 0 { - return false - } - if len(str) > 1 { - str = strings.TrimPrefix(str, "-") - str = strings.TrimPrefix(str, "+") - } - for _, c := range str { - if !unicode.IsDigit(c) { //digits && minus sign are ok - return false - } - } - return true - -} - -// IsHexadecimal checks if the string is a hexadecimal number. -func IsHexadecimal(str string) bool { - return rxHexadecimal.MatchString(str) -} - -// IsHexcolor checks if the string is a hexadecimal color. -func IsHexcolor(str string) bool { - return rxHexcolor.MatchString(str) -} - -// IsRGBcolor checks if the string is a valid RGB color in form rgb(RRR, GGG, BBB). -func IsRGBcolor(str string) bool { - return rxRGBcolor.MatchString(str) -} - -// IsLowerCase checks if the string is lowercase. Empty string is valid. -func IsLowerCase(str string) bool { - if IsNull(str) { - return true - } - return str == strings.ToLower(str) -} - -// IsUpperCase checks if the string is uppercase. Empty string is valid. -func IsUpperCase(str string) bool { - if IsNull(str) { - return true - } - return str == strings.ToUpper(str) -} - -// HasLowerCase checks if the string contains at least 1 lowercase. Empty string is valid. -func HasLowerCase(str string) bool { - if IsNull(str) { - return true - } - return rxHasLowerCase.MatchString(str) -} - -// HasUpperCase checks if the string contains as least 1 uppercase. Empty string is valid. -func HasUpperCase(str string) bool { - if IsNull(str) { - return true - } - return rxHasUpperCase.MatchString(str) -} - -// IsInt checks if the string is an integer. Empty string is valid. -func IsInt(str string) bool { - if IsNull(str) { - return true - } - return rxInt.MatchString(str) -} - -// IsFloat checks if the string is a float. -func IsFloat(str string) bool { - return str != "" && rxFloat.MatchString(str) -} - -// IsDivisibleBy checks if the string is a number that's divisible by another. -// If second argument is not valid integer or zero, it's return false. -// Otherwise, if first argument is not valid integer or zero, it's return true (Invalid string converts to zero). -func IsDivisibleBy(str, num string) bool { - f, _ := ToFloat(str) - p := int64(f) - q, _ := ToInt(num) - if q == 0 { - return false - } - return (p == 0) || (p%q == 0) -} - -// IsNull checks if the string is null. -func IsNull(str string) bool { - return len(str) == 0 -} - -// IsNotNull checks if the string is not null. -func IsNotNull(str string) bool { - return !IsNull(str) -} - -// HasWhitespaceOnly checks the string only contains whitespace -func HasWhitespaceOnly(str string) bool { - return len(str) > 0 && rxHasWhitespaceOnly.MatchString(str) -} - -// HasWhitespace checks if the string contains any whitespace -func HasWhitespace(str string) bool { - return len(str) > 0 && rxHasWhitespace.MatchString(str) -} - -// IsByteLength checks if the string's length (in bytes) falls in a range. -func IsByteLength(str string, min, max int) bool { - return len(str) >= min && len(str) <= max -} - -// IsUUIDv3 checks if the string is a UUID version 3. -func IsUUIDv3(str string) bool { - return rxUUID3.MatchString(str) -} - -// IsUUIDv4 checks if the string is a UUID version 4. -func IsUUIDv4(str string) bool { - return rxUUID4.MatchString(str) -} - -// IsUUIDv5 checks if the string is a UUID version 5. -func IsUUIDv5(str string) bool { - return rxUUID5.MatchString(str) -} - -// IsUUID checks if the string is a UUID (version 3, 4 or 5). 
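IsDivisibleBy above has a subtle contract worth spelling out: a zero or non-numeric second argument yields false, while a non-numeric first argument converts to zero and therefore yields true. A hedged sketch of that behaviour, together with one of the UUID checks from the same hunk:

    package main

    import (
        "fmt"

        "github.com/asaskevich/govalidator"
    )

    func main() {
        fmt.Println(govalidator.IsDivisibleBy("120", "3")) // true
        fmt.Println(govalidator.IsDivisibleBy("120", "0")) // false: zero divisor
        fmt.Println(govalidator.IsDivisibleBy("abc", "3")) // true: "abc" converts to 0

        fmt.Println(govalidator.IsUUIDv4("57b73598-8764-4ad0-a76a-679bb6640eb1")) // true
    }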
-func IsUUID(str string) bool { - return rxUUID.MatchString(str) -} - -// IsCreditCard checks if the string is a credit card. -func IsCreditCard(str string) bool { - sanitized := notNumberRegexp.ReplaceAllString(str, "") - if !rxCreditCard.MatchString(sanitized) { - return false - } - var sum int64 - var digit string - var tmpNum int64 - var shouldDouble bool - for i := len(sanitized) - 1; i >= 0; i-- { - digit = sanitized[i:(i + 1)] - tmpNum, _ = ToInt(digit) - if shouldDouble { - tmpNum *= 2 - if tmpNum >= 10 { - sum += (tmpNum % 10) + 1 - } else { - sum += tmpNum - } - } else { - sum += tmpNum - } - shouldDouble = !shouldDouble - } - - return sum%10 == 0 -} - -// IsISBN10 checks if the string is an ISBN version 10. -func IsISBN10(str string) bool { - return IsISBN(str, 10) -} - -// IsISBN13 checks if the string is an ISBN version 13. -func IsISBN13(str string) bool { - return IsISBN(str, 13) -} - -// IsISBN checks if the string is an ISBN (version 10 or 13). -// If version value is not equal to 10 or 13, it will be checks both variants. -func IsISBN(str string, version int) bool { - sanitized := whiteSpacesAndMinus.ReplaceAllString(str, "") - var checksum int32 - var i int32 - if version == 10 { - if !rxISBN10.MatchString(sanitized) { - return false - } - for i = 0; i < 9; i++ { - checksum += (i + 1) * int32(sanitized[i]-'0') - } - if sanitized[9] == 'X' { - checksum += 10 * 10 - } else { - checksum += 10 * int32(sanitized[9]-'0') - } - if checksum%11 == 0 { - return true - } - return false - } else if version == 13 { - if !rxISBN13.MatchString(sanitized) { - return false - } - factor := []int32{1, 3} - for i = 0; i < 12; i++ { - checksum += factor[i%2] * int32(sanitized[i]-'0') - } - return (int32(sanitized[12]-'0'))-((10-(checksum%10))%10) == 0 - } - return IsISBN(str, 10) || IsISBN(str, 13) -} - -// IsJSON checks if the string is valid JSON (note: uses json.Unmarshal). -func IsJSON(str string) bool { - var js json.RawMessage - return json.Unmarshal([]byte(str), &js) == nil -} - -// IsMultibyte checks if the string contains one or more multibyte chars. Empty string is valid. -func IsMultibyte(str string) bool { - if IsNull(str) { - return true - } - return rxMultibyte.MatchString(str) -} - -// IsASCII checks if the string contains ASCII chars only. Empty string is valid. -func IsASCII(str string) bool { - if IsNull(str) { - return true - } - return rxASCII.MatchString(str) -} - -// IsPrintableASCII checks if the string contains printable ASCII chars only. Empty string is valid. -func IsPrintableASCII(str string) bool { - if IsNull(str) { - return true - } - return rxPrintableASCII.MatchString(str) -} - -// IsFullWidth checks if the string contains any full-width chars. Empty string is valid. -func IsFullWidth(str string) bool { - if IsNull(str) { - return true - } - return rxFullWidth.MatchString(str) -} - -// IsHalfWidth checks if the string contains any half-width chars. Empty string is valid. -func IsHalfWidth(str string) bool { - if IsNull(str) { - return true - } - return rxHalfWidth.MatchString(str) -} - -// IsVariableWidth checks if the string contains a mixture of full and half-width chars. Empty string is valid. -func IsVariableWidth(str string) bool { - if IsNull(str) { - return true - } - return rxHalfWidth.MatchString(str) && rxFullWidth.MatchString(str) -} - -// IsBase64 checks if a string is base64 encoded. -func IsBase64(str string) bool { - return rxBase64.MatchString(str) -} - -// IsFilePath checks is a string is Win or Unix file path and returns it's type. 
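IsCreditCard strips non-digits and then applies the Luhn checksum, and IsISBN verifies the ISBN-10/13 check digits, as shown in the removed code above. A brief sketch using well-known test values (the Visa test number and the example ISBN-13); the outcomes are those implied by the checksums:

    package main

    import (
        "fmt"

        "github.com/asaskevich/govalidator"
    )

    func main() {
        // Spaces are stripped before the Luhn check runs.
        fmt.Println(govalidator.IsCreditCard("4111 1111 1111 1111")) // true
        fmt.Println(govalidator.IsCreditCard("4111 1111 1111 1112")) // false: Luhn checksum fails

        // Hyphens are stripped before the check digit is verified.
        fmt.Println(govalidator.IsISBN13("978-3-16-148410-0")) // true
        fmt.Println(govalidator.IsISBN("3-8362-2119-5", 10))   // true
    }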
-func IsFilePath(str string) (bool, int) { - if rxWinPath.MatchString(str) { - //check windows path limit see: - // http://msdn.microsoft.com/en-us/library/aa365247(VS.85).aspx#maxpath - if len(str[3:]) > 32767 { - return false, Win - } - return true, Win - } else if rxUnixPath.MatchString(str) { - return true, Unix - } - return false, Unknown -} - -// IsDataURI checks if a string is base64 encoded data URI such as an image -func IsDataURI(str string) bool { - dataURI := strings.Split(str, ",") - if !rxDataURI.MatchString(dataURI[0]) { - return false - } - return IsBase64(dataURI[1]) -} - -// IsMagnetURI checks if a string is valid magnet URI -func IsMagnetURI(str string) bool { - return rxMagnetURI.MatchString(str) -} - -// IsISO3166Alpha2 checks if a string is valid two-letter country code -func IsISO3166Alpha2(str string) bool { - for _, entry := range ISO3166List { - if str == entry.Alpha2Code { - return true - } - } - return false -} - -// IsISO3166Alpha3 checks if a string is valid three-letter country code -func IsISO3166Alpha3(str string) bool { - for _, entry := range ISO3166List { - if str == entry.Alpha3Code { - return true - } - } - return false -} - -// IsISO693Alpha2 checks if a string is valid two-letter language code -func IsISO693Alpha2(str string) bool { - for _, entry := range ISO693List { - if str == entry.Alpha2Code { - return true - } - } - return false -} - -// IsISO693Alpha3b checks if a string is valid three-letter language code -func IsISO693Alpha3b(str string) bool { - for _, entry := range ISO693List { - if str == entry.Alpha3bCode { - return true - } - } - return false -} - -// IsDNSName will validate the given string as a DNS name -func IsDNSName(str string) bool { - if str == "" || len(strings.Replace(str, ".", "", -1)) > 255 { - // constraints already violated - return false - } - return !IsIP(str) && rxDNSName.MatchString(str) -} - -// IsHash checks if a string is a hash of type algorithm. -// Algorithm is one of ['md4', 'md5', 'sha1', 'sha256', 'sha384', 'sha512', 'ripemd128', 'ripemd160', 'tiger128', 'tiger160', 'tiger192', 'crc32', 'crc32b'] -func IsHash(str string, algorithm string) bool { - var len string - algo := strings.ToLower(algorithm) - - if algo == "crc32" || algo == "crc32b" { - len = "8" - } else if algo == "md5" || algo == "md4" || algo == "ripemd128" || algo == "tiger128" { - len = "32" - } else if algo == "sha1" || algo == "ripemd160" || algo == "tiger160" { - len = "40" - } else if algo == "tiger192" { - len = "48" - } else if algo == "sha256" { - len = "64" - } else if algo == "sha384" { - len = "96" - } else if algo == "sha512" { - len = "128" - } else { - return false - } - - return Matches(str, "^[a-f0-9]{"+len+"}$") -} - -// IsSHA512 checks is a string is a SHA512 hash. Alias for `IsHash(str, "sha512")` -func IsSHA512(str string) bool { - return IsHash(str, "sha512") -} - -// IsSHA384 checks is a string is a SHA384 hash. Alias for `IsHash(str, "sha384")` -func IsSHA384(str string) bool { - return IsHash(str, "sha384") -} - -// IsSHA256 checks is a string is a SHA256 hash. Alias for `IsHash(str, "sha256")` -func IsSHA256(str string) bool { - return IsHash(str, "sha256") -} - -// IsTiger192 checks is a string is a Tiger192 hash. Alias for `IsHash(str, "tiger192")` -func IsTiger192(str string) bool { - return IsHash(str, "tiger192") -} - -// IsTiger160 checks is a string is a Tiger160 hash. 
Alias for `IsHash(str, "tiger160")` -func IsTiger160(str string) bool { - return IsHash(str, "tiger160") -} - -// IsRipeMD160 checks is a string is a RipeMD160 hash. Alias for `IsHash(str, "ripemd160")` -func IsRipeMD160(str string) bool { - return IsHash(str, "ripemd160") -} - -// IsSHA1 checks is a string is a SHA-1 hash. Alias for `IsHash(str, "sha1")` -func IsSHA1(str string) bool { - return IsHash(str, "sha1") -} - -// IsTiger128 checks is a string is a Tiger128 hash. Alias for `IsHash(str, "tiger128")` -func IsTiger128(str string) bool { - return IsHash(str, "tiger128") -} - -// IsRipeMD128 checks is a string is a RipeMD128 hash. Alias for `IsHash(str, "ripemd128")` -func IsRipeMD128(str string) bool { - return IsHash(str, "ripemd128") -} - -// IsCRC32 checks is a string is a CRC32 hash. Alias for `IsHash(str, "crc32")` -func IsCRC32(str string) bool { - return IsHash(str, "crc32") -} - -// IsCRC32b checks is a string is a CRC32b hash. Alias for `IsHash(str, "crc32b")` -func IsCRC32b(str string) bool { - return IsHash(str, "crc32b") -} - -// IsMD5 checks is a string is a MD5 hash. Alias for `IsHash(str, "md5")` -func IsMD5(str string) bool { - return IsHash(str, "md5") -} - -// IsMD4 checks is a string is a MD4 hash. Alias for `IsHash(str, "md4")` -func IsMD4(str string) bool { - return IsHash(str, "md4") -} - -// IsDialString validates the given string for usage with the various Dial() functions -func IsDialString(str string) bool { - if h, p, err := net.SplitHostPort(str); err == nil && h != "" && p != "" && (IsDNSName(h) || IsIP(h)) && IsPort(p) { - return true - } - - return false -} - -// IsIP checks if a string is either IP version 4 or 6. Alias for `net.ParseIP` -func IsIP(str string) bool { - return net.ParseIP(str) != nil -} - -// IsPort checks if a string represents a valid port -func IsPort(str string) bool { - if i, err := strconv.Atoi(str); err == nil && i > 0 && i < 65536 { - return true - } - return false -} - -// IsIPv4 checks if the string is an IP version 4. -func IsIPv4(str string) bool { - ip := net.ParseIP(str) - return ip != nil && strings.Contains(str, ".") -} - -// IsIPv6 checks if the string is an IP version 6. -func IsIPv6(str string) bool { - ip := net.ParseIP(str) - return ip != nil && strings.Contains(str, ":") -} - -// IsCIDR checks if the string is an valid CIDR notiation (IPV4 & IPV6) -func IsCIDR(str string) bool { - _, _, err := net.ParseCIDR(str) - return err == nil -} - -// IsMAC checks if a string is valid MAC address. -// Possible MAC formats: -// 01:23:45:67:89:ab -// 01:23:45:67:89:ab:cd:ef -// 01-23-45-67-89-ab -// 01-23-45-67-89-ab-cd-ef -// 0123.4567.89ab -// 0123.4567.89ab.cdef -func IsMAC(str string) bool { - _, err := net.ParseMAC(str) - return err == nil -} - -// IsHost checks if the string is a valid IP (both v4 and v6) or a valid DNS name -func IsHost(str string) bool { - return IsIP(str) || IsDNSName(str) -} - -// IsMongoID checks if the string is a valid hex-encoded representation of a MongoDB ObjectId. -func IsMongoID(str string) bool { - return rxHexadecimal.MatchString(str) && (len(str) == 24) -} - -// IsLatitude checks if a string is valid latitude. -func IsLatitude(str string) bool { - return rxLatitude.MatchString(str) -} - -// IsLongitude checks if a string is valid longitude. 
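The network-oriented predicates deleted here mostly defer to the standard library (net.ParseIP, net.ParseCIDR, net.ParseMAC, net.SplitHostPort). A compact sketch of the ones most commonly used:

    package main

    import (
        "fmt"

        "github.com/asaskevich/govalidator"
    )

    func main() {
        fmt.Println(govalidator.IsIPv4("10.0.0.1"))               // true
        fmt.Println(govalidator.IsIPv6("2001:db8::1"))            // true
        fmt.Println(govalidator.IsCIDR("10.0.0.0/8"))             // true
        fmt.Println(govalidator.IsMAC("01:23:45:67:89:ab"))       // true
        fmt.Println(govalidator.IsPort("8080"))                   // true
        fmt.Println(govalidator.IsDialString("example.com:443"))  // true: DNS name plus port
        fmt.Println(govalidator.IsHost("not a host"))             // false
    }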
-func IsLongitude(str string) bool { - return rxLongitude.MatchString(str) -} - -// IsIMEI checks if a string is valid IMEI -func IsIMEI(str string) bool { - return rxIMEI.MatchString(str) -} - -// IsIMSI checks if a string is valid IMSI -func IsIMSI(str string) bool { - if !rxIMSI.MatchString(str) { - return false - } - - mcc, err := strconv.ParseInt(str[0:3], 10, 32) - if err != nil { - return false - } - - switch mcc { - case 202, 204, 206, 208, 212, 213, 214, 216, 218, 219: - case 220, 221, 222, 226, 228, 230, 231, 232, 234, 235: - case 238, 240, 242, 244, 246, 247, 248, 250, 255, 257: - case 259, 260, 262, 266, 268, 270, 272, 274, 276, 278: - case 280, 282, 283, 284, 286, 288, 289, 290, 292, 293: - case 294, 295, 297, 302, 308, 310, 311, 312, 313, 314: - case 315, 316, 330, 332, 334, 338, 340, 342, 344, 346: - case 348, 350, 352, 354, 356, 358, 360, 362, 363, 364: - case 365, 366, 368, 370, 372, 374, 376, 400, 401, 402: - case 404, 405, 406, 410, 412, 413, 414, 415, 416, 417: - case 418, 419, 420, 421, 422, 424, 425, 426, 427, 428: - case 429, 430, 431, 432, 434, 436, 437, 438, 440, 441: - case 450, 452, 454, 455, 456, 457, 460, 461, 466, 467: - case 470, 472, 502, 505, 510, 514, 515, 520, 525, 528: - case 530, 536, 537, 539, 540, 541, 542, 543, 544, 545: - case 546, 547, 548, 549, 550, 551, 552, 553, 554, 555: - case 602, 603, 604, 605, 606, 607, 608, 609, 610, 611: - case 612, 613, 614, 615, 616, 617, 618, 619, 620, 621: - case 622, 623, 624, 625, 626, 627, 628, 629, 630, 631: - case 632, 633, 634, 635, 636, 637, 638, 639, 640, 641: - case 642, 643, 645, 646, 647, 648, 649, 650, 651, 652: - case 653, 654, 655, 657, 658, 659, 702, 704, 706, 708: - case 710, 712, 714, 716, 722, 724, 730, 732, 734, 736: - case 738, 740, 742, 744, 746, 748, 750, 995: - return true - default: - return false - } - return true -} - -// IsRsaPublicKey checks if a string is valid public key with provided length -func IsRsaPublicKey(str string, keylen int) bool { - bb := bytes.NewBufferString(str) - pemBytes, err := ioutil.ReadAll(bb) - if err != nil { - return false - } - block, _ := pem.Decode(pemBytes) - if block != nil && block.Type != "PUBLIC KEY" { - return false - } - var der []byte - - if block != nil { - der = block.Bytes - } else { - der, err = base64.StdEncoding.DecodeString(str) - if err != nil { - return false - } - } - - key, err := x509.ParsePKIXPublicKey(der) - if err != nil { - return false - } - pubkey, ok := key.(*rsa.PublicKey) - if !ok { - return false - } - bitlen := len(pubkey.N.Bytes()) * 8 - return bitlen == int(keylen) -} - -func toJSONName(tag string) string { - if tag == "" { - return "" - } - - // JSON name always comes first. If there's no options then split[0] is - // JSON name, if JSON name is not set, then split[0] is an empty string. - split := strings.SplitN(tag, ",", 2) - - name := split[0] - - // However it is possible that the field is skipped when - // (de-)serializing from/to JSON, in which case assume that there is no - // tag name to use - if name == "-" { - return "" - } - return name -} - -func prependPathToErrors(err error, path string) error { - switch err2 := err.(type) { - case Error: - err2.Path = append([]string{path}, err2.Path...) 
- return err2 - case Errors: - errors := err2.Errors() - for i, err3 := range errors { - errors[i] = prependPathToErrors(err3, path) - } - return err2 - } - return err -} - -// ValidateArray performs validation according to condition iterator that validates every element of the array -func ValidateArray(array []interface{}, iterator ConditionIterator) bool { - return Every(array, iterator) -} - -// ValidateMap use validation map for fields. -// result will be equal to `false` if there are any errors. -// s is the map containing the data to be validated. -// m is the validation map in the form: -// map[string]interface{}{"name":"required,alpha","address":map[string]interface{}{"line1":"required,alphanum"}} -func ValidateMap(s map[string]interface{}, m map[string]interface{}) (bool, error) { - if s == nil { - return true, nil - } - result := true - var err error - var errs Errors - var index int - val := reflect.ValueOf(s) - for key, value := range s { - presentResult := true - validator, ok := m[key] - if !ok { - presentResult = false - var err error - err = fmt.Errorf("all map keys has to be present in the validation map; got %s", key) - err = prependPathToErrors(err, key) - errs = append(errs, err) - } - valueField := reflect.ValueOf(value) - mapResult := true - typeResult := true - structResult := true - resultField := true - switch subValidator := validator.(type) { - case map[string]interface{}: - var err error - if v, ok := value.(map[string]interface{}); !ok { - mapResult = false - err = fmt.Errorf("map validator has to be for the map type only; got %s", valueField.Type().String()) - err = prependPathToErrors(err, key) - errs = append(errs, err) - } else { - mapResult, err = ValidateMap(v, subValidator) - if err != nil { - mapResult = false - err = prependPathToErrors(err, key) - errs = append(errs, err) - } - } - case string: - if (valueField.Kind() == reflect.Struct || - (valueField.Kind() == reflect.Ptr && valueField.Elem().Kind() == reflect.Struct)) && - subValidator != "-" { - var err error - structResult, err = ValidateStruct(valueField.Interface()) - if err != nil { - err = prependPathToErrors(err, key) - errs = append(errs, err) - } - } - resultField, err = typeCheck(valueField, reflect.StructField{ - Name: key, - PkgPath: "", - Type: val.Type(), - Tag: reflect.StructTag(fmt.Sprintf("%s:%q", tagName, subValidator)), - Offset: 0, - Index: []int{index}, - Anonymous: false, - }, val, nil) - if err != nil { - errs = append(errs, err) - } - case nil: - // already handlerd when checked before - default: - typeResult = false - err = fmt.Errorf("map validator has to be either map[string]interface{} or string; got %s", valueField.Type().String()) - err = prependPathToErrors(err, key) - errs = append(errs, err) - } - result = result && presentResult && typeResult && resultField && structResult && mapResult - index++ - } - // checks required keys - requiredResult := true - for key, value := range m { - if schema, ok := value.(string); ok { - tags := parseTagIntoMap(schema) - if required, ok := tags["required"]; ok { - if _, ok := s[key]; !ok { - requiredResult = false - if required.customErrorMessage != "" { - err = Error{key, fmt.Errorf(required.customErrorMessage), true, "required", []string{}} - } else { - err = Error{key, fmt.Errorf("required field missing"), false, "required", []string{}} - } - errs = append(errs, err) - } - } - } - } - - if len(errs) > 0 { - err = errs - } - return result && requiredResult, err -} - -// ValidateStruct use tags for fields. 
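ValidateMap, whose removal begins in this hunk, validates a data map against a validation map of the shape shown in its doc comment; every data key must appear in the validation map, and nested maps are validated recursively. A sketch mirroring that doc comment:

    package main

    import (
        "fmt"

        "github.com/asaskevich/govalidator"
    )

    func main() {
        data := map[string]interface{}{
            "name": "John",
            "address": map[string]interface{}{
                "line1": "Street10",
            },
        }
        rules := map[string]interface{}{
            "name": "required,alpha",
            "address": map[string]interface{}{
                "line1": "required,alphanum",
            },
        }

        ok, err := govalidator.ValidateMap(data, rules)
        fmt.Println(ok, err) // expected: true <nil>
    }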
-// result will be equal to `false` if there are any errors. -// todo currently there is no guarantee that errors will be returned in predictable order (tests may to fail) -func ValidateStruct(s interface{}) (bool, error) { - if s == nil { - return true, nil - } - result := true - var err error - val := reflect.ValueOf(s) - if val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr { - val = val.Elem() - } - // we only accept structs - if val.Kind() != reflect.Struct { - return false, fmt.Errorf("function only accepts structs; got %s", val.Kind()) - } - var errs Errors - for i := 0; i < val.NumField(); i++ { - valueField := val.Field(i) - typeField := val.Type().Field(i) - if typeField.PkgPath != "" { - continue // Private field - } - structResult := true - if valueField.Kind() == reflect.Interface { - valueField = valueField.Elem() - } - if (valueField.Kind() == reflect.Struct || - (valueField.Kind() == reflect.Ptr && valueField.Elem().Kind() == reflect.Struct)) && - typeField.Tag.Get(tagName) != "-" { - var err error - structResult, err = ValidateStruct(valueField.Interface()) - if err != nil { - err = prependPathToErrors(err, typeField.Name) - errs = append(errs, err) - } - } - resultField, err2 := typeCheck(valueField, typeField, val, nil) - if err2 != nil { - - // Replace structure name with JSON name if there is a tag on the variable - jsonTag := toJSONName(typeField.Tag.Get("json")) - if jsonTag != "" { - switch jsonError := err2.(type) { - case Error: - jsonError.Name = jsonTag - err2 = jsonError - case Errors: - for i2, err3 := range jsonError { - switch customErr := err3.(type) { - case Error: - customErr.Name = jsonTag - jsonError[i2] = customErr - } - } - - err2 = jsonError - } - } - - errs = append(errs, err2) - } - result = result && resultField && structResult - } - if len(errs) > 0 { - err = errs - } - return result, err -} - -// ValidateStructAsync performs async validation of the struct and returns results through the channels -func ValidateStructAsync(s interface{}) (<-chan bool, <-chan error) { - res := make(chan bool) - errors := make(chan error) - - go func() { - defer close(res) - defer close(errors) - - isValid, isFailed := ValidateStruct(s) - - res <- isValid - errors <- isFailed - }() - - return res, errors -} - -// ValidateMapAsync performs async validation of the map and returns results through the channels -func ValidateMapAsync(s map[string]interface{}, m map[string]interface{}) (<-chan bool, <-chan error) { - res := make(chan bool) - errors := make(chan error) - - go func() { - defer close(res) - defer close(errors) - - isValid, isFailed := ValidateMap(s, m) - - res <- isValid - errors <- isFailed - }() - - return res, errors -} - -// parseTagIntoMap parses a struct tag `valid:required~Some error message,length(2|3)` into map[string]string{"required": "Some error message", "length(2|3)": ""} -func parseTagIntoMap(tag string) tagOptionsMap { - optionsMap := make(tagOptionsMap) - options := strings.Split(tag, ",") - - for i, option := range options { - option = strings.TrimSpace(option) - - validationOptions := strings.Split(option, "~") - if !isValidTag(validationOptions[0]) { - continue - } - if len(validationOptions) == 2 { - optionsMap[validationOptions[0]] = tagOption{validationOptions[0], validationOptions[1], i} - } else { - optionsMap[validationOptions[0]] = tagOption{validationOptions[0], "", i} - } - } - return optionsMap -} - -func isValidTag(s string) bool { - if s == "" { - return false - } - for _, c := range s { - switch { - case 
strings.ContainsRune("\\'\"!#$%&()*+-./:<=>?@[]^_{|}~ ", c): - // Backslash and quote chars are reserved, but - // otherwise any punctuation chars are allowed - // in a tag name. - default: - if !unicode.IsLetter(c) && !unicode.IsDigit(c) { - return false - } - } - } - return true -} - -// IsSSN will validate the given string as a U.S. Social Security Number -func IsSSN(str string) bool { - if str == "" || len(str) != 11 { - return false - } - return rxSSN.MatchString(str) -} - -// IsSemver checks if string is valid semantic version -func IsSemver(str string) bool { - return rxSemver.MatchString(str) -} - -// IsType checks if interface is of some type -func IsType(v interface{}, params ...string) bool { - if len(params) == 1 { - typ := params[0] - return strings.Replace(reflect.TypeOf(v).String(), " ", "", -1) == strings.Replace(typ, " ", "", -1) - } - return false -} - -// IsTime checks if string is valid according to given format -func IsTime(str string, format string) bool { - _, err := time.Parse(format, str) - return err == nil -} - -// IsUnixTime checks if string is valid unix timestamp value -func IsUnixTime(str string) bool { - if _, err := strconv.Atoi(str); err == nil { - return true - } - return false -} - -// IsRFC3339 checks if string is valid timestamp value according to RFC3339 -func IsRFC3339(str string) bool { - return IsTime(str, time.RFC3339) -} - -// IsRFC3339WithoutZone checks if string is valid timestamp value according to RFC3339 which excludes the timezone. -func IsRFC3339WithoutZone(str string) bool { - return IsTime(str, rfc3339WithoutZone) -} - -// IsISO4217 checks if string is valid ISO currency code -func IsISO4217(str string) bool { - for _, currency := range ISO4217List { - if str == currency { - return true - } - } - - return false -} - -// ByteLength checks string's length -func ByteLength(str string, params ...string) bool { - if len(params) == 2 { - min, _ := ToInt(params[0]) - max, _ := ToInt(params[1]) - return len(str) >= int(min) && len(str) <= int(max) - } - - return false -} - -// RuneLength checks string's length -// Alias for StringLength -func RuneLength(str string, params ...string) bool { - return StringLength(str, params...) -} - -// IsRsaPub checks whether string is valid RSA key -// Alias for IsRsaPublicKey -func IsRsaPub(str string, params ...string) bool { - if len(params) == 1 { - len, _ := ToInt(params[0]) - return IsRsaPublicKey(str, int(len)) - } - - return false -} - -// StringMatches checks if a string matches a given pattern. 
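ValidateStruct drives the tag machinery removed in the surrounding hunks: parseTagIntoMap splits a `valid:"..."` tag into options, and typeCheck applies plain validators ("email"), param validators ("length(2|32)") and custom error messages ("~"). A hedged sketch of the struct-tag forms those doc comments describe:

    package main

    import (
        "fmt"

        "github.com/asaskevich/govalidator"
    )

    type signup struct {
        Name  string `valid:"required~name is mandatory,length(2|32)"`
        Email string `valid:"email,optional"`
    }

    func main() {
        ok, err := govalidator.ValidateStruct(signup{Name: "Jo", Email: ""})
        fmt.Println(ok, err) // true <nil>: an empty Email is allowed by "optional"

        ok, err = govalidator.ValidateStruct(signup{Name: "", Email: "nope"})
        fmt.Println(ok)  // false
        fmt.Println(err) // includes "name is mandatory" and an email validation error
    }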
-func StringMatches(s string, params ...string) bool { - if len(params) == 1 { - pattern := params[0] - return Matches(s, pattern) - } - return false -} - -// StringLength checks string's length (including multi byte strings) -func StringLength(str string, params ...string) bool { - - if len(params) == 2 { - strLength := utf8.RuneCountInString(str) - min, _ := ToInt(params[0]) - max, _ := ToInt(params[1]) - return strLength >= int(min) && strLength <= int(max) - } - - return false -} - -// MinStringLength checks string's minimum length (including multi byte strings) -func MinStringLength(str string, params ...string) bool { - - if len(params) == 1 { - strLength := utf8.RuneCountInString(str) - min, _ := ToInt(params[0]) - return strLength >= int(min) - } - - return false -} - -// MaxStringLength checks string's maximum length (including multi byte strings) -func MaxStringLength(str string, params ...string) bool { - - if len(params) == 1 { - strLength := utf8.RuneCountInString(str) - max, _ := ToInt(params[0]) - return strLength <= int(max) - } - - return false -} - -// Range checks string's length -func Range(str string, params ...string) bool { - if len(params) == 2 { - value, _ := ToFloat(str) - min, _ := ToFloat(params[0]) - max, _ := ToFloat(params[1]) - return InRange(value, min, max) - } - - return false -} - -// IsInRaw checks if string is in list of allowed values -func IsInRaw(str string, params ...string) bool { - if len(params) == 1 { - rawParams := params[0] - - parsedParams := strings.Split(rawParams, "|") - - return IsIn(str, parsedParams...) - } - - return false -} - -// IsIn checks if string str is a member of the set of strings params -func IsIn(str string, params ...string) bool { - for _, param := range params { - if str == param { - return true - } - } - - return false -} - -func checkRequired(v reflect.Value, t reflect.StructField, options tagOptionsMap) (bool, error) { - if nilPtrAllowedByRequired { - k := v.Kind() - if (k == reflect.Ptr || k == reflect.Interface) && v.IsNil() { - return true, nil - } - } - - if requiredOption, isRequired := options["required"]; isRequired { - if len(requiredOption.customErrorMessage) > 0 { - return false, Error{t.Name, fmt.Errorf(requiredOption.customErrorMessage), true, "required", []string{}} - } - return false, Error{t.Name, fmt.Errorf("non zero value required"), false, "required", []string{}} - } else if _, isOptional := options["optional"]; fieldsRequiredByDefault && !isOptional { - return false, Error{t.Name, fmt.Errorf("Missing required field"), false, "required", []string{}} - } - // not required and empty is valid - return true, nil -} - -func typeCheck(v reflect.Value, t reflect.StructField, o reflect.Value, options tagOptionsMap) (isValid bool, resultErr error) { - if !v.IsValid() { - return false, nil - } - - tag := t.Tag.Get(tagName) - - // checks if the field should be ignored - switch tag { - case "": - if v.Kind() != reflect.Slice && v.Kind() != reflect.Map { - if !fieldsRequiredByDefault { - return true, nil - } - return false, Error{t.Name, fmt.Errorf("All fields are required to at least have one validation defined"), false, "required", []string{}} - } - case "-": - return true, nil - } - - isRootType := false - if options == nil { - isRootType = true - options = parseTagIntoMap(tag) - } - - if isEmptyValue(v) { - // an empty value is not validated, checks only required - isValid, resultErr = checkRequired(v, t, options) - for key := range options { - delete(options, key) - } - return isValid, resultErr - } - - var 
customTypeErrors Errors - optionsOrder := options.orderedKeys() - for _, validatorName := range optionsOrder { - validatorStruct := options[validatorName] - if validatefunc, ok := CustomTypeTagMap.Get(validatorName); ok { - delete(options, validatorName) - - if result := validatefunc(v.Interface(), o.Interface()); !result { - if len(validatorStruct.customErrorMessage) > 0 { - customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: TruncatingErrorf(validatorStruct.customErrorMessage, fmt.Sprint(v), validatorName), CustomErrorMessageExists: true, Validator: stripParams(validatorName)}) - continue - } - customTypeErrors = append(customTypeErrors, Error{Name: t.Name, Err: fmt.Errorf("%s does not validate as %s", fmt.Sprint(v), validatorName), CustomErrorMessageExists: false, Validator: stripParams(validatorName)}) - } - } - } - - if len(customTypeErrors.Errors()) > 0 { - return false, customTypeErrors - } - - if isRootType { - // Ensure that we've checked the value by all specified validators before report that the value is valid - defer func() { - delete(options, "optional") - delete(options, "required") - - if isValid && resultErr == nil && len(options) != 0 { - optionsOrder := options.orderedKeys() - for _, validator := range optionsOrder { - isValid = false - resultErr = Error{t.Name, fmt.Errorf( - "The following validator is invalid or can't be applied to the field: %q", validator), false, stripParams(validator), []string{}} - return - } - } - }() - } - - for _, validatorSpec := range optionsOrder { - validatorStruct := options[validatorSpec] - var negate bool - validator := validatorSpec - customMsgExists := len(validatorStruct.customErrorMessage) > 0 - - // checks whether the tag looks like '!something' or 'something' - if validator[0] == '!' { - validator = validator[1:] - negate = true - } - - // checks for interface param validators - for key, value := range InterfaceParamTagRegexMap { - ps := value.FindStringSubmatch(validator) - if len(ps) == 0 { - continue - } - - validatefunc, ok := InterfaceParamTagMap[key] - if !ok { - continue - } - - delete(options, validatorSpec) - - field := fmt.Sprint(v) - if result := validatefunc(v.Interface(), ps[1:]...); (!result && !negate) || (result && negate) { - if customMsgExists { - return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - if negate { - return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - } - } - - switch v.Kind() { - case reflect.Bool, - reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, - reflect.Float32, reflect.Float64, - reflect.String: - // for each tag option checks the map of validator functions - for _, validatorSpec := range optionsOrder { - validatorStruct := options[validatorSpec] - var negate bool - validator := validatorSpec - customMsgExists := len(validatorStruct.customErrorMessage) > 0 - - // checks whether the tag looks like '!something' or 'something' - if validator[0] == '!' 
{ - validator = validator[1:] - negate = true - } - - // checks for param validators - for key, value := range ParamTagRegexMap { - ps := value.FindStringSubmatch(validator) - if len(ps) == 0 { - continue - } - - validatefunc, ok := ParamTagMap[key] - if !ok { - continue - } - - delete(options, validatorSpec) - - switch v.Kind() { - case reflect.String, - reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, - reflect.Float32, reflect.Float64: - - field := fmt.Sprint(v) // make value into string, then validate with regex - if result := validatefunc(field, ps[1:]...); (!result && !negate) || (result && negate) { - if customMsgExists { - return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - if negate { - return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - default: - // type not yet supported, fail - return false, Error{t.Name, fmt.Errorf("Validator %s doesn't support kind %s", validator, v.Kind()), false, stripParams(validatorSpec), []string{}} - } - } - - if validatefunc, ok := TagMap[validator]; ok { - delete(options, validatorSpec) - - switch v.Kind() { - case reflect.String, - reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, - reflect.Float32, reflect.Float64: - field := fmt.Sprint(v) // make value into string, then validate with regex - if result := validatefunc(field); !result && !negate || result && negate { - if customMsgExists { - return false, Error{t.Name, TruncatingErrorf(validatorStruct.customErrorMessage, field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - if negate { - return false, Error{t.Name, fmt.Errorf("%s does validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - return false, Error{t.Name, fmt.Errorf("%s does not validate as %s", field, validator), customMsgExists, stripParams(validatorSpec), []string{}} - } - default: - //Not Yet Supported Types (Fail here!) 
- err := fmt.Errorf("Validator %s doesn't support kind %s for value %v", validator, v.Kind(), v) - return false, Error{t.Name, err, false, stripParams(validatorSpec), []string{}} - } - } - } - return true, nil - case reflect.Map: - if v.Type().Key().Kind() != reflect.String { - return false, &UnsupportedTypeError{v.Type()} - } - var sv stringValues - sv = v.MapKeys() - sort.Sort(sv) - result := true - for i, k := range sv { - var resultItem bool - var err error - if v.MapIndex(k).Kind() != reflect.Struct { - resultItem, err = typeCheck(v.MapIndex(k), t, o, options) - if err != nil { - return false, err - } - } else { - resultItem, err = ValidateStruct(v.MapIndex(k).Interface()) - if err != nil { - err = prependPathToErrors(err, t.Name+"."+sv[i].Interface().(string)) - return false, err - } - } - result = result && resultItem - } - return result, nil - case reflect.Slice, reflect.Array: - result := true - for i := 0; i < v.Len(); i++ { - var resultItem bool - var err error - if v.Index(i).Kind() != reflect.Struct { - resultItem, err = typeCheck(v.Index(i), t, o, options) - if err != nil { - return false, err - } - } else { - resultItem, err = ValidateStruct(v.Index(i).Interface()) - if err != nil { - err = prependPathToErrors(err, t.Name+"."+strconv.Itoa(i)) - return false, err - } - } - result = result && resultItem - } - return result, nil - case reflect.Interface: - // If the value is an interface then encode its element - if v.IsNil() { - return true, nil - } - return ValidateStruct(v.Interface()) - case reflect.Ptr: - // If the value is a pointer then checks its element - if v.IsNil() { - return true, nil - } - return typeCheck(v.Elem(), t, o, options) - case reflect.Struct: - return true, nil - default: - return false, &UnsupportedTypeError{v.Type()} - } -} - -func stripParams(validatorString string) string { - return paramsRegexp.ReplaceAllString(validatorString, "") -} - -// isEmptyValue checks whether value empty or not -func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.String, reflect.Array: - return v.Len() == 0 - case reflect.Map, reflect.Slice: - return v.Len() == 0 || v.IsNil() - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - } - - return reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface()) -} - -// ErrorByField returns error for specified field of the struct -// validated by ValidateStruct or empty string if there are no errors -// or this field doesn't exists or doesn't have any errors. -func ErrorByField(e error, field string) string { - if e == nil { - return "" - } - return ErrorsByField(e)[field] -} - -// ErrorsByField returns map of errors of the struct validated -// by ValidateStruct or empty map if there are no errors. 
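ErrorByField and ErrorsByField, defined just below, flatten the Error/Errors values returned by ValidateStruct into a field-to-message map, which is what most callers log or return to clients. A short sketch of that pattern (the field name and message are illustrative):

    package main

    import (
        "fmt"

        "github.com/asaskevich/govalidator"
    )

    type account struct {
        Email string `valid:"email"`
    }

    func main() {
        _, err := govalidator.ValidateStruct(account{Email: "not-an-email"})

        // e.g. map[Email:not-an-email does not validate as email]
        fmt.Println(govalidator.ErrorsByField(err))

        // Single-field form; returns "" when the field has no error.
        fmt.Println(govalidator.ErrorByField(err, "Email"))
    }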
-func ErrorsByField(e error) map[string]string { - m := make(map[string]string) - if e == nil { - return m - } - // prototype for ValidateStruct - - switch e := e.(type) { - case Error: - m[e.Name] = e.Err.Error() - case Errors: - for _, item := range e.Errors() { - n := ErrorsByField(item) - for k, v := range n { - m[k] = v - } - } - } - - return m -} - -// Error returns string equivalent for reflect.Type -func (e *UnsupportedTypeError) Error() string { - return "validator: unsupported type: " + e.Type.String() -} - -func (sv stringValues) Len() int { return len(sv) } -func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } -func (sv stringValues) Less(i, j int) bool { return sv.get(i) < sv.get(j) } -func (sv stringValues) get(i int) string { return sv[i].String() } diff --git a/vendor/github.com/asaskevich/govalidator/wercker.yml b/vendor/github.com/asaskevich/govalidator/wercker.yml deleted file mode 100644 index bc5f7b086..000000000 --- a/vendor/github.com/asaskevich/govalidator/wercker.yml +++ /dev/null @@ -1,15 +0,0 @@ -box: golang -build: - steps: - - setup-go-workspace - - - script: - name: go get - code: | - go version - go get -t ./... - - - script: - name: go test - code: | - go test -race -v ./... diff --git a/vendor/github.com/blang/semver/v4/LICENSE b/vendor/github.com/blang/semver/v4/LICENSE deleted file mode 100644 index 5ba5c86fc..000000000 --- a/vendor/github.com/blang/semver/v4/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License - -Copyright (c) 2014 Benedikt Lang - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - diff --git a/vendor/github.com/blang/semver/v4/json.go b/vendor/github.com/blang/semver/v4/json.go deleted file mode 100644 index a74bf7c44..000000000 --- a/vendor/github.com/blang/semver/v4/json.go +++ /dev/null @@ -1,23 +0,0 @@ -package semver - -import ( - "encoding/json" -) - -// MarshalJSON implements the encoding/json.Marshaler interface. -func (v Version) MarshalJSON() ([]byte, error) { - return json.Marshal(v.String()) -} - -// UnmarshalJSON implements the encoding/json.Unmarshaler interface. 
-func (v *Version) UnmarshalJSON(data []byte) (err error) { - var versionString string - - if err = json.Unmarshal(data, &versionString); err != nil { - return - } - - *v, err = Parse(versionString) - - return -} diff --git a/vendor/github.com/blang/semver/v4/range.go b/vendor/github.com/blang/semver/v4/range.go deleted file mode 100644 index 95f7139b9..000000000 --- a/vendor/github.com/blang/semver/v4/range.go +++ /dev/null @@ -1,416 +0,0 @@ -package semver - -import ( - "fmt" - "strconv" - "strings" - "unicode" -) - -type wildcardType int - -const ( - noneWildcard wildcardType = iota - majorWildcard wildcardType = 1 - minorWildcard wildcardType = 2 - patchWildcard wildcardType = 3 -) - -func wildcardTypefromInt(i int) wildcardType { - switch i { - case 1: - return majorWildcard - case 2: - return minorWildcard - case 3: - return patchWildcard - default: - return noneWildcard - } -} - -type comparator func(Version, Version) bool - -var ( - compEQ comparator = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) == 0 - } - compNE = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) != 0 - } - compGT = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) == 1 - } - compGE = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) >= 0 - } - compLT = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) == -1 - } - compLE = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) <= 0 - } -) - -type versionRange struct { - v Version - c comparator -} - -// rangeFunc creates a Range from the given versionRange. -func (vr *versionRange) rangeFunc() Range { - return Range(func(v Version) bool { - return vr.c(v, vr.v) - }) -} - -// Range represents a range of versions. -// A Range can be used to check if a Version satisfies it: -// -// range, err := semver.ParseRange(">1.0.0 <2.0.0") -// range(semver.MustParse("1.1.1") // returns true -type Range func(Version) bool - -// OR combines the existing Range with another Range using logical OR. -func (rf Range) OR(f Range) Range { - return Range(func(v Version) bool { - return rf(v) || f(v) - }) -} - -// AND combines the existing Range with another Range using logical AND. -func (rf Range) AND(f Range) Range { - return Range(func(v Version) bool { - return rf(v) && f(v) - }) -} - -// ParseRange parses a range and returns a Range. -// If the range could not be parsed an error is returned. -// -// Valid ranges are: -// - "<1.0.0" -// - "<=1.0.0" -// - ">1.0.0" -// - ">=1.0.0" -// - "1.0.0", "=1.0.0", "==1.0.0" -// - "!1.0.0", "!=1.0.0" -// -// A Range can consist of multiple ranges separated by space: -// Ranges can be linked by logical AND: -// - ">1.0.0 <2.0.0" would match between both ranges, so "1.1.1" and "1.8.7" but not "1.0.0" or "2.0.0" -// - ">1.0.0 <3.0.0 !2.0.3-beta.2" would match every version between 1.0.0 and 3.0.0 except 2.0.3-beta.2 -// -// Ranges can also be linked by logical OR: -// - "<2.0.0 || >=3.0.0" would match "1.x.x" and "3.x.x" but not "2.x.x" -// -// AND has a higher precedence than OR. It's not possible to use brackets. 
-// -// Ranges can be combined by both AND and OR -// -// - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1` -func ParseRange(s string) (Range, error) { - parts := splitAndTrim(s) - orParts, err := splitORParts(parts) - if err != nil { - return nil, err - } - expandedParts, err := expandWildcardVersion(orParts) - if err != nil { - return nil, err - } - var orFn Range - for _, p := range expandedParts { - var andFn Range - for _, ap := range p { - opStr, vStr, err := splitComparatorVersion(ap) - if err != nil { - return nil, err - } - vr, err := buildVersionRange(opStr, vStr) - if err != nil { - return nil, fmt.Errorf("Could not parse Range %q: %s", ap, err) - } - rf := vr.rangeFunc() - - // Set function - if andFn == nil { - andFn = rf - } else { // Combine with existing function - andFn = andFn.AND(rf) - } - } - if orFn == nil { - orFn = andFn - } else { - orFn = orFn.OR(andFn) - } - - } - return orFn, nil -} - -// splitORParts splits the already cleaned parts by '||'. -// Checks for invalid positions of the operator and returns an -// error if found. -func splitORParts(parts []string) ([][]string, error) { - var ORparts [][]string - last := 0 - for i, p := range parts { - if p == "||" { - if i == 0 { - return nil, fmt.Errorf("First element in range is '||'") - } - ORparts = append(ORparts, parts[last:i]) - last = i + 1 - } - } - if last == len(parts) { - return nil, fmt.Errorf("Last element in range is '||'") - } - ORparts = append(ORparts, parts[last:]) - return ORparts, nil -} - -// buildVersionRange takes a slice of 2: operator and version -// and builds a versionRange, otherwise an error. -func buildVersionRange(opStr, vStr string) (*versionRange, error) { - c := parseComparator(opStr) - if c == nil { - return nil, fmt.Errorf("Could not parse comparator %q in %q", opStr, strings.Join([]string{opStr, vStr}, "")) - } - v, err := Parse(vStr) - if err != nil { - return nil, fmt.Errorf("Could not parse version %q in %q: %s", vStr, strings.Join([]string{opStr, vStr}, ""), err) - } - - return &versionRange{ - v: v, - c: c, - }, nil - -} - -// inArray checks if a byte is contained in an array of bytes -func inArray(s byte, list []byte) bool { - for _, el := range list { - if el == s { - return true - } - } - return false -} - -// splitAndTrim splits a range string by spaces and cleans whitespaces -func splitAndTrim(s string) (result []string) { - last := 0 - var lastChar byte - excludeFromSplit := []byte{'>', '<', '='} - for i := 0; i < len(s); i++ { - if s[i] == ' ' && !inArray(lastChar, excludeFromSplit) { - if last < i-1 { - result = append(result, s[last:i]) - } - last = i + 1 - } else if s[i] != ' ' { - lastChar = s[i] - } - } - if last < len(s)-1 { - result = append(result, s[last:]) - } - - for i, v := range result { - result[i] = strings.Replace(v, " ", "", -1) - } - - // parts := strings.Split(s, " ") - // for _, x := range parts { - // if s := strings.TrimSpace(x); len(s) != 0 { - // result = append(result, s) - // } - // } - return -} - -// splitComparatorVersion splits the comparator from the version. -// Input must be free of leading or trailing spaces. 
-func splitComparatorVersion(s string) (string, string, error) { - i := strings.IndexFunc(s, unicode.IsDigit) - if i == -1 { - return "", "", fmt.Errorf("Could not get version from string: %q", s) - } - return strings.TrimSpace(s[0:i]), s[i:], nil -} - -// getWildcardType will return the type of wildcard that the -// passed version contains -func getWildcardType(vStr string) wildcardType { - parts := strings.Split(vStr, ".") - nparts := len(parts) - wildcard := parts[nparts-1] - - possibleWildcardType := wildcardTypefromInt(nparts) - if wildcard == "x" { - return possibleWildcardType - } - - return noneWildcard -} - -// createVersionFromWildcard will convert a wildcard version -// into a regular version, replacing 'x's with '0's, handling -// special cases like '1.x.x' and '1.x' -func createVersionFromWildcard(vStr string) string { - // handle 1.x.x - vStr2 := strings.Replace(vStr, ".x.x", ".x", 1) - vStr2 = strings.Replace(vStr2, ".x", ".0", 1) - parts := strings.Split(vStr2, ".") - - // handle 1.x - if len(parts) == 2 { - return vStr2 + ".0" - } - - return vStr2 -} - -// incrementMajorVersion will increment the major version -// of the passed version -func incrementMajorVersion(vStr string) (string, error) { - parts := strings.Split(vStr, ".") - i, err := strconv.Atoi(parts[0]) - if err != nil { - return "", err - } - parts[0] = strconv.Itoa(i + 1) - - return strings.Join(parts, "."), nil -} - -// incrementMajorVersion will increment the minor version -// of the passed version -func incrementMinorVersion(vStr string) (string, error) { - parts := strings.Split(vStr, ".") - i, err := strconv.Atoi(parts[1]) - if err != nil { - return "", err - } - parts[1] = strconv.Itoa(i + 1) - - return strings.Join(parts, "."), nil -} - -// expandWildcardVersion will expand wildcards inside versions -// following these rules: -// -// * when dealing with patch wildcards: -// >= 1.2.x will become >= 1.2.0 -// <= 1.2.x will become < 1.3.0 -// > 1.2.x will become >= 1.3.0 -// < 1.2.x will become < 1.2.0 -// != 1.2.x will become < 1.2.0 >= 1.3.0 -// -// * when dealing with minor wildcards: -// >= 1.x will become >= 1.0.0 -// <= 1.x will become < 2.0.0 -// > 1.x will become >= 2.0.0 -// < 1.0 will become < 1.0.0 -// != 1.x will become < 1.0.0 >= 2.0.0 -// -// * when dealing with wildcards without -// version operator: -// 1.2.x will become >= 1.2.0 < 1.3.0 -// 1.x will become >= 1.0.0 < 2.0.0 -func expandWildcardVersion(parts [][]string) ([][]string, error) { - var expandedParts [][]string - for _, p := range parts { - var newParts []string - for _, ap := range p { - if strings.Contains(ap, "x") { - opStr, vStr, err := splitComparatorVersion(ap) - if err != nil { - return nil, err - } - - versionWildcardType := getWildcardType(vStr) - flatVersion := createVersionFromWildcard(vStr) - - var resultOperator string - var shouldIncrementVersion bool - switch opStr { - case ">": - resultOperator = ">=" - shouldIncrementVersion = true - case ">=": - resultOperator = ">=" - case "<": - resultOperator = "<" - case "<=": - resultOperator = "<" - shouldIncrementVersion = true - case "", "=", "==": - newParts = append(newParts, ">="+flatVersion) - resultOperator = "<" - shouldIncrementVersion = true - case "!=", "!": - newParts = append(newParts, "<"+flatVersion) - resultOperator = ">=" - shouldIncrementVersion = true - } - - var resultVersion string - if shouldIncrementVersion { - switch versionWildcardType { - case patchWildcard: - resultVersion, _ = incrementMinorVersion(flatVersion) - case minorWildcard: - 
resultVersion, _ = incrementMajorVersion(flatVersion) - } - } else { - resultVersion = flatVersion - } - - ap = resultOperator + resultVersion - } - newParts = append(newParts, ap) - } - expandedParts = append(expandedParts, newParts) - } - - return expandedParts, nil -} - -func parseComparator(s string) comparator { - switch s { - case "==": - fallthrough - case "": - fallthrough - case "=": - return compEQ - case ">": - return compGT - case ">=": - return compGE - case "<": - return compLT - case "<=": - return compLE - case "!": - fallthrough - case "!=": - return compNE - } - - return nil -} - -// MustParseRange is like ParseRange but panics if the range cannot be parsed. -func MustParseRange(s string) Range { - r, err := ParseRange(s) - if err != nil { - panic(`semver: ParseRange(` + s + `): ` + err.Error()) - } - return r -} diff --git a/vendor/github.com/blang/semver/v4/semver.go b/vendor/github.com/blang/semver/v4/semver.go deleted file mode 100644 index 307de610f..000000000 --- a/vendor/github.com/blang/semver/v4/semver.go +++ /dev/null @@ -1,476 +0,0 @@ -package semver - -import ( - "errors" - "fmt" - "strconv" - "strings" -) - -const ( - numbers string = "0123456789" - alphas = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" - alphanum = alphas + numbers -) - -// SpecVersion is the latest fully supported spec version of semver -var SpecVersion = Version{ - Major: 2, - Minor: 0, - Patch: 0, -} - -// Version represents a semver compatible version -type Version struct { - Major uint64 - Minor uint64 - Patch uint64 - Pre []PRVersion - Build []string //No Precedence -} - -// Version to string -func (v Version) String() string { - b := make([]byte, 0, 5) - b = strconv.AppendUint(b, v.Major, 10) - b = append(b, '.') - b = strconv.AppendUint(b, v.Minor, 10) - b = append(b, '.') - b = strconv.AppendUint(b, v.Patch, 10) - - if len(v.Pre) > 0 { - b = append(b, '-') - b = append(b, v.Pre[0].String()...) - - for _, pre := range v.Pre[1:] { - b = append(b, '.') - b = append(b, pre.String()...) - } - } - - if len(v.Build) > 0 { - b = append(b, '+') - b = append(b, v.Build[0]...) - - for _, build := range v.Build[1:] { - b = append(b, '.') - b = append(b, build...) - } - } - - return string(b) -} - -// FinalizeVersion discards prerelease and build number and only returns -// major, minor and patch number. -func (v Version) FinalizeVersion() string { - b := make([]byte, 0, 5) - b = strconv.AppendUint(b, v.Major, 10) - b = append(b, '.') - b = strconv.AppendUint(b, v.Minor, 10) - b = append(b, '.') - b = strconv.AppendUint(b, v.Patch, 10) - return string(b) -} - -// Equals checks if v is equal to o. -func (v Version) Equals(o Version) bool { - return (v.Compare(o) == 0) -} - -// EQ checks if v is equal to o. -func (v Version) EQ(o Version) bool { - return (v.Compare(o) == 0) -} - -// NE checks if v is not equal to o. -func (v Version) NE(o Version) bool { - return (v.Compare(o) != 0) -} - -// GT checks if v is greater than o. -func (v Version) GT(o Version) bool { - return (v.Compare(o) == 1) -} - -// GTE checks if v is greater than or equal to o. -func (v Version) GTE(o Version) bool { - return (v.Compare(o) >= 0) -} - -// GE checks if v is greater than or equal to o. -func (v Version) GE(o Version) bool { - return (v.Compare(o) >= 0) -} - -// LT checks if v is less than o. -func (v Version) LT(o Version) bool { - return (v.Compare(o) == -1) -} - -// LTE checks if v is less than or equal to o. 
-func (v Version) LTE(o Version) bool { - return (v.Compare(o) <= 0) -} - -// LE checks if v is less than or equal to o. -func (v Version) LE(o Version) bool { - return (v.Compare(o) <= 0) -} - -// Compare compares Versions v to o: -// -1 == v is less than o -// 0 == v is equal to o -// 1 == v is greater than o -func (v Version) Compare(o Version) int { - if v.Major != o.Major { - if v.Major > o.Major { - return 1 - } - return -1 - } - if v.Minor != o.Minor { - if v.Minor > o.Minor { - return 1 - } - return -1 - } - if v.Patch != o.Patch { - if v.Patch > o.Patch { - return 1 - } - return -1 - } - - // Quick comparison if a version has no prerelease versions - if len(v.Pre) == 0 && len(o.Pre) == 0 { - return 0 - } else if len(v.Pre) == 0 && len(o.Pre) > 0 { - return 1 - } else if len(v.Pre) > 0 && len(o.Pre) == 0 { - return -1 - } - - i := 0 - for ; i < len(v.Pre) && i < len(o.Pre); i++ { - if comp := v.Pre[i].Compare(o.Pre[i]); comp == 0 { - continue - } else if comp == 1 { - return 1 - } else { - return -1 - } - } - - // If all pr versions are the equal but one has further prversion, this one greater - if i == len(v.Pre) && i == len(o.Pre) { - return 0 - } else if i == len(v.Pre) && i < len(o.Pre) { - return -1 - } else { - return 1 - } - -} - -// IncrementPatch increments the patch version -func (v *Version) IncrementPatch() error { - v.Patch++ - return nil -} - -// IncrementMinor increments the minor version -func (v *Version) IncrementMinor() error { - v.Minor++ - v.Patch = 0 - return nil -} - -// IncrementMajor increments the major version -func (v *Version) IncrementMajor() error { - v.Major++ - v.Minor = 0 - v.Patch = 0 - return nil -} - -// Validate validates v and returns error in case -func (v Version) Validate() error { - // Major, Minor, Patch already validated using uint64 - - for _, pre := range v.Pre { - if !pre.IsNum { //Numeric prerelease versions already uint64 - if len(pre.VersionStr) == 0 { - return fmt.Errorf("Prerelease can not be empty %q", pre.VersionStr) - } - if !containsOnly(pre.VersionStr, alphanum) { - return fmt.Errorf("Invalid character(s) found in prerelease %q", pre.VersionStr) - } - } - } - - for _, build := range v.Build { - if len(build) == 0 { - return fmt.Errorf("Build meta data can not be empty %q", build) - } - if !containsOnly(build, alphanum) { - return fmt.Errorf("Invalid character(s) found in build meta data %q", build) - } - } - - return nil -} - -// New is an alias for Parse and returns a pointer, parses version string and returns a validated Version or error -func New(s string) (*Version, error) { - v, err := Parse(s) - vp := &v - return vp, err -} - -// Make is an alias for Parse, parses version string and returns a validated Version or error -func Make(s string) (Version, error) { - return Parse(s) -} - -// ParseTolerant allows for certain version specifications that do not strictly adhere to semver -// specs to be parsed by this library. It does so by normalizing versions before passing them to -// Parse(). It currently trims spaces, removes a "v" prefix, adds a 0 patch number to versions -// with only major and minor components specified, and removes leading 0s. -func ParseTolerant(s string) (Version, error) { - s = strings.TrimSpace(s) - s = strings.TrimPrefix(s, "v") - - // Split into major.minor.(patch+pr+meta) - parts := strings.SplitN(s, ".", 3) - // Remove leading zeros. 
- for i, p := range parts { - if len(p) > 1 { - p = strings.TrimLeft(p, "0") - if len(p) == 0 || !strings.ContainsAny(p[0:1], "0123456789") { - p = "0" + p - } - parts[i] = p - } - } - // Fill up shortened versions. - if len(parts) < 3 { - if strings.ContainsAny(parts[len(parts)-1], "+-") { - return Version{}, errors.New("Short version cannot contain PreRelease/Build meta data") - } - for len(parts) < 3 { - parts = append(parts, "0") - } - } - s = strings.Join(parts, ".") - - return Parse(s) -} - -// Parse parses version string and returns a validated Version or error -func Parse(s string) (Version, error) { - if len(s) == 0 { - return Version{}, errors.New("Version string empty") - } - - // Split into major.minor.(patch+pr+meta) - parts := strings.SplitN(s, ".", 3) - if len(parts) != 3 { - return Version{}, errors.New("No Major.Minor.Patch elements found") - } - - // Major - if !containsOnly(parts[0], numbers) { - return Version{}, fmt.Errorf("Invalid character(s) found in major number %q", parts[0]) - } - if hasLeadingZeroes(parts[0]) { - return Version{}, fmt.Errorf("Major number must not contain leading zeroes %q", parts[0]) - } - major, err := strconv.ParseUint(parts[0], 10, 64) - if err != nil { - return Version{}, err - } - - // Minor - if !containsOnly(parts[1], numbers) { - return Version{}, fmt.Errorf("Invalid character(s) found in minor number %q", parts[1]) - } - if hasLeadingZeroes(parts[1]) { - return Version{}, fmt.Errorf("Minor number must not contain leading zeroes %q", parts[1]) - } - minor, err := strconv.ParseUint(parts[1], 10, 64) - if err != nil { - return Version{}, err - } - - v := Version{} - v.Major = major - v.Minor = minor - - var build, prerelease []string - patchStr := parts[2] - - if buildIndex := strings.IndexRune(patchStr, '+'); buildIndex != -1 { - build = strings.Split(patchStr[buildIndex+1:], ".") - patchStr = patchStr[:buildIndex] - } - - if preIndex := strings.IndexRune(patchStr, '-'); preIndex != -1 { - prerelease = strings.Split(patchStr[preIndex+1:], ".") - patchStr = patchStr[:preIndex] - } - - if !containsOnly(patchStr, numbers) { - return Version{}, fmt.Errorf("Invalid character(s) found in patch number %q", patchStr) - } - if hasLeadingZeroes(patchStr) { - return Version{}, fmt.Errorf("Patch number must not contain leading zeroes %q", patchStr) - } - patch, err := strconv.ParseUint(patchStr, 10, 64) - if err != nil { - return Version{}, err - } - - v.Patch = patch - - // Prerelease - for _, prstr := range prerelease { - parsedPR, err := NewPRVersion(prstr) - if err != nil { - return Version{}, err - } - v.Pre = append(v.Pre, parsedPR) - } - - // Build meta data - for _, str := range build { - if len(str) == 0 { - return Version{}, errors.New("Build meta data is empty") - } - if !containsOnly(str, alphanum) { - return Version{}, fmt.Errorf("Invalid character(s) found in build meta data %q", str) - } - v.Build = append(v.Build, str) - } - - return v, nil -} - -// MustParse is like Parse but panics if the version cannot be parsed. 
-func MustParse(s string) Version { - v, err := Parse(s) - if err != nil { - panic(`semver: Parse(` + s + `): ` + err.Error()) - } - return v -} - -// PRVersion represents a PreRelease Version -type PRVersion struct { - VersionStr string - VersionNum uint64 - IsNum bool -} - -// NewPRVersion creates a new valid prerelease version -func NewPRVersion(s string) (PRVersion, error) { - if len(s) == 0 { - return PRVersion{}, errors.New("Prerelease is empty") - } - v := PRVersion{} - if containsOnly(s, numbers) { - if hasLeadingZeroes(s) { - return PRVersion{}, fmt.Errorf("Numeric PreRelease version must not contain leading zeroes %q", s) - } - num, err := strconv.ParseUint(s, 10, 64) - - // Might never be hit, but just in case - if err != nil { - return PRVersion{}, err - } - v.VersionNum = num - v.IsNum = true - } else if containsOnly(s, alphanum) { - v.VersionStr = s - v.IsNum = false - } else { - return PRVersion{}, fmt.Errorf("Invalid character(s) found in prerelease %q", s) - } - return v, nil -} - -// IsNumeric checks if prerelease-version is numeric -func (v PRVersion) IsNumeric() bool { - return v.IsNum -} - -// Compare compares two PreRelease Versions v and o: -// -1 == v is less than o -// 0 == v is equal to o -// 1 == v is greater than o -func (v PRVersion) Compare(o PRVersion) int { - if v.IsNum && !o.IsNum { - return -1 - } else if !v.IsNum && o.IsNum { - return 1 - } else if v.IsNum && o.IsNum { - if v.VersionNum == o.VersionNum { - return 0 - } else if v.VersionNum > o.VersionNum { - return 1 - } else { - return -1 - } - } else { // both are Alphas - if v.VersionStr == o.VersionStr { - return 0 - } else if v.VersionStr > o.VersionStr { - return 1 - } else { - return -1 - } - } -} - -// PreRelease version to string -func (v PRVersion) String() string { - if v.IsNum { - return strconv.FormatUint(v.VersionNum, 10) - } - return v.VersionStr -} - -func containsOnly(s string, set string) bool { - return strings.IndexFunc(s, func(r rune) bool { - return !strings.ContainsRune(set, r) - }) == -1 -} - -func hasLeadingZeroes(s string) bool { - return len(s) > 1 && s[0] == '0' -} - -// NewBuildVersion creates a new valid build version -func NewBuildVersion(s string) (string, error) { - if len(s) == 0 { - return "", errors.New("Buildversion is empty") - } - if !containsOnly(s, alphanum) { - return "", fmt.Errorf("Invalid character(s) found in build meta data %q", s) - } - return s, nil -} - -// FinalizeVersion returns the major, minor and patch number only and discards -// prerelease and build number. -func FinalizeVersion(s string) (string, error) { - v, err := Parse(s) - if err != nil { - return "", err - } - v.Pre = nil - v.Build = nil - - finalVer := v.String() - return finalVer, nil -} diff --git a/vendor/github.com/blang/semver/v4/sort.go b/vendor/github.com/blang/semver/v4/sort.go deleted file mode 100644 index e18f88082..000000000 --- a/vendor/github.com/blang/semver/v4/sort.go +++ /dev/null @@ -1,28 +0,0 @@ -package semver - -import ( - "sort" -) - -// Versions represents multiple versions. 
-type Versions []Version - -// Len returns length of version collection -func (s Versions) Len() int { - return len(s) -} - -// Swap swaps two versions inside the collection by its indices -func (s Versions) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -// Less checks if version at index i is less than version at index j -func (s Versions) Less(i, j int) bool { - return s[i].LT(s[j]) -} - -// Sort sorts a slice of versions -func Sort(versions []Version) { - sort.Sort(Versions(versions)) -} diff --git a/vendor/github.com/blang/semver/v4/sql.go b/vendor/github.com/blang/semver/v4/sql.go deleted file mode 100644 index db958134f..000000000 --- a/vendor/github.com/blang/semver/v4/sql.go +++ /dev/null @@ -1,30 +0,0 @@ -package semver - -import ( - "database/sql/driver" - "fmt" -) - -// Scan implements the database/sql.Scanner interface. -func (v *Version) Scan(src interface{}) (err error) { - var str string - switch src := src.(type) { - case string: - str = src - case []byte: - str = string(src) - default: - return fmt.Errorf("version.Scan: cannot convert %T to string", src) - } - - if t, err := Parse(str); err == nil { - *v = t - } - - return -} - -// Value implements the database/sql/driver.Valuer interface. -func (v Version) Value() (driver.Value, error) { - return v.String(), nil -} diff --git a/vendor/github.com/coreos/go-systemd/LICENSE b/vendor/github.com/coreos/go-systemd/LICENSE deleted file mode 100644 index 37ec93a14..000000000 --- a/vendor/github.com/coreos/go-systemd/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. 
For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. 
- -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. 
- -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/coreos/go-systemd/NOTICE b/vendor/github.com/coreos/go-systemd/NOTICE deleted file mode 100644 index 23a0ada2f..000000000 --- a/vendor/github.com/coreos/go-systemd/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -CoreOS Project -Copyright 2018 CoreOS, Inc - -This product includes software developed at CoreOS, Inc. -(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/go-systemd/unit/deserialize.go b/vendor/github.com/coreos/go-systemd/unit/deserialize.go deleted file mode 100644 index c0c06bdfc..000000000 --- a/vendor/github.com/coreos/go-systemd/unit/deserialize.go +++ /dev/null @@ -1,278 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package unit - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "io" - "strings" - "unicode" -) - -const ( - // SYSTEMD_LINE_MAX mimics the maximum line length that systemd can use. - // On typical systemd platforms (i.e. modern Linux), this will most - // commonly be 2048, so let's use that as a sanity check. - // Technically, we should probably pull this at runtime: - // SYSTEMD_LINE_MAX = int(C.sysconf(C.__SC_LINE_MAX)) - // but this would introduce an (unfortunate) dependency on cgo - SYSTEMD_LINE_MAX = 2048 - - // SYSTEMD_NEWLINE defines characters that systemd considers indicators - // for a newline. - SYSTEMD_NEWLINE = "\r\n" -) - -var ( - // ErrLineTooLong gets returned when a line is too long for systemd to handle. - ErrLineTooLong = fmt.Errorf("line too long (max %d bytes)", SYSTEMD_LINE_MAX) -) - -// Deserialize parses a systemd unit file into a list of UnitOption objects. -func Deserialize(f io.Reader) (opts []*UnitOption, err error) { - lexer, optchan, errchan := newLexer(f) - go lexer.lex() - - for opt := range optchan { - opts = append(opts, &(*opt)) - } - - err = <-errchan - return opts, err -} - -func newLexer(f io.Reader) (*lexer, <-chan *UnitOption, <-chan error) { - optchan := make(chan *UnitOption) - errchan := make(chan error, 1) - buf := bufio.NewReader(f) - - return &lexer{buf, optchan, errchan, ""}, optchan, errchan -} - -type lexer struct { - buf *bufio.Reader - optchan chan *UnitOption - errchan chan error - section string -} - -func (l *lexer) lex() { - defer func() { - close(l.optchan) - close(l.errchan) - }() - next := l.lexNextSection - for next != nil { - if l.buf.Buffered() >= SYSTEMD_LINE_MAX { - // systemd truncates lines longer than LINE_MAX - // https://bugs.freedesktop.org/show_bug.cgi?id=85308 - // Rather than allowing this to pass silently, let's - // explicitly gate people from encountering this - line, err := l.buf.Peek(SYSTEMD_LINE_MAX) - if err != nil { - l.errchan <- err - return - } - if !bytes.ContainsAny(line, SYSTEMD_NEWLINE) { - l.errchan <- ErrLineTooLong - return - } - } - - var err error - next, err = next() - if err != nil { - l.errchan <- err - return - } - } -} - -type lexStep func() (lexStep, error) - -func (l *lexer) lexSectionName() (lexStep, error) { - sec, err := l.buf.ReadBytes(']') - if err != nil { - return nil, errors.New("unable to find end of section") - } - - return l.lexSectionSuffixFunc(string(sec[:len(sec)-1])), nil -} - -func (l *lexer) lexSectionSuffixFunc(section string) lexStep { - return func() (lexStep, error) { - garbage, _, err := l.toEOL() - if err != nil { - return nil, err - } - - garbage = bytes.TrimSpace(garbage) - if len(garbage) > 0 { - return nil, fmt.Errorf("found garbage after section name %s: %v", l.section, garbage) - } - - return l.lexNextSectionOrOptionFunc(section), nil - } -} - -func (l *lexer) ignoreLineFunc(next lexStep) lexStep { - return func() (lexStep, error) { - for { - line, _, err := l.toEOL() - if err != nil { - return nil, err - } - - line = bytes.TrimSuffix(line, []byte{' '}) - - // lack of continuation means this line has been exhausted - if 
!bytes.HasSuffix(line, []byte{'\\'}) { - break - } - } - - // reached end of buffer, safe to exit - return next, nil - } -} - -func (l *lexer) lexNextSection() (lexStep, error) { - r, _, err := l.buf.ReadRune() - if err != nil { - if err == io.EOF { - err = nil - } - return nil, err - } - - if r == '[' { - return l.lexSectionName, nil - } else if isComment(r) { - return l.ignoreLineFunc(l.lexNextSection), nil - } - - return l.lexNextSection, nil -} - -func (l *lexer) lexNextSectionOrOptionFunc(section string) lexStep { - return func() (lexStep, error) { - r, _, err := l.buf.ReadRune() - if err != nil { - if err == io.EOF { - err = nil - } - return nil, err - } - - if unicode.IsSpace(r) { - return l.lexNextSectionOrOptionFunc(section), nil - } else if r == '[' { - return l.lexSectionName, nil - } else if isComment(r) { - return l.ignoreLineFunc(l.lexNextSectionOrOptionFunc(section)), nil - } - - l.buf.UnreadRune() - return l.lexOptionNameFunc(section), nil - } -} - -func (l *lexer) lexOptionNameFunc(section string) lexStep { - return func() (lexStep, error) { - var partial bytes.Buffer - for { - r, _, err := l.buf.ReadRune() - if err != nil { - return nil, err - } - - if r == '\n' || r == '\r' { - return nil, errors.New("unexpected newline encountered while parsing option name") - } - - if r == '=' { - break - } - - partial.WriteRune(r) - } - - name := strings.TrimSpace(partial.String()) - return l.lexOptionValueFunc(section, name, bytes.Buffer{}), nil - } -} - -func (l *lexer) lexOptionValueFunc(section, name string, partial bytes.Buffer) lexStep { - return func() (lexStep, error) { - for { - line, eof, err := l.toEOL() - if err != nil { - return nil, err - } - - if len(bytes.TrimSpace(line)) == 0 { - break - } - - partial.Write(line) - - // lack of continuation means this value has been exhausted - idx := bytes.LastIndex(line, []byte{'\\'}) - if idx == -1 || idx != (len(line)-1) { - break - } - - if !eof { - partial.WriteRune('\n') - } - - return l.lexOptionValueFunc(section, name, partial), nil - } - - val := partial.String() - if strings.HasSuffix(val, "\n") { - // A newline was added to the end, so the file didn't end with a backslash. - // => Keep the newline - val = strings.TrimSpace(val) + "\n" - } else { - val = strings.TrimSpace(val) - } - l.optchan <- &UnitOption{Section: section, Name: name, Value: val} - - return l.lexNextSectionOrOptionFunc(section), nil - } -} - -// toEOL reads until the end-of-line or end-of-file. -// Returns (data, EOFfound, error) -func (l *lexer) toEOL() ([]byte, bool, error) { - line, err := l.buf.ReadBytes('\n') - // ignore EOF here since it's roughly equivalent to EOL - if err != nil && err != io.EOF { - return nil, false, err - } - - line = bytes.TrimSuffix(line, []byte{'\r'}) - line = bytes.TrimSuffix(line, []byte{'\n'}) - - return line, err == io.EOF, nil -} - -func isComment(r rune) bool { - return r == '#' || r == ';' -} diff --git a/vendor/github.com/coreos/go-systemd/unit/escape.go b/vendor/github.com/coreos/go-systemd/unit/escape.go deleted file mode 100644 index 63b11726d..000000000 --- a/vendor/github.com/coreos/go-systemd/unit/escape.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Implements systemd-escape [--unescape] [--path] - -package unit - -import ( - "fmt" - "strconv" - "strings" -) - -const ( - allowed = `:_.abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789` -) - -// If isPath is true: -// We remove redundant '/'s, the leading '/', and trailing '/'. -// If the result is empty, a '/' is inserted. -// -// We always: -// Replace the following characters with `\x%x`: -// Leading `.` -// `-`, `\`, and anything not in this set: `:-_.\[0-9a-zA-Z]` -// Replace '/' with '-'. -func escape(unescaped string, isPath bool) string { - e := []byte{} - inSlashes := false - start := true - for i := 0; i < len(unescaped); i++ { - c := unescaped[i] - if isPath { - if c == '/' { - inSlashes = true - continue - } else if inSlashes { - inSlashes = false - if !start { - e = append(e, '-') - } - } - } - - if c == '/' { - e = append(e, '-') - } else if start && c == '.' || strings.IndexByte(allowed, c) == -1 { - e = append(e, []byte(fmt.Sprintf(`\x%x`, c))...) - } else { - e = append(e, c) - } - start = false - } - if isPath && len(e) == 0 { - e = append(e, '-') - } - return string(e) -} - -// If isPath is true: -// We always return a string beginning with '/'. -// -// We always: -// Replace '-' with '/'. -// Replace `\x%x` with the value represented in hex. -func unescape(escaped string, isPath bool) string { - u := []byte{} - for i := 0; i < len(escaped); i++ { - c := escaped[i] - if c == '-' { - c = '/' - } else if c == '\\' && len(escaped)-i >= 4 && escaped[i+1] == 'x' { - n, err := strconv.ParseInt(escaped[i+2:i+4], 16, 8) - if err == nil { - c = byte(n) - i += 3 - } - } - u = append(u, c) - } - if isPath && (len(u) == 0 || u[0] != '/') { - u = append([]byte("/"), u...) - } - return string(u) -} - -// UnitNameEscape escapes a string as `systemd-escape` would -func UnitNameEscape(unescaped string) string { - return escape(unescaped, false) -} - -// UnitNameUnescape unescapes a string as `systemd-escape --unescape` would -func UnitNameUnescape(escaped string) string { - return unescape(escaped, false) -} - -// UnitNamePathEscape escapes a string as `systemd-escape --path` would -func UnitNamePathEscape(unescaped string) string { - return escape(unescaped, true) -} - -// UnitNamePathUnescape unescapes a string as `systemd-escape --path --unescape` would -func UnitNamePathUnescape(escaped string) string { - return unescape(escaped, true) -} diff --git a/vendor/github.com/coreos/go-systemd/unit/option.go b/vendor/github.com/coreos/go-systemd/unit/option.go deleted file mode 100644 index 98e1af5c9..000000000 --- a/vendor/github.com/coreos/go-systemd/unit/option.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package unit - -import ( - "fmt" -) - -// UnitOption represents an option in a systemd unit file. -type UnitOption struct { - Section string - Name string - Value string -} - -// NewUnitOption returns a new UnitOption instance with pre-set values. -func NewUnitOption(section, name, value string) *UnitOption { - return &UnitOption{Section: section, Name: name, Value: value} -} - -func (uo *UnitOption) String() string { - return fmt.Sprintf("{Section: %q, Name: %q, Value: %q}", uo.Section, uo.Name, uo.Value) -} - -// Match compares two UnitOptions and returns true if they are identical. -func (uo *UnitOption) Match(other *UnitOption) bool { - return uo.Section == other.Section && - uo.Name == other.Name && - uo.Value == other.Value -} - -// AllMatch compares two slices of UnitOptions and returns true if they are -// identical. -func AllMatch(u1 []*UnitOption, u2 []*UnitOption) bool { - length := len(u1) - if length != len(u2) { - return false - } - - for i := 0; i < length; i++ { - if !u1[i].Match(u2[i]) { - return false - } - } - - return true -} diff --git a/vendor/github.com/coreos/go-systemd/unit/serialize.go b/vendor/github.com/coreos/go-systemd/unit/serialize.go deleted file mode 100644 index e07799cad..000000000 --- a/vendor/github.com/coreos/go-systemd/unit/serialize.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package unit - -import ( - "bytes" - "io" -) - -// Serialize encodes all of the given UnitOption objects into a -// unit file. When serialized the options are sorted in their -// supplied order but grouped by section. 
-func Serialize(opts []*UnitOption) io.Reader { - var buf bytes.Buffer - - if len(opts) == 0 { - return &buf - } - - // Index of sections -> ordered options - idx := map[string][]*UnitOption{} - // Separately preserve order in which sections were seen - sections := []string{} - for _, opt := range opts { - sec := opt.Section - if _, ok := idx[sec]; !ok { - sections = append(sections, sec) - } - idx[sec] = append(idx[sec], opt) - } - - for i, sect := range sections { - writeSectionHeader(&buf, sect) - writeNewline(&buf) - - opts := idx[sect] - for _, opt := range opts { - writeOption(&buf, opt) - writeNewline(&buf) - } - if i < len(sections)-1 { - writeNewline(&buf) - } - } - - return &buf -} - -func writeNewline(buf *bytes.Buffer) { - buf.WriteRune('\n') -} - -func writeSectionHeader(buf *bytes.Buffer, section string) { - buf.WriteRune('[') - buf.WriteString(section) - buf.WriteRune(']') -} - -func writeOption(buf *bytes.Buffer, opt *UnitOption) { - buf.WriteString(opt.Name) - buf.WriteRune('=') - buf.WriteString(opt.Value) -} diff --git a/vendor/github.com/coreos/ignition/LICENSE b/vendor/github.com/coreos/ignition/LICENSE deleted file mode 100644 index e06d20818..000000000 --- a/vendor/github.com/coreos/ignition/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - diff --git a/vendor/github.com/coreos/ignition/NOTICE b/vendor/github.com/coreos/ignition/NOTICE deleted file mode 100644 index e520005cd..000000000 --- a/vendor/github.com/coreos/ignition/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -CoreOS Project -Copyright 2015 CoreOS, Inc - -This product includes software developed at CoreOS, Inc. -(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/ignition/config/shared/errors/errors.go b/vendor/github.com/coreos/ignition/config/shared/errors/errors.go deleted file mode 100644 index f2b47cf90..000000000 --- a/vendor/github.com/coreos/ignition/config/shared/errors/errors.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2018 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package errors includes errors that are used in multiple config versions -package errors - -import ( - "errors" - "fmt" -) - -var ( - // Parsing / general errors - ErrInvalid = errors.New("config is not valid") - ErrCloudConfig = errors.New("not a config (found coreos-cloudconfig)") - ErrEmpty = errors.New("not a config (empty)") - ErrUnknownVersion = errors.New("unsupported config version") - ErrScript = errors.New("not a config (found coreos-cloudinit script)") - ErrDeprecated = errors.New("config format deprecated") - ErrCompressionInvalid = errors.New("invalid compression method") - - // Ignition section errors - ErrOldVersion = errors.New("incorrect config version (too old)") - ErrNewVersion = errors.New("incorrect config version (too new)") - ErrInvalidVersion = errors.New("invalid config version (couldn't parse)") - - // Storage section errors - ErrPermissionsUnset = errors.New("permissions unset, defaulting to 0000") - ErrDiskDeviceRequired = errors.New("disk device is required") - ErrPartitionNumbersCollide = errors.New("partition numbers collide") - ErrPartitionsOverlap = errors.New("partitions overlap") - ErrPartitionsMisaligned = errors.New("partitions misaligned") - ErrAppendAndOverwrite = errors.New("cannot set both append and overwrite to true") - ErrFilesystemInvalidFormat = errors.New("invalid filesystem format") - ErrFilesystemNoMountPath = errors.New("filesystem is missing mount or path") - ErrFilesystemMountAndPath = errors.New("filesystem has both mount and path defined") - ErrUsedCreateAndMountOpts = errors.New("cannot use both create object and mount-level options field") - ErrUsedCreateAndWipeFilesystem = errors.New("cannot use both create object and wipeFilesystem field") - ErrWarningCreateDeprecated = errors.New("the create object has been deprecated in favor of mount-level options") - ErrExt4LabelTooLong = errors.New("filesystem labels cannot be longer than 16 characters when using ext4") - ErrBtrfsLabelTooLong = errors.New("filesystem labels cannot be longer than 256 characters when using btrfs") - ErrXfsLabelTooLong = errors.New("filesystem labels cannot be longer than 12 characters when using xfs") - ErrSwapLabelTooLong = errors.New("filesystem labels cannot be longer than 15 characters when using swap") - ErrVfatLabelTooLong = errors.New("filesystem labels cannot be longer than 11 characters when using vfat") - ErrFileIllegalMode = errors.New("illegal file mode") - ErrNoFilesystem = errors.New("no filesystem specified") - ErrBothIDAndNameSet = errors.New("cannot set both id and name") - ErrLabelTooLong = errors.New("partition labels may not exceed 36 characters") - ErrDoesntMatchGUIDRegex = errors.New("doesn't match the form \"01234567-89AB-CDEF-EDCB-A98765432101\"") - ErrLabelContainsColon = errors.New("partition label will be truncated to text before the colon") - ErrPathRelative = errors.New("path not absolute") - ErrSparesUnsupportedForLevel = errors.New("spares unsupported for arrays with a level greater than 0") - ErrUnrecognizedRaidLevel = errors.New("unrecognized raid level") - ErrShouldNotExistWithOthers = 
errors.New("shouldExist specified false with other options also specified") - ErrZeroesWithShouldNotExist = errors.New("shouldExist is false for a partition and other partition(s) has start or size 0") - ErrPartitionsUnitsMismatch = errors.New("cannot mix MBs and sectors within a disk") - ErrSizeDeprecated = errors.New("size is deprecated; use sizeMB instead") - ErrStartDeprecated = errors.New("start is deprecated; use startMB instead") - - // Passwd section errors - ErrPasswdCreateDeprecated = errors.New("the create object has been deprecated in favor of user-level options") - ErrPasswdCreateAndGecos = errors.New("cannot use both the create object and the user-level gecos field") - ErrPasswdCreateAndGroups = errors.New("cannot use both the create object and the user-level groups field") - ErrPasswdCreateAndHomeDir = errors.New("cannot use both the create object and the user-level homeDir field") - ErrPasswdCreateAndNoCreateHome = errors.New("cannot use both the create object and the user-level noCreateHome field") - ErrPasswdCreateAndNoLogInit = errors.New("cannot use both the create object and the user-level noLogInit field") - ErrPasswdCreateAndNoUserGroup = errors.New("cannot use both the create object and the user-level noUserGroup field") - ErrPasswdCreateAndPrimaryGroup = errors.New("cannot use both the create object and the user-level primaryGroup field") - ErrPasswdCreateAndShell = errors.New("cannot use both the create object and the user-level shell field") - ErrPasswdCreateAndSystem = errors.New("cannot use both the create object and the user-level system field") - ErrPasswdCreateAndUID = errors.New("cannot use both the create object and the user-level uid field") - - // Systemd and Networkd section errors - ErrInvalidSystemdExt = errors.New("invalid systemd unit extension") - ErrInvalidSystemdDropinExt = errors.New("invalid systemd drop-in extension") - ErrInvalidNetworkdExt = errors.New("invalid networkd unit extension") - ErrInvalidNetworkdDropinExt = errors.New("invalid networkd drop-in extension") - - // Misc errors - ErrInvalidScheme = errors.New("invalid url scheme") - ErrInvalidUrl = errors.New("unable to parse url") - ErrEmptyHTTPHeaderName = errors.New("HTTP header name can't be empty") - ErrDuplicateHTTPHeaders = errors.New("all header names in the list must be unique") - ErrUnsupportedSchemeForHTTPHeaders = errors.New("cannot use HTTP headers with this source scheme") - ErrHashMalformed = errors.New("malformed hash specifier") - ErrHashWrongSize = errors.New("incorrect size for hash sum") - ErrHashUnrecognized = errors.New("unrecognized hash function") - ErrEngineConfiguration = errors.New("engine incorrectly configured") - - // AWS S3 specific errors - ErrInvalidS3ObjectVersionId = errors.New("invalid S3 object VersionId") -) - -// NewNoInstallSectionError produces an error indicating the given unit, named -// name, is missing an Install section. -func NewNoInstallSectionError(name string) error { - return fmt.Errorf("unit %q is enabled, but has no install section so enable does nothing", name) -} diff --git a/vendor/github.com/coreos/ignition/config/shared/validations/unit.go b/vendor/github.com/coreos/ignition/config/shared/validations/unit.go deleted file mode 100644 index 2924fac86..000000000 --- a/vendor/github.com/coreos/ignition/config/shared/validations/unit.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2018 CoreOS, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package validations contains validations shared between multiple config -// versions. -package validations - -import ( - "github.com/coreos/go-systemd/unit" - "github.com/coreos/ignition/config/shared/errors" - "github.com/coreos/ignition/config/validate/report" -) - -// ValidateInstallSection is a helper to validate a given unit -func ValidateInstallSection(name string, enabled bool, contentsEmpty bool, contentSections []*unit.UnitOption) report.Report { - if !enabled { - // install sections don't matter for not-enabled units - return report.Report{} - } - if contentsEmpty { - // install sections don't matter if it has no contents, e.g. it's being masked or just has dropins or such - return report.Report{} - } - if contentSections == nil { - // Should only happen if the unit could not be parsed, at which point an - // error is probably already in the report so we don't need to double-up on - // errors + warnings. - return report.Report{} - } - - for _, section := range contentSections { - if section.Section == "Install" { - return report.Report{} - } - } - - return report.Report{ - Entries: []report.Entry{{ - Message: errors.NewNoInstallSectionError(name).Error(), - Kind: report.EntryWarning, - }}, - } -} diff --git a/vendor/github.com/coreos/ignition/config/v2_2/types/ca.go b/vendor/github.com/coreos/ignition/config/v2_2/types/ca.go deleted file mode 100644 index 7440e1e2f..000000000 --- a/vendor/github.com/coreos/ignition/config/v2_2/types/ca.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2018 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "github.com/coreos/ignition/config/validate/report" -) - -func (c CaReference) ValidateSource() report.Report { - err := validateURL(c.Source) - if err != nil { - return report.ReportFromError(err, report.EntryError) - } - return report.Report{} -} diff --git a/vendor/github.com/coreos/ignition/config/v2_2/types/config.go b/vendor/github.com/coreos/ignition/config/v2_2/types/config.go deleted file mode 100644 index b1fcfcd99..000000000 --- a/vendor/github.com/coreos/ignition/config/v2_2/types/config.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "fmt" - - "github.com/coreos/go-semver/semver" - - "github.com/coreos/ignition/config/validate/report" -) - -var ( - MaxVersion = semver.Version{ - Major: 2, - Minor: 2, - } -) - -func (c Config) Validate() report.Report { - r := report.Report{} - rules := []rule{ - checkFilesFilesystems, - checkDuplicateFilesystems, - } - - for _, rule := range rules { - rule(c, &r) - } - return r -} - -type rule func(cfg Config, report *report.Report) - -func checkNodeFilesystems(node Node, filesystems map[string]struct{}, nodeType string) report.Report { - r := report.Report{} - if node.Filesystem == "" { - // Filesystem was not specified. This is an error, but its handled in types.File's Validate, not here - return r - } - _, ok := filesystems[node.Filesystem] - if !ok { - r.Add(report.Entry{ - Kind: report.EntryWarning, - Message: fmt.Sprintf("%v %q references nonexistent filesystem %q. (This is ok if it is defined in a referenced config)", - nodeType, node.Path, node.Filesystem), - }) - } - return r -} - -func checkFilesFilesystems(cfg Config, r *report.Report) { - filesystems := map[string]struct{}{"root": {}} - for _, filesystem := range cfg.Storage.Filesystems { - filesystems[filesystem.Name] = struct{}{} - } - for _, file := range cfg.Storage.Files { - r.Merge(checkNodeFilesystems(file.Node, filesystems, "File")) - } - for _, link := range cfg.Storage.Links { - r.Merge(checkNodeFilesystems(link.Node, filesystems, "Link")) - } - for _, dir := range cfg.Storage.Directories { - r.Merge(checkNodeFilesystems(dir.Node, filesystems, "Directory")) - } -} - -func checkDuplicateFilesystems(cfg Config, r *report.Report) { - filesystems := map[string]struct{}{"root": {}} - for _, filesystem := range cfg.Storage.Filesystems { - if _, ok := filesystems[filesystem.Name]; ok { - r.Add(report.Entry{ - Kind: report.EntryWarning, - Message: fmt.Sprintf("Filesystem %q shadows exising filesystem definition", filesystem.Name), - }) - } - filesystems[filesystem.Name] = struct{}{} - } -} diff --git a/vendor/github.com/coreos/ignition/config/v2_2/types/directory.go b/vendor/github.com/coreos/ignition/config/v2_2/types/directory.go deleted file mode 100644 index 9fdc732a6..000000000 --- a/vendor/github.com/coreos/ignition/config/v2_2/types/directory.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2017 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package types - -import ( - "github.com/coreos/ignition/config/shared/errors" - "github.com/coreos/ignition/config/validate/report" -) - -func (d Directory) ValidateMode() report.Report { - r := report.Report{} - if err := validateMode(d.Mode); err != nil { - r.Add(report.Entry{ - Message: err.Error(), - Kind: report.EntryError, - }) - } - if d.Mode == nil { - r.Add(report.Entry{ - Message: errors.ErrPermissionsUnset.Error(), - Kind: report.EntryWarning, - }) - } - return r -} diff --git a/vendor/github.com/coreos/ignition/config/v2_2/types/disk.go b/vendor/github.com/coreos/ignition/config/v2_2/types/disk.go deleted file mode 100644 index f0af504a1..000000000 --- a/vendor/github.com/coreos/ignition/config/v2_2/types/disk.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "github.com/coreos/ignition/config/shared/errors" - "github.com/coreos/ignition/config/validate/report" -) - -func (n Disk) Validate() report.Report { - return report.Report{} -} - -func (n Disk) ValidateDevice() report.Report { - if len(n.Device) == 0 { - return report.ReportFromError(errors.ErrDiskDeviceRequired, report.EntryError) - } - if err := validatePath(string(n.Device)); err != nil { - return report.ReportFromError(err, report.EntryError) - } - return report.Report{} -} - -func (n Disk) ValidatePartitions() report.Report { - r := report.Report{} - if n.partitionNumbersCollide() { - r.Add(report.Entry{ - Message: errors.ErrPartitionNumbersCollide.Error(), - Kind: report.EntryError, - }) - } - if n.partitionsOverlap() { - r.Add(report.Entry{ - Message: errors.ErrPartitionsOverlap.Error(), - Kind: report.EntryError, - }) - } - if n.partitionsMisaligned() { - r.Add(report.Entry{ - Message: errors.ErrPartitionsMisaligned.Error(), - Kind: report.EntryError, - }) - } - // Disks which have no errors at this point will likely succeed in sgdisk - return r -} - -// partitionNumbersCollide returns true if partition numbers in n.Partitions are not unique. -func (n Disk) partitionNumbersCollide() bool { - m := map[int][]Partition{} - for _, p := range n.Partitions { - if p.Number != 0 { - // a number of 0 means next available number, multiple devices can specify this - m[p.Number] = append(m[p.Number], p) - } - } - for _, n := range m { - if len(n) > 1 { - // TODO(vc): return information describing the collision for logging - return true - } - } - return false -} - -// end returns the last sector of a partition. -func (p Partition) end() int { - if p.Size == 0 { - // a size of 0 means "fill available", just return the start as the end for those. - return p.Start - } - return p.Start + p.Size - 1 -} - -// partitionsOverlap returns true if any explicitly dimensioned partitions overlap -func (n Disk) partitionsOverlap() bool { - for _, p := range n.Partitions { - // Starts of 0 are placed by sgdisk into the "largest available block" at that time. 
- // We aren't going to check those for overlap since we don't have the disk geometry. - if p.Start == 0 { - continue - } - - for _, o := range n.Partitions { - if p == o || o.Start == 0 { - continue - } - - // is p.Start within o? - if p.Start >= o.Start && p.Start <= o.end() { - return true - } - - // is p.end() within o? - if p.end() >= o.Start && p.end() <= o.end() { - return true - } - - // do p.Start and p.end() straddle o? - if p.Start < o.Start && p.end() > o.end() { - return true - } - } - } - return false -} - -// partitionsMisaligned returns true if any of the partitions don't start on a 2048-sector (1MiB) boundary. -func (n Disk) partitionsMisaligned() bool { - for _, p := range n.Partitions { - if (p.Start & (2048 - 1)) != 0 { - return true - } - } - return false -} diff --git a/vendor/github.com/coreos/ignition/config/v2_2/types/file.go b/vendor/github.com/coreos/ignition/config/v2_2/types/file.go deleted file mode 100644 index b235d16c0..000000000 --- a/vendor/github.com/coreos/ignition/config/v2_2/types/file.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "github.com/coreos/ignition/config/shared/errors" - "github.com/coreos/ignition/config/validate/report" -) - -func (f File) Validate() report.Report { - if f.Overwrite != nil && *f.Overwrite && f.Append { - return report.ReportFromError(errors.ErrAppendAndOverwrite, report.EntryError) - } - return report.Report{} -} - -func (f File) ValidateMode() report.Report { - r := report.Report{} - if err := validateMode(f.Mode); err != nil { - r.Add(report.Entry{ - Message: err.Error(), - Kind: report.EntryError, - }) - } - if f.Mode == nil { - r.Add(report.Entry{ - Message: errors.ErrPermissionsUnset.Error(), - Kind: report.EntryWarning, - }) - } - return r -} - -func (fc FileContents) ValidateCompression() report.Report { - r := report.Report{} - switch fc.Compression { - case "", "gzip": - default: - r.Add(report.Entry{ - Message: errors.ErrCompressionInvalid.Error(), - Kind: report.EntryError, - }) - } - return r -} - -func (fc FileContents) ValidateSource() report.Report { - r := report.Report{} - err := validateURL(fc.Source) - if err != nil { - r.Add(report.Entry{ - Message: err.Error(), - Kind: report.EntryError, - }) - } - return r -} diff --git a/vendor/github.com/coreos/ignition/config/v2_2/types/filesystem.go b/vendor/github.com/coreos/ignition/config/v2_2/types/filesystem.go deleted file mode 100644 index a2e43ffda..000000000 --- a/vendor/github.com/coreos/ignition/config/v2_2/types/filesystem.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "github.com/coreos/ignition/config/shared/errors" - "github.com/coreos/ignition/config/validate/report" -) - -func (f Filesystem) Validate() report.Report { - r := report.Report{} - if f.Mount == nil && f.Path == nil { - r.Add(report.Entry{ - Message: errors.ErrFilesystemNoMountPath.Error(), - Kind: report.EntryError, - }) - } - if f.Mount != nil { - if f.Path != nil { - r.Add(report.Entry{ - Message: errors.ErrFilesystemMountAndPath.Error(), - Kind: report.EntryError, - }) - } - if f.Mount.Create != nil { - if f.Mount.WipeFilesystem { - r.Add(report.Entry{ - Message: errors.ErrUsedCreateAndWipeFilesystem.Error(), - Kind: report.EntryError, - }) - } - if len(f.Mount.Options) > 0 { - r.Add(report.Entry{ - Message: errors.ErrUsedCreateAndMountOpts.Error(), - Kind: report.EntryError, - }) - } - r.Add(report.Entry{ - Message: errors.ErrWarningCreateDeprecated.Error(), - Kind: report.EntryWarning, - }) - } - } - return r -} - -func (f Filesystem) ValidatePath() report.Report { - r := report.Report{} - if f.Path != nil && validatePath(*f.Path) != nil { - r.Add(report.Entry{ - Message: errors.ErrPathRelative.Error(), - Kind: report.EntryError, - }) - } - return r -} - -func (m Mount) Validate() report.Report { - r := report.Report{} - switch m.Format { - case "ext4", "btrfs", "xfs", "swap", "vfat": - default: - r.Add(report.Entry{ - Message: errors.ErrFilesystemInvalidFormat.Error(), - Kind: report.EntryError, - }) - } - return r -} - -func (m Mount) ValidateDevice() report.Report { - r := report.Report{} - if err := validatePath(m.Device); err != nil { - r.Add(report.Entry{ - Message: err.Error(), - Kind: report.EntryError, - }) - } - return r -} - -func (m Mount) ValidateLabel() report.Report { - r := report.Report{} - if m.Label == nil { - return r - } - switch m.Format { - case "ext4": - if len(*m.Label) > 16 { - // source: man mkfs.ext4 - r.Add(report.Entry{ - Message: errors.ErrExt4LabelTooLong.Error(), - Kind: report.EntryError, - }) - } - case "btrfs": - if len(*m.Label) > 256 { - // source: man mkfs.btrfs - r.Add(report.Entry{ - Message: errors.ErrBtrfsLabelTooLong.Error(), - Kind: report.EntryError, - }) - } - case "xfs": - if len(*m.Label) > 12 { - // source: man mkfs.xfs - r.Add(report.Entry{ - Message: errors.ErrXfsLabelTooLong.Error(), - Kind: report.EntryError, - }) - } - case "swap": - // mkswap's man page does not state a limit on label size, but through - // experimentation it appears that mkswap will truncate long labels to - // 15 characters, so let's enforce that. 
- if len(*m.Label) > 15 { - r.Add(report.Entry{ - Message: errors.ErrSwapLabelTooLong.Error(), - Kind: report.EntryError, - }) - } - case "vfat": - if len(*m.Label) > 11 { - // source: man mkfs.fat - r.Add(report.Entry{ - Message: errors.ErrVfatLabelTooLong.Error(), - Kind: report.EntryError, - }) - } - } - return r -} diff --git a/vendor/github.com/coreos/ignition/config/v2_2/types/ignition.go b/vendor/github.com/coreos/ignition/config/v2_2/types/ignition.go deleted file mode 100644 index bddf49583..000000000 --- a/vendor/github.com/coreos/ignition/config/v2_2/types/ignition.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "github.com/coreos/go-semver/semver" - - "github.com/coreos/ignition/config/shared/errors" - "github.com/coreos/ignition/config/validate/report" -) - -func (c ConfigReference) ValidateSource() report.Report { - r := report.Report{} - err := validateURL(c.Source) - if err != nil { - r.Add(report.Entry{ - Message: err.Error(), - Kind: report.EntryError, - }) - } - return r -} - -func (v Ignition) Semver() (*semver.Version, error) { - return semver.NewVersion(v.Version) -} - -func (v Ignition) Validate() report.Report { - tv, err := v.Semver() - if err != nil { - return report.ReportFromError(errors.ErrInvalidVersion, report.EntryError) - } - if MaxVersion.Major > tv.Major { - return report.ReportFromError(errors.ErrOldVersion, report.EntryError) - } - if MaxVersion.LessThan(*tv) { - return report.ReportFromError(errors.ErrNewVersion, report.EntryError) - } - return report.Report{} -} diff --git a/vendor/github.com/coreos/ignition/config/v2_2/types/mode.go b/vendor/github.com/coreos/ignition/config/v2_2/types/mode.go deleted file mode 100644 index d06045d67..000000000 --- a/vendor/github.com/coreos/ignition/config/v2_2/types/mode.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2017 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package types - -import ( - "github.com/coreos/ignition/config/shared/errors" -) - -func validateMode(m *int) error { - if m != nil && (*m < 0 || *m > 07777) { - return errors.ErrFileIllegalMode - } - return nil -} diff --git a/vendor/github.com/coreos/ignition/config/v2_2/types/node.go b/vendor/github.com/coreos/ignition/config/v2_2/types/node.go deleted file mode 100644 index 50badfdfb..000000000 --- a/vendor/github.com/coreos/ignition/config/v2_2/types/node.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "path/filepath" - - "github.com/coreos/ignition/config/shared/errors" - "github.com/coreos/ignition/config/validate/report" -) - -func (n Node) ValidateFilesystem() report.Report { - r := report.Report{} - if n.Filesystem == "" { - r.Add(report.Entry{ - Message: errors.ErrNoFilesystem.Error(), - Kind: report.EntryError, - }) - } - return r -} - -func (n Node) ValidatePath() report.Report { - r := report.Report{} - if err := validatePath(n.Path); err != nil { - r.Add(report.Entry{ - Message: err.Error(), - Kind: report.EntryError, - }) - } - return r -} - -func (n Node) Depth() int { - count := 0 - for p := filepath.Clean(string(n.Path)); p != "/"; count++ { - p = filepath.Dir(p) - } - return count -} - -func (nu NodeUser) Validate() report.Report { - r := report.Report{} - if nu.ID != nil && nu.Name != "" { - r.Add(report.Entry{ - Message: errors.ErrBothIDAndNameSet.Error(), - Kind: report.EntryError, - }) - } - return r -} -func (ng NodeGroup) Validate() report.Report { - r := report.Report{} - if ng.ID != nil && ng.Name != "" { - r.Add(report.Entry{ - Message: errors.ErrBothIDAndNameSet.Error(), - Kind: report.EntryError, - }) - } - return r -} diff --git a/vendor/github.com/coreos/ignition/config/v2_2/types/partition.go b/vendor/github.com/coreos/ignition/config/v2_2/types/partition.go deleted file mode 100644 index 084dce7ce..000000000 --- a/vendor/github.com/coreos/ignition/config/v2_2/types/partition.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package types - -import ( - "fmt" - "regexp" - "strings" - - "github.com/coreos/ignition/config/shared/errors" - "github.com/coreos/ignition/config/validate/report" -) - -const ( - guidRegexStr = "^(|[[:xdigit:]]{8}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{12})$" -) - -func (p Partition) ValidateLabel() report.Report { - r := report.Report{} - // http://en.wikipedia.org/wiki/GUID_Partition_Table#Partition_entries: - // 56 (0x38) 72 bytes Partition name (36 UTF-16LE code units) - - // XXX(vc): note GPT calls it a name, we're using label for consistency - // with udev naming /dev/disk/by-partlabel/*. - if len(p.Label) > 36 { - r.Add(report.Entry{ - Message: errors.ErrLabelTooLong.Error(), - Kind: report.EntryError, - }) - } - - // sgdisk uses colons for delimitting compound arguments and does not allow escaping them. - if strings.Contains(p.Label, ":") { - r.Add(report.Entry{ - Message: errors.ErrLabelContainsColon.Error(), - Kind: report.EntryWarning, - }) - } - return r -} - -func (p Partition) ValidateTypeGUID() report.Report { - return validateGUID(p.TypeGUID) -} - -func (p Partition) ValidateGUID() report.Report { - return validateGUID(p.GUID) -} - -func validateGUID(guid string) report.Report { - r := report.Report{} - ok, err := regexp.MatchString(guidRegexStr, guid) - if err != nil { - r.Add(report.Entry{ - Message: fmt.Sprintf("error matching guid regexp: %v", err), - Kind: report.EntryError, - }) - } else if !ok { - r.Add(report.Entry{ - Message: errors.ErrDoesntMatchGUIDRegex.Error(), - Kind: report.EntryError, - }) - } - return r -} diff --git a/vendor/github.com/coreos/ignition/config/v2_2/types/passwd.go b/vendor/github.com/coreos/ignition/config/v2_2/types/passwd.go deleted file mode 100644 index 10508c56c..000000000 --- a/vendor/github.com/coreos/ignition/config/v2_2/types/passwd.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2017 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package types - -import ( - "github.com/coreos/ignition/config/shared/errors" - "github.com/coreos/ignition/config/validate/report" -) - -func (p PasswdUser) Validate() report.Report { - r := report.Report{} - if p.Create != nil { - r.Add(report.Entry{ - Message: errors.ErrPasswdCreateDeprecated.Error(), - Kind: report.EntryWarning, - }) - addErr := func(err error) { - r.Add(report.Entry{ - Message: err.Error(), - Kind: report.EntryError, - }) - } - if p.Gecos != "" { - addErr(errors.ErrPasswdCreateAndGecos) - } - if len(p.Groups) > 0 { - addErr(errors.ErrPasswdCreateAndGroups) - } - if p.HomeDir != "" { - addErr(errors.ErrPasswdCreateAndHomeDir) - } - if p.NoCreateHome { - addErr(errors.ErrPasswdCreateAndNoCreateHome) - } - if p.NoLogInit { - addErr(errors.ErrPasswdCreateAndNoLogInit) - } - if p.NoUserGroup { - addErr(errors.ErrPasswdCreateAndNoUserGroup) - } - if p.PrimaryGroup != "" { - addErr(errors.ErrPasswdCreateAndPrimaryGroup) - } - if p.Shell != "" { - addErr(errors.ErrPasswdCreateAndShell) - } - if p.System { - addErr(errors.ErrPasswdCreateAndSystem) - } - if p.UID != nil { - addErr(errors.ErrPasswdCreateAndUID) - } - } - return r -} diff --git a/vendor/github.com/coreos/ignition/config/v2_2/types/path.go b/vendor/github.com/coreos/ignition/config/v2_2/types/path.go deleted file mode 100644 index 780607c31..000000000 --- a/vendor/github.com/coreos/ignition/config/v2_2/types/path.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "path" - - "github.com/coreos/ignition/config/shared/errors" -) - -func validatePath(p string) error { - if !path.IsAbs(p) { - return errors.ErrPathRelative - } - return nil -} diff --git a/vendor/github.com/coreos/ignition/config/v2_2/types/raid.go b/vendor/github.com/coreos/ignition/config/v2_2/types/raid.go deleted file mode 100644 index 3aceaa9fa..000000000 --- a/vendor/github.com/coreos/ignition/config/v2_2/types/raid.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package types - -import ( - "github.com/coreos/ignition/config/shared/errors" - "github.com/coreos/ignition/config/validate/report" -) - -func (n Raid) ValidateLevel() report.Report { - r := report.Report{} - switch n.Level { - case "linear", "raid0", "0", "stripe": - if n.Spares != 0 { - r.Add(report.Entry{ - Message: errors.ErrSparesUnsupportedForLevel.Error(), - Kind: report.EntryError, - }) - } - case "raid1", "1", "mirror": - case "raid4", "4": - case "raid5", "5": - case "raid6", "6": - case "raid10", "10": - default: - r.Add(report.Entry{ - Message: errors.ErrUnrecognizedRaidLevel.Error(), - Kind: report.EntryError, - }) - } - return r -} - -func (n Raid) ValidateDevices() report.Report { - r := report.Report{} - for _, d := range n.Devices { - if err := validatePath(string(d)); err != nil { - r.Add(report.Entry{ - Message: errors.ErrPathRelative.Error(), - Kind: report.EntryError, - }) - } - } - return r -} diff --git a/vendor/github.com/coreos/ignition/config/v2_2/types/schema.go b/vendor/github.com/coreos/ignition/config/v2_2/types/schema.go deleted file mode 100644 index 4b32b337b..000000000 --- a/vendor/github.com/coreos/ignition/config/v2_2/types/schema.go +++ /dev/null @@ -1,246 +0,0 @@ -package types - -// generated by "schematyper --package=types schema/ignition.json -o config/types/schema.go --root-type=Config" -- DO NOT EDIT - -type CaReference struct { - Source string `json:"source,omitempty"` - Verification Verification `json:"verification,omitempty"` -} - -type Config struct { - Ignition Ignition `json:"ignition"` - Networkd Networkd `json:"networkd,omitempty"` - Passwd Passwd `json:"passwd,omitempty"` - Storage Storage `json:"storage,omitempty"` - Systemd Systemd `json:"systemd,omitempty"` -} - -type ConfigReference struct { - Source string `json:"source,omitempty"` - Verification Verification `json:"verification,omitempty"` -} - -type Create struct { - Force bool `json:"force,omitempty"` - Options []CreateOption `json:"options,omitempty"` -} - -type CreateOption string - -type Device string - -type Directory struct { - Node - DirectoryEmbedded1 -} - -type DirectoryEmbedded1 struct { - Mode *int `json:"mode,omitempty"` -} - -type Disk struct { - Device string `json:"device,omitempty"` - Partitions []Partition `json:"partitions,omitempty"` - WipeTable bool `json:"wipeTable,omitempty"` -} - -type File struct { - Node - FileEmbedded1 -} - -type FileContents struct { - Compression string `json:"compression,omitempty"` - Source string `json:"source,omitempty"` - Verification Verification `json:"verification,omitempty"` -} - -type FileEmbedded1 struct { - Append bool `json:"append,omitempty"` - Contents FileContents `json:"contents,omitempty"` - Mode *int `json:"mode,omitempty"` -} - -type Filesystem struct { - Mount *Mount `json:"mount,omitempty"` - Name string `json:"name,omitempty"` - Path *string `json:"path,omitempty"` -} - -type Group string - -type Ignition struct { - Config IgnitionConfig `json:"config,omitempty"` - Security Security `json:"security,omitempty"` - Timeouts Timeouts `json:"timeouts,omitempty"` - Version string `json:"version,omitempty"` -} - -type IgnitionConfig struct { - Append []ConfigReference `json:"append,omitempty"` - Replace *ConfigReference `json:"replace,omitempty"` -} - -type Link struct { - Node - LinkEmbedded1 -} - -type LinkEmbedded1 struct { - Hard bool `json:"hard,omitempty"` - Target string `json:"target,omitempty"` -} - -type Mount struct { - Create *Create `json:"create,omitempty"` - Device string `json:"device,omitempty"` - 
Format string `json:"format,omitempty"` - Label *string `json:"label,omitempty"` - Options []MountOption `json:"options,omitempty"` - UUID *string `json:"uuid,omitempty"` - WipeFilesystem bool `json:"wipeFilesystem,omitempty"` -} - -type MountOption string - -type Networkd struct { - Units []Networkdunit `json:"units,omitempty"` -} - -type NetworkdDropin struct { - Contents string `json:"contents,omitempty"` - Name string `json:"name,omitempty"` -} - -type Networkdunit struct { - Contents string `json:"contents,omitempty"` - Dropins []NetworkdDropin `json:"dropins,omitempty"` - Name string `json:"name,omitempty"` -} - -type Node struct { - Filesystem string `json:"filesystem,omitempty"` - Group *NodeGroup `json:"group,omitempty"` - Overwrite *bool `json:"overwrite,omitempty"` - Path string `json:"path,omitempty"` - User *NodeUser `json:"user,omitempty"` -} - -type NodeGroup struct { - ID *int `json:"id,omitempty"` - Name string `json:"name,omitempty"` -} - -type NodeUser struct { - ID *int `json:"id,omitempty"` - Name string `json:"name,omitempty"` -} - -type Partition struct { - GUID string `json:"guid,omitempty"` - Label string `json:"label,omitempty"` - Number int `json:"number,omitempty"` - Size int `json:"size,omitempty"` - Start int `json:"start,omitempty"` - TypeGUID string `json:"typeGuid,omitempty"` -} - -type Passwd struct { - Groups []PasswdGroup `json:"groups,omitempty"` - Users []PasswdUser `json:"users,omitempty"` -} - -type PasswdGroup struct { - Gid *int `json:"gid,omitempty"` - Name string `json:"name,omitempty"` - PasswordHash string `json:"passwordHash,omitempty"` - System bool `json:"system,omitempty"` -} - -type PasswdUser struct { - Create *Usercreate `json:"create,omitempty"` - Gecos string `json:"gecos,omitempty"` - Groups []Group `json:"groups,omitempty"` - HomeDir string `json:"homeDir,omitempty"` - Name string `json:"name,omitempty"` - NoCreateHome bool `json:"noCreateHome,omitempty"` - NoLogInit bool `json:"noLogInit,omitempty"` - NoUserGroup bool `json:"noUserGroup,omitempty"` - PasswordHash *string `json:"passwordHash,omitempty"` - PrimaryGroup string `json:"primaryGroup,omitempty"` - SSHAuthorizedKeys []SSHAuthorizedKey `json:"sshAuthorizedKeys,omitempty"` - Shell string `json:"shell,omitempty"` - System bool `json:"system,omitempty"` - UID *int `json:"uid,omitempty"` -} - -type Raid struct { - Devices []Device `json:"devices,omitempty"` - Level string `json:"level,omitempty"` - Name string `json:"name,omitempty"` - Options []RaidOption `json:"options,omitempty"` - Spares int `json:"spares,omitempty"` -} - -type RaidOption string - -type SSHAuthorizedKey string - -type Security struct { - TLS TLS `json:"tls,omitempty"` -} - -type Storage struct { - Directories []Directory `json:"directories,omitempty"` - Disks []Disk `json:"disks,omitempty"` - Files []File `json:"files,omitempty"` - Filesystems []Filesystem `json:"filesystems,omitempty"` - Links []Link `json:"links,omitempty"` - Raid []Raid `json:"raid,omitempty"` -} - -type Systemd struct { - Units []Unit `json:"units,omitempty"` -} - -type SystemdDropin struct { - Contents string `json:"contents,omitempty"` - Name string `json:"name,omitempty"` -} - -type TLS struct { - CertificateAuthorities []CaReference `json:"certificateAuthorities,omitempty"` -} - -type Timeouts struct { - HTTPResponseHeaders *int `json:"httpResponseHeaders,omitempty"` - HTTPTotal *int `json:"httpTotal,omitempty"` -} - -type Unit struct { - Contents string `json:"contents,omitempty"` - Dropins []SystemdDropin `json:"dropins,omitempty"` 
- Enable bool `json:"enable,omitempty"` - Enabled *bool `json:"enabled,omitempty"` - Mask bool `json:"mask,omitempty"` - Name string `json:"name,omitempty"` -} - -type Usercreate struct { - Gecos string `json:"gecos,omitempty"` - Groups []UsercreateGroup `json:"groups,omitempty"` - HomeDir string `json:"homeDir,omitempty"` - NoCreateHome bool `json:"noCreateHome,omitempty"` - NoLogInit bool `json:"noLogInit,omitempty"` - NoUserGroup bool `json:"noUserGroup,omitempty"` - PrimaryGroup string `json:"primaryGroup,omitempty"` - Shell string `json:"shell,omitempty"` - System bool `json:"system,omitempty"` - UID *int `json:"uid,omitempty"` -} - -type UsercreateGroup string - -type Verification struct { - Hash *string `json:"hash,omitempty"` -} diff --git a/vendor/github.com/coreos/ignition/config/v2_2/types/unit.go b/vendor/github.com/coreos/ignition/config/v2_2/types/unit.go deleted file mode 100644 index 70fe1179a..000000000 --- a/vendor/github.com/coreos/ignition/config/v2_2/types/unit.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "fmt" - "path" - "strings" - - "github.com/coreos/go-systemd/unit" - - "github.com/coreos/ignition/config/shared/errors" - "github.com/coreos/ignition/config/shared/validations" - "github.com/coreos/ignition/config/validate/report" -) - -func (u Unit) ValidateContents() report.Report { - r := report.Report{} - opts, err := validateUnitContent(u.Contents) - if err != nil { - r.Add(report.Entry{ - Message: err.Error(), - Kind: report.EntryError, - }) - } - - isEnabled := u.Enable || (u.Enabled != nil && *u.Enabled) - r.Merge(validations.ValidateInstallSection(u.Name, isEnabled, u.Contents == "", opts)) - - return r -} - -func (u Unit) ValidateName() report.Report { - r := report.Report{} - switch path.Ext(u.Name) { - case ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice", ".scope": - default: - r.Add(report.Entry{ - Message: errors.ErrInvalidSystemdExt.Error(), - Kind: report.EntryError, - }) - } - return r -} - -func (d SystemdDropin) Validate() report.Report { - r := report.Report{} - - if _, err := validateUnitContent(d.Contents); err != nil { - r.Add(report.Entry{ - Message: err.Error(), - Kind: report.EntryError, - }) - } - - switch path.Ext(d.Name) { - case ".conf": - default: - r.Add(report.Entry{ - Message: errors.ErrInvalidSystemdDropinExt.Error(), - Kind: report.EntryError, - }) - } - - return r -} - -func (u Networkdunit) Validate() report.Report { - r := report.Report{} - - if _, err := validateUnitContent(u.Contents); err != nil { - r.Add(report.Entry{ - Message: err.Error(), - Kind: report.EntryError, - }) - } - - switch path.Ext(u.Name) { - case ".link", ".netdev", ".network": - default: - r.Add(report.Entry{ - Message: errors.ErrInvalidNetworkdExt.Error(), - Kind: report.EntryError, - }) - } - - return r -} - -func (d NetworkdDropin) Validate() report.Report { - r := 
report.Report{} - - if _, err := validateUnitContent(d.Contents); err != nil { - r.Add(report.Entry{ - Message: err.Error(), - Kind: report.EntryError, - }) - } - - switch path.Ext(d.Name) { - case ".conf": - default: - r.Add(report.Entry{ - Message: errors.ErrInvalidNetworkdDropinExt.Error(), - Kind: report.EntryError, - }) - } - - return r -} - -func validateUnitContent(content string) ([]*unit.UnitOption, error) { - c := strings.NewReader(content) - opts, err := unit.Deserialize(c) - if err != nil { - return nil, fmt.Errorf("invalid unit content: %s", err) - } - return opts, nil -} diff --git a/vendor/github.com/coreos/ignition/config/v2_2/types/url.go b/vendor/github.com/coreos/ignition/config/v2_2/types/url.go deleted file mode 100644 index 11148fc08..000000000 --- a/vendor/github.com/coreos/ignition/config/v2_2/types/url.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "net/url" - - "github.com/vincent-petithory/dataurl" - - "github.com/coreos/ignition/config/shared/errors" -) - -func validateURL(s string) error { - // Empty url is valid, indicates an empty file - if s == "" { - return nil - } - u, err := url.Parse(s) - if err != nil { - return errors.ErrInvalidUrl - } - - switch u.Scheme { - case "http", "https", "oem", "tftp": - return nil - case "s3": - if v, ok := u.Query()["versionId"]; ok { - if len(v) == 0 || v[0] == "" { - return errors.ErrInvalidS3ObjectVersionId - } - } - return nil - case "data": - if _, err := dataurl.DecodeString(s); err != nil { - return err - } - return nil - default: - return errors.ErrInvalidScheme - } -} diff --git a/vendor/github.com/coreos/ignition/config/v2_2/types/verification.go b/vendor/github.com/coreos/ignition/config/v2_2/types/verification.go deleted file mode 100644 index 51e7d1550..000000000 --- a/vendor/github.com/coreos/ignition/config/v2_2/types/verification.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "crypto" - "encoding/hex" - "strings" - - "github.com/coreos/ignition/config/shared/errors" - "github.com/coreos/ignition/config/validate/report" -) - -// HashParts will return the sum and function (in that order) of the hash stored -// in this Verification, or an error if there is an issue during parsing. 
-func (v Verification) HashParts() (string, string, error) { - if v.Hash == nil { - // The hash can be nil - return "", "", nil - } - parts := strings.SplitN(*v.Hash, "-", 2) - if len(parts) != 2 { - return "", "", errors.ErrHashMalformed - } - - return parts[0], parts[1], nil -} - -func (v Verification) Validate() report.Report { - r := report.Report{} - - if v.Hash == nil { - // The hash can be nil - return r - } - - function, sum, err := v.HashParts() - if err != nil { - r.Add(report.Entry{ - Message: err.Error(), - Kind: report.EntryError, - }) - return r - } - var hash crypto.Hash - switch function { - case "sha512": - hash = crypto.SHA512 - default: - r.Add(report.Entry{ - Message: errors.ErrHashUnrecognized.Error(), - Kind: report.EntryError, - }) - return r - } - - if len(sum) != hex.EncodedLen(hash.Size()) { - r.Add(report.Entry{ - Message: errors.ErrHashWrongSize.Error(), - Kind: report.EntryError, - }) - } - - return r -} diff --git a/vendor/github.com/coreos/ignition/config/validate/report/report.go b/vendor/github.com/coreos/ignition/config/validate/report/report.go deleted file mode 100644 index e0d4fed8d..000000000 --- a/vendor/github.com/coreos/ignition/config/validate/report/report.go +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package report - -import ( - "bytes" - "encoding/json" - "fmt" - "sort" -) - -type Report struct { - Entries []Entry -} - -func (into *Report) Merge(from Report) { - into.Entries = append(into.Entries, from.Entries...) -} - -func ReportFromError(err error, severity entryKind) Report { - if err == nil { - return Report{} - } - return Report{ - Entries: []Entry{ - { - Kind: severity, - Message: err.Error(), - }, - }, - } -} - -// Sort sorts the entries by line number, then column number -func (r *Report) Sort() { - sort.Sort(entries(r.Entries)) -} - -type entries []Entry - -func (e entries) Len() int { - return len(e) -} - -func (e entries) Swap(i, j int) { - e[i], e[j] = e[j], e[i] -} - -func (e entries) Less(i, j int) bool { - if e[i].Line != e[j].Line { - return e[i].Line < e[j].Line - } - return e[i].Column < e[j].Column -} - -const ( - EntryError entryKind = iota - EntryWarning - EntryInfo - EntryDeprecated -) - -// AddPosition updates all the entries with Line equal to 0 and sets the Line/Column fields to line/column. This is useful for -// when a type has a custom unmarshaller and thus can't determine an exact offset of the error with the type. In this case -// the offset for the entire chunk of json that got unmarshalled to the type can be used instead, which is still pretty good. 
-func (r *Report) AddPosition(line, col int, highlight string) { - for i, e := range r.Entries { - if e.Line == 0 { - r.Entries[i].Line = line - r.Entries[i].Column = col - r.Entries[i].Highlight = highlight - } - } -} - -func (r *Report) Add(e Entry) { - r.Entries = append(r.Entries, e) -} - -func (r Report) String() string { - var errs bytes.Buffer - for i, entry := range r.Entries { - if i != 0 { - // Only add line breaks on multiline reports - errs.WriteString("\n") - } - errs.WriteString(entry.String()) - } - return errs.String() -} - -// IsFatal returns if there were any errors that make the config invalid -func (r Report) IsFatal() bool { - for _, entry := range r.Entries { - if entry.Kind == EntryError { - return true - } - } - return false -} - -// IsDeprecated returns if the report has deprecations -func (r Report) IsDeprecated() bool { - for _, entry := range r.Entries { - if entry.Kind == EntryDeprecated { - return true - } - } - return false -} - -type Entry struct { - Kind entryKind `json:"kind"` - Message string `json:"message"` - Line int `json:"line,omitempty"` - Column int `json:"column,omitempty"` - Highlight string `json:"-"` -} - -func (e Entry) String() string { - if e.Line != 0 { - return fmt.Sprintf("%s at line %d, column %d\n%s%v", e.Kind.String(), e.Line, e.Column, e.Highlight, e.Message) - } - return fmt.Sprintf("%s: %v", e.Kind.String(), e.Message) -} - -type entryKind int - -func (e entryKind) String() string { - switch e { - case EntryError: - return "error" - case EntryWarning: - return "warning" - case EntryInfo: - return "info" - case EntryDeprecated: - return "deprecated" - default: - return "unknown error" - } -} - -func (e entryKind) MarshalJSON() ([]byte, error) { - return json.Marshal(e.String()) -} diff --git a/vendor/github.com/coreos/ignition/v2/config/merge/merge.go b/vendor/github.com/coreos/ignition/v2/config/merge/merge.go index b58ef2cbc..3fc68a446 100644 --- a/vendor/github.com/coreos/ignition/v2/config/merge/merge.go +++ b/vendor/github.com/coreos/ignition/v2/config/merge/merge.go @@ -15,13 +15,9 @@ package merge import ( - "fmt" "reflect" - "strings" "github.com/coreos/ignition/v2/config/util" - - "github.com/coreos/vcontext/path" ) // Rules of Config Merging: @@ -39,50 +35,6 @@ import ( // - remove entries from the parent with the same Key() that are not in the same list // - append entries that are unique to the child -const ( - TAG_PARENT = "parent" - TAG_CHILD = "child" - TAG_RESULT = "result" -) - -// The path to one output field, and its corresponding input. From.Tag will -// be TAG_PARENT or TAG_CHILD depending on the origin of the field. -type Mapping struct { - From path.ContextPath - To path.ContextPath -} - -func (m Mapping) String() string { - return fmt.Sprintf("%s:%s → %s", m.From.Tag, m.From, m.To) -} - -type Transcript struct { - Mappings []Mapping -} - -func (t Transcript) String() string { - var lines []string - for _, m := range t.Mappings { - lines = append(lines, m.String()) - } - return strings.Join(lines, "\n") -} - -// pathAppendField looks up the JSON field name for field and returns base -// with that field name appended. -func pathAppendField(base path.ContextPath, field reflect.StructField) path.ContextPath { - tagName := strings.Split(field.Tag.Get("json"), ",")[0] - if tagName != "" { - return base.Append(tagName) - } - if field.Anonymous { - // field is a struct embedded in another struct (e.g. - // FileEmbedded1). Pretend it doesn't exist. 
- return base - } - panic("no JSON struct tag for " + field.Name) -} - // appendToSlice is a helper that appends to a slice without returning a new one. // panics if len >= cap func appendToSlice(s, v reflect.Value) { @@ -105,14 +57,11 @@ type structInfo struct { // checking done across all fields that share that handle mergedKeys map[string]string - // map from each handle + key() to the corresponding item + // map from each handle + key() value to what list it came from keysToValues map[handleKey]reflect.Value // map from each handle + key() to the list it came from keysToLists map[handleKey]string - - // map from each handle + key() to the index within the list - keysToListIndexes map[handleKey]int } // returns if this field should not do duplicate checking/merging @@ -122,10 +71,9 @@ func (s structInfo) ignoreField(name string) bool { } // getChildEntryByKey takes the name of a field (not handle) in the parent and a key and looks that entry -// up in the child. It will look up across all slices that share the same handle. It returns the value, -// name of the field in the child it was found in, and the list index within that field. The bool indicates -// whether it was found. -func (s structInfo) getChildEntryByKey(fieldName, key string) (reflect.Value, string, int, bool) { +// up in the child. It will look up across all slices that share the same handle. It return the value and +// name of the field in the child it was found in. The bool indicates whether it was found. +func (s structInfo) getChildEntryByKey(fieldName, key string) (reflect.Value, string, bool) { handle := fieldName if tmp, ok := s.mergedKeys[fieldName]; ok { handle = tmp @@ -136,9 +84,9 @@ func (s structInfo) getChildEntryByKey(fieldName, key string) (reflect.Value, st key: key, } if v, ok := s.keysToValues[hkey]; ok { - return v, s.keysToLists[hkey], s.keysToListIndexes[hkey], true + return v, s.keysToLists[hkey], true } - return reflect.Value{}, "", 0, false + return reflect.Value{}, "", false } func newStructInfo(parent, child reflect.Value) structInfo { @@ -154,7 +102,6 @@ func newStructInfo(parent, child reflect.Value) structInfo { keysToValues := map[handleKey]reflect.Value{} keysToLists := map[handleKey]string{} - keysToListIndexes := map[handleKey]int{} for i := 0; i < child.NumField(); i++ { field := child.Field(i) if field.Kind() != reflect.Slice { @@ -179,88 +126,47 @@ func newStructInfo(parent, child reflect.Value) structInfo { } keysToValues[hkey] = v keysToLists[hkey] = fieldName - keysToListIndexes[hkey] = j } } return structInfo{ - ignoreDups: ignoreDups, - mergedKeys: mergedKeys, - keysToValues: keysToValues, - keysToLists: keysToLists, - keysToListIndexes: keysToListIndexes, + ignoreDups: ignoreDups, + mergedKeys: mergedKeys, + keysToValues: keysToValues, + keysToLists: keysToLists, } } -// Deprecated: Use MergeStructTranscribe() instead. -func MergeStruct(parent, child reflect.Value) reflect.Value { - result, _ := MergeStructTranscribe(parent.Interface(), child.Interface()) - return reflect.ValueOf(result) -} - -// MergeStructTranscribe is intended for use by config/vX_Y/ packages and -// by generic external translation code. Most users should use the properly -// typed wrappers provided by the config/vX_Y/ packages. -// -// MergeStructTranscribe merges the specified configs and returns a -// transcript of the actions taken. parent and child MUST be the same type. 
-func MergeStructTranscribe(parent, child interface{}) (interface{}, Transcript) { - var transcript Transcript - result := mergeStruct(reflect.ValueOf(parent), path.New(TAG_PARENT), reflect.ValueOf(child), path.New(TAG_CHILD), path.New(TAG_RESULT), &transcript) - return result.Interface(), transcript -} - +// MergeStruct is intended for use by config/vX_Y/ packages only. They should expose their own Merge() that is properly +// typed. Use that one instead. // parent and child MUST be the same type -// we transcribe all leaf fields, and all intermediate structs that wholly -// originate from either parent or child -func mergeStruct(parent reflect.Value, parentPath path.ContextPath, child reflect.Value, childPath path.ContextPath, resultPath path.ContextPath, transcript *Transcript) reflect.Value { +func MergeStruct(parent, child reflect.Value) reflect.Value { // use New() so it's settable, addr-able, etc result := reflect.New(parent.Type()).Elem() info := newStructInfo(parent, child) for i := 0; i < parent.NumField(); i++ { - fieldMeta := parent.Type().Field(i) + fieldName := parent.Type().Field(i).Name parentField := parent.Field(i) childField := child.Field(i) resultField := result.Field(i) - parentFieldPath := pathAppendField(parentPath, fieldMeta) - childFieldPath := pathAppendField(childPath, fieldMeta) - resultFieldPath := pathAppendField(resultPath, fieldMeta) kind := parentField.Kind() switch { case util.IsPrimitive(kind): resultField.Set(childField) - transcribe(childFieldPath, resultFieldPath, resultField, fieldMeta, transcript) - case kind == reflect.Ptr && !parentField.IsNil() && !childField.IsNil() && parentField.Elem().Kind() == reflect.Struct: - // we're not supposed to have struct pointers, but some - // ended up in the Clevis and Luks structs in spec 3.2.0 - // https://github.com/coreos/ignition/issues/1132 - resultField.Set(mergeStruct(parentField.Elem(), parentFieldPath, childField.Elem(), childFieldPath, resultFieldPath, transcript).Addr()) case kind == reflect.Ptr && childField.IsNil(): resultField.Set(parentField) - transcribe(parentFieldPath, resultFieldPath, resultField, fieldMeta, transcript) case kind == reflect.Ptr && !childField.IsNil(): resultField.Set(childField) - transcribe(childFieldPath, resultFieldPath, resultField, fieldMeta, transcript) case kind == reflect.Struct: - resultField.Set(mergeStruct(parentField, parentFieldPath, childField, childFieldPath, resultFieldPath, transcript)) - case kind == reflect.Slice && info.ignoreField(fieldMeta.Name): + resultField.Set(MergeStruct(parentField, childField)) + case kind == reflect.Slice && info.ignoreField(fieldName): if parentField.Len()+childField.Len() == 0 { continue } - resultField.Set(reflect.MakeSlice(parentField.Type(), 0, parentField.Len()+childField.Len())) - for i := 0; i < parentField.Len(); i++ { - item := parentField.Index(i) - appendToSlice(resultField, item) - transcribe(parentFieldPath.Append(i), resultFieldPath.Append(i), item, fieldMeta, transcript) - } - for i := 0; i < childField.Len(); i++ { - item := childField.Index(i) - appendToSlice(resultField, item) - transcribe(childFieldPath.Append(i), resultFieldPath.Append(parentField.Len()+i), item, fieldMeta, transcript) - } - case kind == reflect.Slice && !info.ignoreField(fieldMeta.Name): + resultField.Set(reflect.AppendSlice(parentField, childField)) + case kind == reflect.Slice && !info.ignoreField(fieldName): // ooph, this is a doosey maxlen := parentField.Len() + childField.Len() if maxlen == 0 { @@ -269,50 +175,40 @@ func 
mergeStruct(parent reflect.Value, parentPath path.ContextPath, child reflec resultField.Set(reflect.MakeSlice(parentField.Type(), 0, parentField.Len()+childField.Len())) parentKeys := getKeySet(parentField) - // walk parent items for i := 0; i < parentField.Len(); i++ { parentItem := parentField.Index(i) - parentItemPath := parentFieldPath.Append(i) - resultItemPath := resultFieldPath.Append(resultField.Len()) key := util.CallKey(parentItem) - if childItem, childList, childListIndex, ok := info.getChildEntryByKey(fieldMeta.Name, key); ok { - if childList == fieldMeta.Name { + if childItem, childList, ok := info.getChildEntryByKey(fieldName, key); ok { + if childList == fieldName { // case 1: in child config in same list - childItemPath := childFieldPath.Append(childListIndex) if childItem.Kind() == reflect.Struct { // If HTTP header Value is nil, it means that we should remove the // parent header from the result. - if fieldMeta.Name == "HTTPHeaders" && childItem.FieldByName("Value").IsNil() { + if fieldName == "HTTPHeaders" && childItem.FieldByName("Value").IsNil() { continue } - appendToSlice(resultField, mergeStruct(parentItem, parentItemPath, childItem, childItemPath, resultItemPath, transcript)) + appendToSlice(resultField, MergeStruct(parentItem, childItem)) } else if util.IsPrimitive(childItem.Kind()) { appendToSlice(resultField, childItem) - transcribe(childItemPath, resultItemPath, childItem, fieldMeta, transcript) } else { panic("List of pointers or slices or something else weird") } - } else { // nolint:staticcheck + } else { // case 2: in child config in different list. Do nothing since it'll be handled iterating over that list } } else { // case 3: not in child config, append it appendToSlice(resultField, parentItem) - transcribe(parentItemPath, resultItemPath, parentItem, fieldMeta, transcript) } } - // append child items not in parent for i := 0; i < childField.Len(); i++ { childItem := childField.Index(i) - childItemPath := childFieldPath.Append(i) - resultItemPath := resultFieldPath.Append(resultField.Len()) key := util.CallKey(childItem) if _, alreadyMerged := parentKeys[key]; !alreadyMerged { - // We only check the parentMap for this field. If the parent had a matching entry in a different field + // We only check the parentMap for this field. If the parent had a matching entry in a differnt field // then it would be skipped as case 2 above appendToSlice(resultField, childItem) - transcribe(childItemPath, resultItemPath, childItem, fieldMeta, transcript) } } default: @@ -323,60 +219,6 @@ func mergeStruct(parent reflect.Value, parentPath path.ContextPath, child reflec return result } -// transcribe is called by mergeStruct when the latter decides to merge a -// subtree wholesale from either the parent or child, and thus loses -// interest in that subtree. transcribe descends the rest of that subtree, -// transcribing all of its populated leaves. It returns true if we -// transcribed anything. 
-func transcribe(fromPath path.ContextPath, toPath path.ContextPath, value reflect.Value, fieldMeta reflect.StructField, transcript *Transcript) bool { - add := func(from, to path.ContextPath) { - transcript.Mappings = append(transcript.Mappings, Mapping{ - From: from.Copy(), - To: to.Copy(), - }) - } - - kind := value.Kind() - switch { - case util.IsPrimitive(kind): - if value.Interface() == reflect.Zero(value.Type()).Interface() { - return false - } - add(fromPath, toPath) - case kind == reflect.Ptr: - if value.IsNil() { - return false - } - if value.Elem().Kind() == reflect.Struct { - // we're not supposed to have struct pointers, but some - // ended up in the Clevis and Luks structs in spec 3.2.0 - // https://github.com/coreos/ignition/issues/1132 - return transcribe(fromPath, toPath, value.Elem(), fieldMeta, transcript) - } - add(fromPath, toPath) - case kind == reflect.Struct: - var transcribed bool - for i := 0; i < value.NumField(); i++ { - valueFieldMeta := value.Type().Field(i) - transcribed = transcribe(pathAppendField(fromPath, valueFieldMeta), pathAppendField(toPath, valueFieldMeta), value.Field(i), valueFieldMeta, transcript) || transcribed - } - // embedded structs and empty structs should be invisible - if transcribed && !fieldMeta.Anonymous { - add(fromPath, toPath) - } - return transcribed - case kind == reflect.Slice: - var transcribed bool - for i := 0; i < value.Len(); i++ { - transcribed = transcribe(fromPath.Append(i), toPath.Append(i), value.Index(i), fieldMeta, transcript) || transcribed - } - return transcribed - default: - panic("unreachable code reached") - } - return true -} - // getKeySet takes a value of a slice and returns the set of all the Key() values in that slice func getKeySet(list reflect.Value) map[string]struct{} { m := map[string]struct{}{} diff --git a/vendor/github.com/coreos/ignition/v2/config/shared/errors/errors.go b/vendor/github.com/coreos/ignition/v2/config/shared/errors/errors.go index 4d9906d9a..577ddf935 100644 --- a/vendor/github.com/coreos/ignition/v2/config/shared/errors/errors.go +++ b/vendor/github.com/coreos/ignition/v2/config/shared/errors/errors.go @@ -46,7 +46,7 @@ var ( ErrVerificationAndNilSource = errors.New("source must be specified if verification is specified") ErrFilesystemInvalidFormat = errors.New("invalid filesystem format") ErrLabelNeedsFormat = errors.New("filesystem must specify format if label is specified") - ErrFormatNilWithOthers = errors.New("format cannot be empty when path, label, uuid, wipeFilesystem, options, or mountOptions is specified") + ErrFormatNilWithOthers = errors.New("format cannot be empty when path, label, uuid, or options are specified") ErrExt4LabelTooLong = errors.New("filesystem labels cannot be longer than 16 characters when using ext4") ErrBtrfsLabelTooLong = errors.New("filesystem labels cannot be longer than 256 characters when using btrfs") ErrXfsLabelTooLong = errors.New("filesystem labels cannot be longer than 12 characters when using xfs") diff --git a/vendor/github.com/coreos/ignition/v2/config/util/parsingErrors.go b/vendor/github.com/coreos/ignition/v2/config/util/parsingErrors.go index 2af85e2bc..e55d96849 100644 --- a/vendor/github.com/coreos/ignition/v2/config/util/parsingErrors.go +++ b/vendor/github.com/coreos/ignition/v2/config/util/parsingErrors.go @@ -16,6 +16,7 @@ package util import ( "encoding/json" + "fmt" "github.com/coreos/ignition/v2/config/shared/errors" @@ -41,6 +42,7 @@ func HandleParseErrors(rawConfig []byte, to interface{}) (report.Report, error) node.Marker = 
tree.MarkerFromIndices(t.Offset, -1) } tree.FixLineColumn(node, rawConfig) + fmt.Printf("%+v\n", node.Marker.StartP.Index) r.AddOnError(path.ContextPath{Tag: "json"}, err) r.Correlate(node) diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_2/config.go b/vendor/github.com/coreos/ignition/v2/config/v3_2/config.go index 8d0abbcce..e32e92381 100644 --- a/vendor/github.com/coreos/ignition/v2/config/v3_2/config.go +++ b/vendor/github.com/coreos/ignition/v2/config/v3_2/config.go @@ -15,6 +15,8 @@ package v3_2 import ( + "reflect" + "github.com/coreos/ignition/v2/config/merge" "github.com/coreos/ignition/v2/config/shared/errors" "github.com/coreos/ignition/v2/config/util" @@ -26,8 +28,12 @@ import ( ) func Merge(parent, child types.Config) types.Config { - res, _ := merge.MergeStructTranscribe(parent, child) - return res.(types.Config) + vParent := reflect.ValueOf(parent) + vChild := reflect.ValueOf(child) + + vRes := merge.MergeStruct(vParent, vChild) + res := vRes.Interface().(types.Config) + return res } // Parse parses the raw config into a types.Config struct and generates a report of any diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_2/types/filesystem.go b/vendor/github.com/coreos/ignition/v2/config/v3_2/types/filesystem.go index 39a158969..486aea86a 100644 --- a/vendor/github.com/coreos/ignition/v2/config/v3_2/types/filesystem.go +++ b/vendor/github.com/coreos/ignition/v2/config/v3_2/types/filesystem.go @@ -50,8 +50,6 @@ func (f Filesystem) validateFormat() error { if util.NotEmpty(f.Path) || util.NotEmpty(f.Label) || util.NotEmpty(f.UUID) || - f.WipeFilesystem != nil && *f.WipeFilesystem || - len(f.MountOptions) != 0 || len(f.Options) != 0 { return errors.ErrFormatNilWithOthers } diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_2/types/partition.go b/vendor/github.com/coreos/ignition/v2/config/v3_2/types/partition.go index 08dca8eaf..4bb60b2b8 100644 --- a/vendor/github.com/coreos/ignition/v2/config/v3_2/types/partition.go +++ b/vendor/github.com/coreos/ignition/v2/config/v3_2/types/partition.go @@ -36,10 +36,8 @@ var ( func (p Partition) Key() string { if p.Number != 0 { return fmt.Sprintf("number:%d", p.Number) - } else if p.Label != nil { - return fmt.Sprintf("label:%s", *p.Label) } else { - return "" + return fmt.Sprintf("label:%s", *p.Label) } } diff --git a/vendor/github.com/coreos/vcontext/json/json.go b/vendor/github.com/coreos/vcontext/json/json.go index 624ad2398..1dd7a553d 100644 --- a/vendor/github.com/coreos/vcontext/json/json.go +++ b/vendor/github.com/coreos/vcontext/json/json.go @@ -17,7 +17,7 @@ package json import ( "github.com/coreos/vcontext/tree" // todo: rewrite this dep - json "github.com/coreos/go-json" + json "github.com/ajeddeloh/go-json" ) func UnmarshalToContext(raw []byte) (tree.Node, error) { diff --git a/vendor/github.com/coreos/vcontext/path/path.go b/vendor/github.com/coreos/vcontext/path/path.go index 3daadc784..48f25801a 100644 --- a/vendor/github.com/coreos/vcontext/path/path.go +++ b/vendor/github.com/coreos/vcontext/path/path.go @@ -49,13 +49,8 @@ func (c ContextPath) Append(e ...interface{}) ContextPath { } func (c ContextPath) Copy() ContextPath { - // make sure to preserve reflect.DeepEqual() equality - var path []interface{} - if c.Path != nil { - path = append(path, c.Path...) 
- } return ContextPath{ - Path: path, + Path: append([]interface{}{}, c.Path...), Tag: c.Tag, } } diff --git a/vendor/github.com/ghodss/yaml/.travis.yml b/vendor/github.com/ghodss/yaml/.travis.yml index 98ad417e2..0e9d6edc0 100644 --- a/vendor/github.com/ghodss/yaml/.travis.yml +++ b/vendor/github.com/ghodss/yaml/.travis.yml @@ -1,8 +1,7 @@ language: go go: - - "1.9" - - "1.10" - - "1.11" + - 1.3 + - 1.4 script: - go test - go build diff --git a/vendor/github.com/ghodss/yaml/yaml.go b/vendor/github.com/ghodss/yaml/yaml.go index dfd264d6c..4fb4054a8 100644 --- a/vendor/github.com/ghodss/yaml/yaml.go +++ b/vendor/github.com/ghodss/yaml/yaml.go @@ -1,20 +1,9 @@ -// Package yaml provides a wrapper around go-yaml designed to enable a better -// way of handling YAML when marshaling to and from structs. -// -// In short, this package first converts YAML to JSON using go-yaml and then -// uses json.Marshal and json.Unmarshal to convert to or from the struct. This -// means that it effectively reuses the JSON struct tags as well as the custom -// JSON methods MarshalJSON and UnmarshalJSON unlike go-yaml. -// -// See also http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang -// -package yaml // import "github.com/ghodss/yaml" +package yaml import ( "bytes" "encoding/json" "fmt" - "io" "reflect" "strconv" @@ -37,30 +26,15 @@ func Marshal(o interface{}) ([]byte, error) { return y, nil } -// JSONOpt is a decoding option for decoding from JSON format. -type JSONOpt func(*json.Decoder) *json.Decoder - -// Unmarshal converts YAML to JSON then uses JSON to unmarshal into an object, -// optionally configuring the behavior of the JSON unmarshal. -func Unmarshal(y []byte, o interface{}, opts ...JSONOpt) error { - return unmarshal(yaml.Unmarshal, y, o, opts) -} - -// UnmarshalStrict is like Unmarshal except that any mapping keys that are -// duplicates will result in an error. -// To also be strict about unknown fields, add the DisallowUnknownFields option. -func UnmarshalStrict(y []byte, o interface{}, opts ...JSONOpt) error { - return unmarshal(yaml.UnmarshalStrict, y, o, opts) -} - -func unmarshal(f func(in []byte, out interface{}) (err error), y []byte, o interface{}, opts []JSONOpt) error { +// Converts YAML to JSON then uses JSON to unmarshal into an object. +func Unmarshal(y []byte, o interface{}) error { vo := reflect.ValueOf(o) - j, err := yamlToJSON(y, &vo, f) + j, err := yamlToJSON(y, &vo) if err != nil { return fmt.Errorf("error converting YAML to JSON: %v", err) } - err = jsonUnmarshal(bytes.NewReader(j), o, opts...) + err = json.Unmarshal(j, o) if err != nil { return fmt.Errorf("error unmarshaling JSON: %v", err) } @@ -68,21 +42,6 @@ func unmarshal(f func(in []byte, out interface{}) (err error), y []byte, o inter return nil } -// jsonUnmarshal unmarshals the JSON byte stream from the given reader into the -// object, optionally applying decoder options prior to decoding. We are not -// using json.Unmarshal directly as we want the chance to pass in non-default -// options. -func jsonUnmarshal(r io.Reader, o interface{}, opts ...JSONOpt) error { - d := json.NewDecoder(r) - for _, opt := range opts { - d = opt(d) - } - if err := d.Decode(&o); err != nil { - return fmt.Errorf("while decoding JSON: %v", err) - } - return nil -} - // Convert JSON to YAML. func JSONToYAML(j []byte) ([]byte, error) { // Convert the JSON to an object. @@ -101,8 +60,8 @@ func JSONToYAML(j []byte) ([]byte, error) { return yaml.Marshal(jsonObj) } -// YAMLToJSON converts YAML to JSON. 
Since JSON is a subset of YAML, -// passing JSON through this method should be a no-op. +// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through +// this method should be a no-op. // // Things YAML can do that are not supported by JSON: // * In YAML you can have binary and null keys in your maps. These are invalid @@ -111,22 +70,14 @@ func JSONToYAML(j []byte) ([]byte, error) { // use binary data with this library, encode the data as base64 as usual but do // not use the !!binary tag in your YAML. This will ensure the original base64 // encoded data makes it all the way through to the JSON. -// -// For strict decoding of YAML, use YAMLToJSONStrict. func YAMLToJSON(y []byte) ([]byte, error) { - return yamlToJSON(y, nil, yaml.Unmarshal) -} - -// YAMLToJSONStrict is like YAMLToJSON but enables strict YAML decoding, -// returning an error on any duplicate field names. -func YAMLToJSONStrict(y []byte) ([]byte, error) { - return yamlToJSON(y, nil, yaml.UnmarshalStrict) + return yamlToJSON(y, nil) } -func yamlToJSON(y []byte, jsonTarget *reflect.Value, yamlUnmarshal func([]byte, interface{}) error) ([]byte, error) { +func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) { // Convert the YAML to an object. var yamlObj interface{} - err := yamlUnmarshal(y, &yamlObj) + err := yaml.Unmarshal(y, &yamlObj) if err != nil { return nil, err } @@ -134,7 +85,7 @@ func yamlToJSON(y []byte, jsonTarget *reflect.Value, yamlUnmarshal func([]byte, // YAML objects are not completely compatible with JSON objects (e.g. you // can have non-string keys in YAML). So, convert the YAML-compatible object // to a JSON-compatible object, failing with an error if irrecoverable - // incompatibilities happen along the way. + // incompatibilties happen along the way. jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget) if err != nil { return nil, err diff --git a/vendor/github.com/ghodss/yaml/yaml_go110.go b/vendor/github.com/ghodss/yaml/yaml_go110.go deleted file mode 100644 index ab3e06a22..000000000 --- a/vendor/github.com/ghodss/yaml/yaml_go110.go +++ /dev/null @@ -1,14 +0,0 @@ -// This file contains changes that are only compatible with go 1.10 and onwards. - -// +build go1.10 - -package yaml - -import "encoding/json" - -// DisallowUnknownFields configures the JSON decoder to error out if unknown -// fields come along, instead of dropping them by default. -func DisallowUnknownFields(d *json.Decoder) *json.Decoder { - d.DisallowUnknownFields() - return d -} diff --git a/vendor/github.com/go-ole/go-ole/.travis.yml b/vendor/github.com/go-ole/go-ole/.travis.yml deleted file mode 100644 index 28f740cd5..000000000 --- a/vendor/github.com/go-ole/go-ole/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go -sudo: false - -go: - - 1.9.x - - 1.10.x - - 1.11.x - - tip diff --git a/vendor/github.com/go-ole/go-ole/ChangeLog.md b/vendor/github.com/go-ole/go-ole/ChangeLog.md deleted file mode 100644 index 4ba6a8c64..000000000 --- a/vendor/github.com/go-ole/go-ole/ChangeLog.md +++ /dev/null @@ -1,49 +0,0 @@ -# Version 1.x.x - -* **Add more test cases and reference new test COM server project.** (Placeholder for future additions) - -# Version 1.2.0-alphaX - -**Minimum supported version is now Go 1.4. Go 1.1 support is deprecated, but should still build.** - - * Added CI configuration for Travis-CI and AppVeyor. - * Added test InterfaceID and ClassID for the COM Test Server project. - * Added more inline documentation (#83). - * Added IEnumVARIANT implementation (#88). 
- * Added IEnumVARIANT test cases (#99, #100, #101). - * Added support for retrieving `time.Time` from VARIANT (#92). - * Added test case for IUnknown (#64). - * Added test case for IDispatch (#64). - * Added test cases for scalar variants (#64, #76). - -# Version 1.1.1 - - * Fixes for Linux build. - * Fixes for Windows build. - -# Version 1.1.0 - -The change to provide building on all platforms is a new feature. The increase in minor version reflects that and allows those who wish to stay on 1.0.x to continue to do so. Support for 1.0.x will be limited to bug fixes. - - * Move GUID out of variables.go into its own file to make new documentation available. - * Move OleError out of ole.go into its own file to make new documentation available. - * Add documentation to utility functions. - * Add documentation to variant receiver functions. - * Add documentation to ole structures. - * Make variant available to other systems outside of Windows. - * Make OLE structures available to other systems outside of Windows. - -## New Features - - * Library should now be built on all platforms supported by Go. Library will NOOP on any platform that is not Windows. - * More functions are now documented and available on godoc.org. - -# Version 1.0.1 - - 1. Fix package references from repository location change. - -# Version 1.0.0 - -This version is stable enough for use. The COM API is still incomplete, but provides enough functionality for accessing COM servers using IDispatch interface. - -There is no changelog for this version. Check commits for history. diff --git a/vendor/github.com/go-ole/go-ole/LICENSE b/vendor/github.com/go-ole/go-ole/LICENSE deleted file mode 100644 index 623ec06f9..000000000 --- a/vendor/github.com/go-ole/go-ole/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright © 2013-2017 Yasuhiro Matsumoto, - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the “Software”), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
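For reference, the ghodss/yaml hunk above reverts the package to its pre-JSONOpt API: plain Unmarshal(y []byte, o interface{}) error and Marshal(o interface{}) ([]byte, error), with JSONOpt, UnmarshalStrict, YAMLToJSONStrict, and DisallowUnknownFields removed. A minimal usage sketch of that reverted API, assuming an illustrative Config struct and YAML literal that are not taken from this repository:

```go
package main

import (
	"fmt"

	yaml "github.com/ghodss/yaml"
)

// Config is a hypothetical target struct for this sketch. The JSON tags are
// what the package matches against, since it round-trips YAML through JSON.
type Config struct {
	Name     string   `json:"name"`
	Replicas int      `json:"replicas"`
	Tags     []string `json:"tags,omitempty"`
}

func main() {
	in := []byte("name: demo\nreplicas: 3\nunknown: dropped\n")

	var c Config
	// Reverted signature: Unmarshal(y []byte, o interface{}) error.
	// No strict mode exists in this version, so the "unknown" key is ignored.
	if err := yaml.Unmarshal(in, &c); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", c)

	// Marshal converts the struct back to YAML via its JSON representation.
	out, err := yaml.Marshal(c)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```

Because decoding goes through encoding/json without DisallowUnknownFields in the reverted version, unrecognized keys are dropped silently rather than reported.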
diff --git a/vendor/github.com/go-ole/go-ole/README.md b/vendor/github.com/go-ole/go-ole/README.md deleted file mode 100644 index 7b577558d..000000000 --- a/vendor/github.com/go-ole/go-ole/README.md +++ /dev/null @@ -1,46 +0,0 @@ -# Go OLE - -[![Build status](https://ci.appveyor.com/api/projects/status/qr0u2sf7q43us9fj?svg=true)](https://ci.appveyor.com/project/jacobsantos/go-ole-jgs28) -[![Build Status](https://travis-ci.org/go-ole/go-ole.svg?branch=master)](https://travis-ci.org/go-ole/go-ole) -[![GoDoc](https://godoc.org/github.com/go-ole/go-ole?status.svg)](https://godoc.org/github.com/go-ole/go-ole) - -Go bindings for Windows COM using shared libraries instead of cgo. - -By Yasuhiro Matsumoto. - -## Install - -To experiment with go-ole, you can just compile and run the example program: - -``` -go get github.com/go-ole/go-ole -cd /path/to/go-ole/ -go test - -cd /path/to/go-ole/example/excel -go run excel.go -``` - -## Continuous Integration - -Continuous integration configuration has been added for both Travis-CI and AppVeyor. You will have to add these to your own account for your fork in order for it to run. - -**Travis-CI** - -Travis-CI was added to check builds on Linux to ensure that `go get` works when cross building. Currently, Travis-CI is not used to test cross-building, but this may be changed in the future. It is also not currently possible to test the library on Linux, since COM API is specific to Windows and it is not currently possible to run a COM server on Linux or even connect to a remote COM server. - -**AppVeyor** - -AppVeyor is used to build on Windows using the (in-development) test COM server. It is currently only used to test the build and ensure that the code works on Windows. It will be used to register a COM server and then run the test cases based on the test COM server. - -The tests currently do run and do pass and this should be maintained with commits. - -## Versioning - -Go OLE uses [semantic versioning](http://semver.org) for version numbers, which is similar to the version contract of the Go language. Which means that the major version will always maintain backwards compatibility with minor versions. Minor versions will only add new additions and changes. Fixes will always be in patch. - -This contract should allow you to upgrade to new minor and patch versions without breakage or modifications to your existing code. Leave a ticket, if there is breakage, so that it could be fixed. - -## LICENSE - -Under the MIT License: http://mattn.mit-license.org/2013 diff --git a/vendor/github.com/go-ole/go-ole/appveyor.yml b/vendor/github.com/go-ole/go-ole/appveyor.yml deleted file mode 100644 index 0d557ac2f..000000000 --- a/vendor/github.com/go-ole/go-ole/appveyor.yml +++ /dev/null @@ -1,54 +0,0 @@ -# Notes: -# - Minimal appveyor.yml file is an empty file. All sections are optional. -# - Indent each level of configuration with 2 spaces. Do not use tabs! -# - All section names are case-sensitive. -# - Section names should be unique on each level. 
- -version: "1.3.0.{build}-alpha-{branch}" - -os: Windows Server 2012 R2 - -branches: - only: - - master - - v1.2 - - v1.1 - - v1.0 - -skip_tags: true - -clone_folder: c:\gopath\src\github.com\go-ole\go-ole - -environment: - GOPATH: c:\gopath - matrix: - - GOARCH: amd64 - GOVERSION: 1.5 - GOROOT: c:\go - DOWNLOADPLATFORM: "x64" - -install: - - choco install mingw - - SET PATH=c:\tools\mingw64\bin;%PATH% - # - Download COM Server - - ps: Start-FileDownload "https://github.com/go-ole/test-com-server/releases/download/v1.0.2/test-com-server-${env:DOWNLOADPLATFORM}.zip" - - 7z e test-com-server-%DOWNLOADPLATFORM%.zip -oc:\gopath\src\github.com\go-ole\go-ole > NUL - - c:\gopath\src\github.com\go-ole\go-ole\build\register-assembly.bat - # - set - - go version - - go env - - go get -u golang.org/x/tools/cmd/cover - - go get -u golang.org/x/tools/cmd/godoc - - go get -u golang.org/x/tools/cmd/stringer - -build_script: - - cd c:\gopath\src\github.com\go-ole\go-ole - - go get -v -t ./... - - go build - - go test -v -cover ./... - -# disable automatic tests -test: off - -# disable deployment -deploy: off diff --git a/vendor/github.com/go-ole/go-ole/com.go b/vendor/github.com/go-ole/go-ole/com.go deleted file mode 100644 index 6f986b189..000000000 --- a/vendor/github.com/go-ole/go-ole/com.go +++ /dev/null @@ -1,344 +0,0 @@ -// +build windows - -package ole - -import ( - "syscall" - "unicode/utf16" - "unsafe" -) - -var ( - procCoInitialize, _ = modole32.FindProc("CoInitialize") - procCoInitializeEx, _ = modole32.FindProc("CoInitializeEx") - procCoUninitialize, _ = modole32.FindProc("CoUninitialize") - procCoCreateInstance, _ = modole32.FindProc("CoCreateInstance") - procCoTaskMemFree, _ = modole32.FindProc("CoTaskMemFree") - procCLSIDFromProgID, _ = modole32.FindProc("CLSIDFromProgID") - procCLSIDFromString, _ = modole32.FindProc("CLSIDFromString") - procStringFromCLSID, _ = modole32.FindProc("StringFromCLSID") - procStringFromIID, _ = modole32.FindProc("StringFromIID") - procIIDFromString, _ = modole32.FindProc("IIDFromString") - procCoGetObject, _ = modole32.FindProc("CoGetObject") - procGetUserDefaultLCID, _ = modkernel32.FindProc("GetUserDefaultLCID") - procCopyMemory, _ = modkernel32.FindProc("RtlMoveMemory") - procVariantInit, _ = modoleaut32.FindProc("VariantInit") - procVariantClear, _ = modoleaut32.FindProc("VariantClear") - procVariantTimeToSystemTime, _ = modoleaut32.FindProc("VariantTimeToSystemTime") - procSysAllocString, _ = modoleaut32.FindProc("SysAllocString") - procSysAllocStringLen, _ = modoleaut32.FindProc("SysAllocStringLen") - procSysFreeString, _ = modoleaut32.FindProc("SysFreeString") - procSysStringLen, _ = modoleaut32.FindProc("SysStringLen") - procCreateDispTypeInfo, _ = modoleaut32.FindProc("CreateDispTypeInfo") - procCreateStdDispatch, _ = modoleaut32.FindProc("CreateStdDispatch") - procGetActiveObject, _ = modoleaut32.FindProc("GetActiveObject") - - procGetMessageW, _ = moduser32.FindProc("GetMessageW") - procDispatchMessageW, _ = moduser32.FindProc("DispatchMessageW") -) - -// coInitialize initializes COM library on current thread. -// -// MSDN documentation suggests that this function should not be called. Call -// CoInitializeEx() instead. The reason has to do with threading and this -// function is only for single-threaded apartments. -// -// That said, most users of the library have gotten away with just this -// function. If you are experiencing threading issues, then use -// CoInitializeEx(). 
-func coInitialize() (err error) { - // http://msdn.microsoft.com/en-us/library/windows/desktop/ms678543(v=vs.85).aspx - // Suggests that no value should be passed to CoInitialized. - // Could just be Call() since the parameter is optional. <-- Needs testing to be sure. - hr, _, _ := procCoInitialize.Call(uintptr(0)) - if hr != 0 { - err = NewError(hr) - } - return -} - -// coInitializeEx initializes COM library with concurrency model. -func coInitializeEx(coinit uint32) (err error) { - // http://msdn.microsoft.com/en-us/library/windows/desktop/ms695279(v=vs.85).aspx - // Suggests that the first parameter is not only optional but should always be NULL. - hr, _, _ := procCoInitializeEx.Call(uintptr(0), uintptr(coinit)) - if hr != 0 { - err = NewError(hr) - } - return -} - -// CoInitialize initializes COM library on current thread. -// -// MSDN documentation suggests that this function should not be called. Call -// CoInitializeEx() instead. The reason has to do with threading and this -// function is only for single-threaded apartments. -// -// That said, most users of the library have gotten away with just this -// function. If you are experiencing threading issues, then use -// CoInitializeEx(). -func CoInitialize(p uintptr) (err error) { - // p is ignored and won't be used. - // Avoid any variable not used errors. - p = uintptr(0) - return coInitialize() -} - -// CoInitializeEx initializes COM library with concurrency model. -func CoInitializeEx(p uintptr, coinit uint32) (err error) { - // Avoid any variable not used errors. - p = uintptr(0) - return coInitializeEx(coinit) -} - -// CoUninitialize uninitializes COM Library. -func CoUninitialize() { - procCoUninitialize.Call() -} - -// CoTaskMemFree frees memory pointer. -func CoTaskMemFree(memptr uintptr) { - procCoTaskMemFree.Call(memptr) -} - -// CLSIDFromProgID retrieves Class Identifier with the given Program Identifier. -// -// The Programmatic Identifier must be registered, because it will be looked up -// in the Windows Registry. The registry entry has the following keys: CLSID, -// Insertable, Protocol and Shell -// (https://msdn.microsoft.com/en-us/library/dd542719(v=vs.85).aspx). -// -// programID identifies the class id with less precision and is not guaranteed -// to be unique. These are usually found in the registry under -// HKEY_LOCAL_MACHINE\SOFTWARE\Classes, usually with the format of -// "Program.Component.Version" with version being optional. -// -// CLSIDFromProgID in Windows API. -func CLSIDFromProgID(progId string) (clsid *GUID, err error) { - var guid GUID - lpszProgID := uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(progId))) - hr, _, _ := procCLSIDFromProgID.Call(lpszProgID, uintptr(unsafe.Pointer(&guid))) - if hr != 0 { - err = NewError(hr) - } - clsid = &guid - return -} - -// CLSIDFromString retrieves Class ID from string representation. -// -// This is technically the string version of the GUID and will convert the -// string to object. -// -// CLSIDFromString in Windows API. -func CLSIDFromString(str string) (clsid *GUID, err error) { - var guid GUID - lpsz := uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(str))) - hr, _, _ := procCLSIDFromString.Call(lpsz, uintptr(unsafe.Pointer(&guid))) - if hr != 0 { - err = NewError(hr) - } - clsid = &guid - return -} - -// StringFromCLSID returns GUID formated string from GUID object. 
-func StringFromCLSID(clsid *GUID) (str string, err error) { - var p *uint16 - hr, _, _ := procStringFromCLSID.Call(uintptr(unsafe.Pointer(clsid)), uintptr(unsafe.Pointer(&p))) - if hr != 0 { - err = NewError(hr) - } - str = LpOleStrToString(p) - return -} - -// IIDFromString returns GUID from program ID. -func IIDFromString(progId string) (clsid *GUID, err error) { - var guid GUID - lpsz := uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(progId))) - hr, _, _ := procIIDFromString.Call(lpsz, uintptr(unsafe.Pointer(&guid))) - if hr != 0 { - err = NewError(hr) - } - clsid = &guid - return -} - -// StringFromIID returns GUID formatted string from GUID object. -func StringFromIID(iid *GUID) (str string, err error) { - var p *uint16 - hr, _, _ := procStringFromIID.Call(uintptr(unsafe.Pointer(iid)), uintptr(unsafe.Pointer(&p))) - if hr != 0 { - err = NewError(hr) - } - str = LpOleStrToString(p) - return -} - -// CreateInstance of single uninitialized object with GUID. -func CreateInstance(clsid *GUID, iid *GUID) (unk *IUnknown, err error) { - if iid == nil { - iid = IID_IUnknown - } - hr, _, _ := procCoCreateInstance.Call( - uintptr(unsafe.Pointer(clsid)), - 0, - CLSCTX_SERVER, - uintptr(unsafe.Pointer(iid)), - uintptr(unsafe.Pointer(&unk))) - if hr != 0 { - err = NewError(hr) - } - return -} - -// GetActiveObject retrieves pointer to active object. -func GetActiveObject(clsid *GUID, iid *GUID) (unk *IUnknown, err error) { - if iid == nil { - iid = IID_IUnknown - } - hr, _, _ := procGetActiveObject.Call( - uintptr(unsafe.Pointer(clsid)), - uintptr(unsafe.Pointer(iid)), - uintptr(unsafe.Pointer(&unk))) - if hr != 0 { - err = NewError(hr) - } - return -} - -type BindOpts struct { - CbStruct uint32 - GrfFlags uint32 - GrfMode uint32 - TickCountDeadline uint32 -} - -// GetObject retrieves pointer to active object. -func GetObject(programID string, bindOpts *BindOpts, iid *GUID) (unk *IUnknown, err error) { - if bindOpts != nil { - bindOpts.CbStruct = uint32(unsafe.Sizeof(BindOpts{})) - } - if iid == nil { - iid = IID_IUnknown - } - hr, _, _ := procCoGetObject.Call( - uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(programID))), - uintptr(unsafe.Pointer(bindOpts)), - uintptr(unsafe.Pointer(iid)), - uintptr(unsafe.Pointer(&unk))) - if hr != 0 { - err = NewError(hr) - } - return -} - -// VariantInit initializes variant. -func VariantInit(v *VARIANT) (err error) { - hr, _, _ := procVariantInit.Call(uintptr(unsafe.Pointer(v))) - if hr != 0 { - err = NewError(hr) - } - return -} - -// VariantClear clears value in Variant settings to VT_EMPTY. -func VariantClear(v *VARIANT) (err error) { - hr, _, _ := procVariantClear.Call(uintptr(unsafe.Pointer(v))) - if hr != 0 { - err = NewError(hr) - } - return -} - -// SysAllocString allocates memory for string and copies string into memory. -func SysAllocString(v string) (ss *int16) { - pss, _, _ := procSysAllocString.Call(uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(v)))) - ss = (*int16)(unsafe.Pointer(pss)) - return -} - -// SysAllocStringLen copies up to length of given string returning pointer. -func SysAllocStringLen(v string) (ss *int16) { - utf16 := utf16.Encode([]rune(v + "\x00")) - ptr := &utf16[0] - - pss, _, _ := procSysAllocStringLen.Call(uintptr(unsafe.Pointer(ptr)), uintptr(len(utf16)-1)) - ss = (*int16)(unsafe.Pointer(pss)) - return -} - -// SysFreeString frees string system memory. This must be called with SysAllocString. 
-func SysFreeString(v *int16) (err error) { - hr, _, _ := procSysFreeString.Call(uintptr(unsafe.Pointer(v))) - if hr != 0 { - err = NewError(hr) - } - return -} - -// SysStringLen is the length of the system allocated string. -func SysStringLen(v *int16) uint32 { - l, _, _ := procSysStringLen.Call(uintptr(unsafe.Pointer(v))) - return uint32(l) -} - -// CreateStdDispatch provides default IDispatch implementation for IUnknown. -// -// This handles default IDispatch implementation for objects. It haves a few -// limitations with only supporting one language. It will also only return -// default exception codes. -func CreateStdDispatch(unk *IUnknown, v uintptr, ptinfo *IUnknown) (disp *IDispatch, err error) { - hr, _, _ := procCreateStdDispatch.Call( - uintptr(unsafe.Pointer(unk)), - v, - uintptr(unsafe.Pointer(ptinfo)), - uintptr(unsafe.Pointer(&disp))) - if hr != 0 { - err = NewError(hr) - } - return -} - -// CreateDispTypeInfo provides default ITypeInfo implementation for IDispatch. -// -// This will not handle the full implementation of the interface. -func CreateDispTypeInfo(idata *INTERFACEDATA) (pptinfo *IUnknown, err error) { - hr, _, _ := procCreateDispTypeInfo.Call( - uintptr(unsafe.Pointer(idata)), - uintptr(GetUserDefaultLCID()), - uintptr(unsafe.Pointer(&pptinfo))) - if hr != 0 { - err = NewError(hr) - } - return -} - -// copyMemory moves location of a block of memory. -func copyMemory(dest unsafe.Pointer, src unsafe.Pointer, length uint32) { - procCopyMemory.Call(uintptr(dest), uintptr(src), uintptr(length)) -} - -// GetUserDefaultLCID retrieves current user default locale. -func GetUserDefaultLCID() (lcid uint32) { - ret, _, _ := procGetUserDefaultLCID.Call() - lcid = uint32(ret) - return -} - -// GetMessage in message queue from runtime. -// -// This function appears to block. PeekMessage does not block. -func GetMessage(msg *Msg, hwnd uint32, MsgFilterMin uint32, MsgFilterMax uint32) (ret int32, err error) { - r0, _, err := procGetMessageW.Call(uintptr(unsafe.Pointer(msg)), uintptr(hwnd), uintptr(MsgFilterMin), uintptr(MsgFilterMax)) - ret = int32(r0) - return -} - -// DispatchMessage to window procedure. -func DispatchMessage(msg *Msg) (ret int32) { - r0, _, _ := procDispatchMessageW.Call(uintptr(unsafe.Pointer(msg))) - ret = int32(r0) - return -} diff --git a/vendor/github.com/go-ole/go-ole/com_func.go b/vendor/github.com/go-ole/go-ole/com_func.go deleted file mode 100644 index cef539d9d..000000000 --- a/vendor/github.com/go-ole/go-ole/com_func.go +++ /dev/null @@ -1,174 +0,0 @@ -// +build !windows - -package ole - -import ( - "time" - "unsafe" -) - -// coInitialize initializes COM library on current thread. -// -// MSDN documentation suggests that this function should not be called. Call -// CoInitializeEx() instead. The reason has to do with threading and this -// function is only for single-threaded apartments. -// -// That said, most users of the library have gotten away with just this -// function. If you are experiencing threading issues, then use -// CoInitializeEx(). -func coInitialize() error { - return NewError(E_NOTIMPL) -} - -// coInitializeEx initializes COM library with concurrency model. -func coInitializeEx(coinit uint32) error { - return NewError(E_NOTIMPL) -} - -// CoInitialize initializes COM library on current thread. -// -// MSDN documentation suggests that this function should not be called. Call -// CoInitializeEx() instead. The reason has to do with threading and this -// function is only for single-threaded apartments. 
-// -// That said, most users of the library have gotten away with just this -// function. If you are experiencing threading issues, then use -// CoInitializeEx(). -func CoInitialize(p uintptr) error { - return NewError(E_NOTIMPL) -} - -// CoInitializeEx initializes COM library with concurrency model. -func CoInitializeEx(p uintptr, coinit uint32) error { - return NewError(E_NOTIMPL) -} - -// CoUninitialize uninitializes COM Library. -func CoUninitialize() {} - -// CoTaskMemFree frees memory pointer. -func CoTaskMemFree(memptr uintptr) {} - -// CLSIDFromProgID retrieves Class Identifier with the given Program Identifier. -// -// The Programmatic Identifier must be registered, because it will be looked up -// in the Windows Registry. The registry entry has the following keys: CLSID, -// Insertable, Protocol and Shell -// (https://msdn.microsoft.com/en-us/library/dd542719(v=vs.85).aspx). -// -// programID identifies the class id with less precision and is not guaranteed -// to be unique. These are usually found in the registry under -// HKEY_LOCAL_MACHINE\SOFTWARE\Classes, usually with the format of -// "Program.Component.Version" with version being optional. -// -// CLSIDFromProgID in Windows API. -func CLSIDFromProgID(progId string) (*GUID, error) { - return nil, NewError(E_NOTIMPL) -} - -// CLSIDFromString retrieves Class ID from string representation. -// -// This is technically the string version of the GUID and will convert the -// string to object. -// -// CLSIDFromString in Windows API. -func CLSIDFromString(str string) (*GUID, error) { - return nil, NewError(E_NOTIMPL) -} - -// StringFromCLSID returns GUID formated string from GUID object. -func StringFromCLSID(clsid *GUID) (string, error) { - return "", NewError(E_NOTIMPL) -} - -// IIDFromString returns GUID from program ID. -func IIDFromString(progId string) (*GUID, error) { - return nil, NewError(E_NOTIMPL) -} - -// StringFromIID returns GUID formatted string from GUID object. -func StringFromIID(iid *GUID) (string, error) { - return "", NewError(E_NOTIMPL) -} - -// CreateInstance of single uninitialized object with GUID. -func CreateInstance(clsid *GUID, iid *GUID) (*IUnknown, error) { - return nil, NewError(E_NOTIMPL) -} - -// GetActiveObject retrieves pointer to active object. -func GetActiveObject(clsid *GUID, iid *GUID) (*IUnknown, error) { - return nil, NewError(E_NOTIMPL) -} - -// VariantInit initializes variant. -func VariantInit(v *VARIANT) error { - return NewError(E_NOTIMPL) -} - -// VariantClear clears value in Variant settings to VT_EMPTY. -func VariantClear(v *VARIANT) error { - return NewError(E_NOTIMPL) -} - -// SysAllocString allocates memory for string and copies string into memory. -func SysAllocString(v string) *int16 { - u := int16(0) - return &u -} - -// SysAllocStringLen copies up to length of given string returning pointer. -func SysAllocStringLen(v string) *int16 { - u := int16(0) - return &u -} - -// SysFreeString frees string system memory. This must be called with SysAllocString. -func SysFreeString(v *int16) error { - return NewError(E_NOTIMPL) -} - -// SysStringLen is the length of the system allocated string. -func SysStringLen(v *int16) uint32 { - return uint32(0) -} - -// CreateStdDispatch provides default IDispatch implementation for IUnknown. -// -// This handles default IDispatch implementation for objects. It haves a few -// limitations with only supporting one language. It will also only return -// default exception codes. 
-func CreateStdDispatch(unk *IUnknown, v uintptr, ptinfo *IUnknown) (*IDispatch, error) { - return nil, NewError(E_NOTIMPL) -} - -// CreateDispTypeInfo provides default ITypeInfo implementation for IDispatch. -// -// This will not handle the full implementation of the interface. -func CreateDispTypeInfo(idata *INTERFACEDATA) (*IUnknown, error) { - return nil, NewError(E_NOTIMPL) -} - -// copyMemory moves location of a block of memory. -func copyMemory(dest unsafe.Pointer, src unsafe.Pointer, length uint32) {} - -// GetUserDefaultLCID retrieves current user default locale. -func GetUserDefaultLCID() uint32 { - return uint32(0) -} - -// GetMessage in message queue from runtime. -// -// This function appears to block. PeekMessage does not block. -func GetMessage(msg *Msg, hwnd uint32, MsgFilterMin uint32, MsgFilterMax uint32) (int32, error) { - return int32(0), NewError(E_NOTIMPL) -} - -// DispatchMessage to window procedure. -func DispatchMessage(msg *Msg) int32 { - return int32(0) -} - -func GetVariantDate(value uint64) (time.Time, error) { - return time.Now(), NewError(E_NOTIMPL) -} diff --git a/vendor/github.com/go-ole/go-ole/connect.go b/vendor/github.com/go-ole/go-ole/connect.go deleted file mode 100644 index b2ac2ec67..000000000 --- a/vendor/github.com/go-ole/go-ole/connect.go +++ /dev/null @@ -1,192 +0,0 @@ -package ole - -// Connection contains IUnknown for fluent interface interaction. -// -// Deprecated. Use oleutil package instead. -type Connection struct { - Object *IUnknown // Access COM -} - -// Initialize COM. -func (*Connection) Initialize() (err error) { - return coInitialize() -} - -// Uninitialize COM. -func (*Connection) Uninitialize() { - CoUninitialize() -} - -// Create IUnknown object based first on ProgId and then from String. -func (c *Connection) Create(progId string) (err error) { - var clsid *GUID - clsid, err = CLSIDFromProgID(progId) - if err != nil { - clsid, err = CLSIDFromString(progId) - if err != nil { - return - } - } - - unknown, err := CreateInstance(clsid, IID_IUnknown) - if err != nil { - return - } - c.Object = unknown - - return -} - -// Release IUnknown object. -func (c *Connection) Release() { - c.Object.Release() -} - -// Load COM object from list of programIDs or strings. -func (c *Connection) Load(names ...string) (errors []error) { - var tempErrors []error = make([]error, len(names)) - var numErrors int = 0 - for _, name := range names { - err := c.Create(name) - if err != nil { - tempErrors = append(tempErrors, err) - numErrors += 1 - continue - } - break - } - - copy(errors, tempErrors[0:numErrors]) - return -} - -// Dispatch returns Dispatch object. -func (c *Connection) Dispatch() (object *Dispatch, err error) { - dispatch, err := c.Object.QueryInterface(IID_IDispatch) - if err != nil { - return - } - object = &Dispatch{dispatch} - return -} - -// Dispatch stores IDispatch object. -type Dispatch struct { - Object *IDispatch // Dispatch object. -} - -// Call method on IDispatch with parameters. -func (d *Dispatch) Call(method string, params ...interface{}) (result *VARIANT, err error) { - id, err := d.GetId(method) - if err != nil { - return - } - - result, err = d.Invoke(id, DISPATCH_METHOD, params) - return -} - -// MustCall method on IDispatch with parameters. 
-func (d *Dispatch) MustCall(method string, params ...interface{}) (result *VARIANT) { - id, err := d.GetId(method) - if err != nil { - panic(err) - } - - result, err = d.Invoke(id, DISPATCH_METHOD, params) - if err != nil { - panic(err) - } - - return -} - -// Get property on IDispatch with parameters. -func (d *Dispatch) Get(name string, params ...interface{}) (result *VARIANT, err error) { - id, err := d.GetId(name) - if err != nil { - return - } - result, err = d.Invoke(id, DISPATCH_PROPERTYGET, params) - return -} - -// MustGet property on IDispatch with parameters. -func (d *Dispatch) MustGet(name string, params ...interface{}) (result *VARIANT) { - id, err := d.GetId(name) - if err != nil { - panic(err) - } - - result, err = d.Invoke(id, DISPATCH_PROPERTYGET, params) - if err != nil { - panic(err) - } - return -} - -// Set property on IDispatch with parameters. -func (d *Dispatch) Set(name string, params ...interface{}) (result *VARIANT, err error) { - id, err := d.GetId(name) - if err != nil { - return - } - result, err = d.Invoke(id, DISPATCH_PROPERTYPUT, params) - return -} - -// MustSet property on IDispatch with parameters. -func (d *Dispatch) MustSet(name string, params ...interface{}) (result *VARIANT) { - id, err := d.GetId(name) - if err != nil { - panic(err) - } - - result, err = d.Invoke(id, DISPATCH_PROPERTYPUT, params) - if err != nil { - panic(err) - } - return -} - -// GetId retrieves ID of name on IDispatch. -func (d *Dispatch) GetId(name string) (id int32, err error) { - var dispid []int32 - dispid, err = d.Object.GetIDsOfName([]string{name}) - if err != nil { - return - } - id = dispid[0] - return -} - -// GetIds retrieves all IDs of names on IDispatch. -func (d *Dispatch) GetIds(names ...string) (dispid []int32, err error) { - dispid, err = d.Object.GetIDsOfName(names) - return -} - -// Invoke IDispatch on DisplayID of dispatch type with parameters. -// -// There have been problems where if send cascading params..., it would error -// out because the parameters would be empty. -func (d *Dispatch) Invoke(id int32, dispatch int16, params []interface{}) (result *VARIANT, err error) { - if len(params) < 1 { - result, err = d.Object.Invoke(id, dispatch) - } else { - result, err = d.Object.Invoke(id, dispatch, params...) - } - return -} - -// Release IDispatch object. -func (d *Dispatch) Release() { - d.Object.Release() -} - -// Connect initializes COM and attempts to load IUnknown based on given names. -func Connect(names ...string) (connection *Connection) { - connection.Initialize() - connection.Load(names...) 
- return -} diff --git a/vendor/github.com/go-ole/go-ole/constants.go b/vendor/github.com/go-ole/go-ole/constants.go deleted file mode 100644 index fd0c6d74b..000000000 --- a/vendor/github.com/go-ole/go-ole/constants.go +++ /dev/null @@ -1,153 +0,0 @@ -package ole - -const ( - CLSCTX_INPROC_SERVER = 1 - CLSCTX_INPROC_HANDLER = 2 - CLSCTX_LOCAL_SERVER = 4 - CLSCTX_INPROC_SERVER16 = 8 - CLSCTX_REMOTE_SERVER = 16 - CLSCTX_ALL = CLSCTX_INPROC_SERVER | CLSCTX_INPROC_HANDLER | CLSCTX_LOCAL_SERVER - CLSCTX_INPROC = CLSCTX_INPROC_SERVER | CLSCTX_INPROC_HANDLER - CLSCTX_SERVER = CLSCTX_INPROC_SERVER | CLSCTX_LOCAL_SERVER | CLSCTX_REMOTE_SERVER -) - -const ( - COINIT_APARTMENTTHREADED = 0x2 - COINIT_MULTITHREADED = 0x0 - COINIT_DISABLE_OLE1DDE = 0x4 - COINIT_SPEED_OVER_MEMORY = 0x8 -) - -const ( - DISPATCH_METHOD = 1 - DISPATCH_PROPERTYGET = 2 - DISPATCH_PROPERTYPUT = 4 - DISPATCH_PROPERTYPUTREF = 8 -) - -const ( - S_OK = 0x00000000 - E_UNEXPECTED = 0x8000FFFF - E_NOTIMPL = 0x80004001 - E_OUTOFMEMORY = 0x8007000E - E_INVALIDARG = 0x80070057 - E_NOINTERFACE = 0x80004002 - E_POINTER = 0x80004003 - E_HANDLE = 0x80070006 - E_ABORT = 0x80004004 - E_FAIL = 0x80004005 - E_ACCESSDENIED = 0x80070005 - E_PENDING = 0x8000000A - - CO_E_CLASSSTRING = 0x800401F3 -) - -const ( - CC_FASTCALL = iota - CC_CDECL - CC_MSCPASCAL - CC_PASCAL = CC_MSCPASCAL - CC_MACPASCAL - CC_STDCALL - CC_FPFASTCALL - CC_SYSCALL - CC_MPWCDECL - CC_MPWPASCAL - CC_MAX = CC_MPWPASCAL -) - -type VT uint16 - -const ( - VT_EMPTY VT = 0x0 - VT_NULL VT = 0x1 - VT_I2 VT = 0x2 - VT_I4 VT = 0x3 - VT_R4 VT = 0x4 - VT_R8 VT = 0x5 - VT_CY VT = 0x6 - VT_DATE VT = 0x7 - VT_BSTR VT = 0x8 - VT_DISPATCH VT = 0x9 - VT_ERROR VT = 0xa - VT_BOOL VT = 0xb - VT_VARIANT VT = 0xc - VT_UNKNOWN VT = 0xd - VT_DECIMAL VT = 0xe - VT_I1 VT = 0x10 - VT_UI1 VT = 0x11 - VT_UI2 VT = 0x12 - VT_UI4 VT = 0x13 - VT_I8 VT = 0x14 - VT_UI8 VT = 0x15 - VT_INT VT = 0x16 - VT_UINT VT = 0x17 - VT_VOID VT = 0x18 - VT_HRESULT VT = 0x19 - VT_PTR VT = 0x1a - VT_SAFEARRAY VT = 0x1b - VT_CARRAY VT = 0x1c - VT_USERDEFINED VT = 0x1d - VT_LPSTR VT = 0x1e - VT_LPWSTR VT = 0x1f - VT_RECORD VT = 0x24 - VT_INT_PTR VT = 0x25 - VT_UINT_PTR VT = 0x26 - VT_FILETIME VT = 0x40 - VT_BLOB VT = 0x41 - VT_STREAM VT = 0x42 - VT_STORAGE VT = 0x43 - VT_STREAMED_OBJECT VT = 0x44 - VT_STORED_OBJECT VT = 0x45 - VT_BLOB_OBJECT VT = 0x46 - VT_CF VT = 0x47 - VT_CLSID VT = 0x48 - VT_BSTR_BLOB VT = 0xfff - VT_VECTOR VT = 0x1000 - VT_ARRAY VT = 0x2000 - VT_BYREF VT = 0x4000 - VT_RESERVED VT = 0x8000 - VT_ILLEGAL VT = 0xffff - VT_ILLEGALMASKED VT = 0xfff - VT_TYPEMASK VT = 0xfff -) - -const ( - DISPID_UNKNOWN = -1 - DISPID_VALUE = 0 - DISPID_PROPERTYPUT = -3 - DISPID_NEWENUM = -4 - DISPID_EVALUATE = -5 - DISPID_CONSTRUCTOR = -6 - DISPID_DESTRUCTOR = -7 - DISPID_COLLECT = -8 -) - -const ( - TKIND_ENUM = 1 - TKIND_RECORD = 2 - TKIND_MODULE = 3 - TKIND_INTERFACE = 4 - TKIND_DISPATCH = 5 - TKIND_COCLASS = 6 - TKIND_ALIAS = 7 - TKIND_UNION = 8 - TKIND_MAX = 9 -) - -// Safe Array Feature Flags - -const ( - FADF_AUTO = 0x0001 - FADF_STATIC = 0x0002 - FADF_EMBEDDED = 0x0004 - FADF_FIXEDSIZE = 0x0010 - FADF_RECORD = 0x0020 - FADF_HAVEIID = 0x0040 - FADF_HAVEVARTYPE = 0x0080 - FADF_BSTR = 0x0100 - FADF_UNKNOWN = 0x0200 - FADF_DISPATCH = 0x0400 - FADF_VARIANT = 0x0800 - FADF_RESERVED = 0xF008 -) diff --git a/vendor/github.com/go-ole/go-ole/error.go b/vendor/github.com/go-ole/go-ole/error.go deleted file mode 100644 index 096b456d3..000000000 --- a/vendor/github.com/go-ole/go-ole/error.go +++ /dev/null @@ -1,51 +0,0 @@ -package ole 
- -// OleError stores COM errors. -type OleError struct { - hr uintptr - description string - subError error -} - -// NewError creates new error with HResult. -func NewError(hr uintptr) *OleError { - return &OleError{hr: hr} -} - -// NewErrorWithDescription creates new COM error with HResult and description. -func NewErrorWithDescription(hr uintptr, description string) *OleError { - return &OleError{hr: hr, description: description} -} - -// NewErrorWithSubError creates new COM error with parent error. -func NewErrorWithSubError(hr uintptr, description string, err error) *OleError { - return &OleError{hr: hr, description: description, subError: err} -} - -// Code is the HResult. -func (v *OleError) Code() uintptr { - return uintptr(v.hr) -} - -// String description, either manually set or format message with error code. -func (v *OleError) String() string { - if v.description != "" { - return errstr(int(v.hr)) + " (" + v.description + ")" - } - return errstr(int(v.hr)) -} - -// Error implements error interface. -func (v *OleError) Error() string { - return v.String() -} - -// Description retrieves error summary, if there is one. -func (v *OleError) Description() string { - return v.description -} - -// SubError returns parent error, if there is one. -func (v *OleError) SubError() error { - return v.subError -} diff --git a/vendor/github.com/go-ole/go-ole/error_func.go b/vendor/github.com/go-ole/go-ole/error_func.go deleted file mode 100644 index 8a2ffaa27..000000000 --- a/vendor/github.com/go-ole/go-ole/error_func.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !windows - -package ole - -// errstr converts error code to string. -func errstr(errno int) string { - return "" -} diff --git a/vendor/github.com/go-ole/go-ole/error_windows.go b/vendor/github.com/go-ole/go-ole/error_windows.go deleted file mode 100644 index d0e8e6859..000000000 --- a/vendor/github.com/go-ole/go-ole/error_windows.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build windows - -package ole - -import ( - "fmt" - "syscall" - "unicode/utf16" -) - -// errstr converts error code to string. -func errstr(errno int) string { - // ask windows for the remaining errors - var flags uint32 = syscall.FORMAT_MESSAGE_FROM_SYSTEM | syscall.FORMAT_MESSAGE_ARGUMENT_ARRAY | syscall.FORMAT_MESSAGE_IGNORE_INSERTS - b := make([]uint16, 300) - n, err := syscall.FormatMessage(flags, 0, uint32(errno), 0, b, nil) - if err != nil { - return fmt.Sprintf("error %d (FormatMessage failed with: %v)", errno, err) - } - // trim terminating \r and \n - for ; n > 0 && (b[n-1] == '\n' || b[n-1] == '\r'); n-- { - } - return string(utf16.Decode(b[:n])) -} diff --git a/vendor/github.com/go-ole/go-ole/guid.go b/vendor/github.com/go-ole/go-ole/guid.go deleted file mode 100644 index 8d20f68fb..000000000 --- a/vendor/github.com/go-ole/go-ole/guid.go +++ /dev/null @@ -1,284 +0,0 @@ -package ole - -var ( - // IID_NULL is null Interface ID, used when no other Interface ID is known. - IID_NULL = NewGUID("{00000000-0000-0000-0000-000000000000}") - - // IID_IUnknown is for IUnknown interfaces. - IID_IUnknown = NewGUID("{00000000-0000-0000-C000-000000000046}") - - // IID_IDispatch is for IDispatch interfaces. - IID_IDispatch = NewGUID("{00020400-0000-0000-C000-000000000046}") - - // IID_IEnumVariant is for IEnumVariant interfaces - IID_IEnumVariant = NewGUID("{00020404-0000-0000-C000-000000000046}") - - // IID_IConnectionPointContainer is for IConnectionPointContainer interfaces. 
- IID_IConnectionPointContainer = NewGUID("{B196B284-BAB4-101A-B69C-00AA00341D07}") - - // IID_IConnectionPoint is for IConnectionPoint interfaces. - IID_IConnectionPoint = NewGUID("{B196B286-BAB4-101A-B69C-00AA00341D07}") - - // IID_IInspectable is for IInspectable interfaces. - IID_IInspectable = NewGUID("{AF86E2E0-B12D-4C6A-9C5A-D7AA65101E90}") - - // IID_IProvideClassInfo is for IProvideClassInfo interfaces. - IID_IProvideClassInfo = NewGUID("{B196B283-BAB4-101A-B69C-00AA00341D07}") -) - -// These are for testing and not part of any library. -var ( - // IID_ICOMTestString is for ICOMTestString interfaces. - // - // {E0133EB4-C36F-469A-9D3D-C66B84BE19ED} - IID_ICOMTestString = NewGUID("{E0133EB4-C36F-469A-9D3D-C66B84BE19ED}") - - // IID_ICOMTestInt8 is for ICOMTestInt8 interfaces. - // - // {BEB06610-EB84-4155-AF58-E2BFF53680B4} - IID_ICOMTestInt8 = NewGUID("{BEB06610-EB84-4155-AF58-E2BFF53680B4}") - - // IID_ICOMTestInt16 is for ICOMTestInt16 interfaces. - // - // {DAA3F9FA-761E-4976-A860-8364CE55F6FC} - IID_ICOMTestInt16 = NewGUID("{DAA3F9FA-761E-4976-A860-8364CE55F6FC}") - - // IID_ICOMTestInt32 is for ICOMTestInt32 interfaces. - // - // {E3DEDEE7-38A2-4540-91D1-2EEF1D8891B0} - IID_ICOMTestInt32 = NewGUID("{E3DEDEE7-38A2-4540-91D1-2EEF1D8891B0}") - - // IID_ICOMTestInt64 is for ICOMTestInt64 interfaces. - // - // {8D437CBC-B3ED-485C-BC32-C336432A1623} - IID_ICOMTestInt64 = NewGUID("{8D437CBC-B3ED-485C-BC32-C336432A1623}") - - // IID_ICOMTestFloat is for ICOMTestFloat interfaces. - // - // {BF1ED004-EA02-456A-AA55-2AC8AC6B054C} - IID_ICOMTestFloat = NewGUID("{BF1ED004-EA02-456A-AA55-2AC8AC6B054C}") - - // IID_ICOMTestDouble is for ICOMTestDouble interfaces. - // - // {BF908A81-8687-4E93-999F-D86FAB284BA0} - IID_ICOMTestDouble = NewGUID("{BF908A81-8687-4E93-999F-D86FAB284BA0}") - - // IID_ICOMTestBoolean is for ICOMTestBoolean interfaces. - // - // {D530E7A6-4EE8-40D1-8931-3D63B8605010} - IID_ICOMTestBoolean = NewGUID("{D530E7A6-4EE8-40D1-8931-3D63B8605010}") - - // IID_ICOMEchoTestObject is for ICOMEchoTestObject interfaces. - // - // {6485B1EF-D780-4834-A4FE-1EBB51746CA3} - IID_ICOMEchoTestObject = NewGUID("{6485B1EF-D780-4834-A4FE-1EBB51746CA3}") - - // IID_ICOMTestTypes is for ICOMTestTypes interfaces. - // - // {CCA8D7AE-91C0-4277-A8B3-FF4EDF28D3C0} - IID_ICOMTestTypes = NewGUID("{CCA8D7AE-91C0-4277-A8B3-FF4EDF28D3C0}") - - // CLSID_COMEchoTestObject is for COMEchoTestObject class. - // - // {3C24506A-AE9E-4D50-9157-EF317281F1B0} - CLSID_COMEchoTestObject = NewGUID("{3C24506A-AE9E-4D50-9157-EF317281F1B0}") - - // CLSID_COMTestScalarClass is for COMTestScalarClass class. - // - // {865B85C5-0334-4AC6-9EF6-AACEC8FC5E86} - CLSID_COMTestScalarClass = NewGUID("{865B85C5-0334-4AC6-9EF6-AACEC8FC5E86}") -) - -const hextable = "0123456789ABCDEF" -const emptyGUID = "{00000000-0000-0000-0000-000000000000}" - -// GUID is Windows API specific GUID type. -// -// This exists to match Windows GUID type for direct passing for COM. -// Format is in xxxxxxxx-xxxx-xxxx-xxxxxxxxxxxxxxxx. -type GUID struct { - Data1 uint32 - Data2 uint16 - Data3 uint16 - Data4 [8]byte -} - -// NewGUID converts the given string into a globally unique identifier that is -// compliant with the Windows API. -// -// The supplied string may be in any of these formats: -// -// XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX -// XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX -// {XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX} -// -// The conversion of the supplied string is not case-sensitive. 
-func NewGUID(guid string) *GUID { - d := []byte(guid) - var d1, d2, d3, d4a, d4b []byte - - switch len(d) { - case 38: - if d[0] != '{' || d[37] != '}' { - return nil - } - d = d[1:37] - fallthrough - case 36: - if d[8] != '-' || d[13] != '-' || d[18] != '-' || d[23] != '-' { - return nil - } - d1 = d[0:8] - d2 = d[9:13] - d3 = d[14:18] - d4a = d[19:23] - d4b = d[24:36] - case 32: - d1 = d[0:8] - d2 = d[8:12] - d3 = d[12:16] - d4a = d[16:20] - d4b = d[20:32] - default: - return nil - } - - var g GUID - var ok1, ok2, ok3, ok4 bool - g.Data1, ok1 = decodeHexUint32(d1) - g.Data2, ok2 = decodeHexUint16(d2) - g.Data3, ok3 = decodeHexUint16(d3) - g.Data4, ok4 = decodeHexByte64(d4a, d4b) - if ok1 && ok2 && ok3 && ok4 { - return &g - } - return nil -} - -func decodeHexUint32(src []byte) (value uint32, ok bool) { - var b1, b2, b3, b4 byte - var ok1, ok2, ok3, ok4 bool - b1, ok1 = decodeHexByte(src[0], src[1]) - b2, ok2 = decodeHexByte(src[2], src[3]) - b3, ok3 = decodeHexByte(src[4], src[5]) - b4, ok4 = decodeHexByte(src[6], src[7]) - value = (uint32(b1) << 24) | (uint32(b2) << 16) | (uint32(b3) << 8) | uint32(b4) - ok = ok1 && ok2 && ok3 && ok4 - return -} - -func decodeHexUint16(src []byte) (value uint16, ok bool) { - var b1, b2 byte - var ok1, ok2 bool - b1, ok1 = decodeHexByte(src[0], src[1]) - b2, ok2 = decodeHexByte(src[2], src[3]) - value = (uint16(b1) << 8) | uint16(b2) - ok = ok1 && ok2 - return -} - -func decodeHexByte64(s1 []byte, s2 []byte) (value [8]byte, ok bool) { - var ok1, ok2, ok3, ok4, ok5, ok6, ok7, ok8 bool - value[0], ok1 = decodeHexByte(s1[0], s1[1]) - value[1], ok2 = decodeHexByte(s1[2], s1[3]) - value[2], ok3 = decodeHexByte(s2[0], s2[1]) - value[3], ok4 = decodeHexByte(s2[2], s2[3]) - value[4], ok5 = decodeHexByte(s2[4], s2[5]) - value[5], ok6 = decodeHexByte(s2[6], s2[7]) - value[6], ok7 = decodeHexByte(s2[8], s2[9]) - value[7], ok8 = decodeHexByte(s2[10], s2[11]) - ok = ok1 && ok2 && ok3 && ok4 && ok5 && ok6 && ok7 && ok8 - return -} - -func decodeHexByte(c1, c2 byte) (value byte, ok bool) { - var n1, n2 byte - var ok1, ok2 bool - n1, ok1 = decodeHexChar(c1) - n2, ok2 = decodeHexChar(c2) - value = (n1 << 4) | n2 - ok = ok1 && ok2 - return -} - -func decodeHexChar(c byte) (byte, bool) { - switch { - case '0' <= c && c <= '9': - return c - '0', true - case 'a' <= c && c <= 'f': - return c - 'a' + 10, true - case 'A' <= c && c <= 'F': - return c - 'A' + 10, true - } - - return 0, false -} - -// String converts the GUID to string form. 
It will adhere to this pattern: -// -// {XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX} -// -// If the GUID is nil, the string representation of an empty GUID is returned: -// -// {00000000-0000-0000-0000-000000000000} -func (guid *GUID) String() string { - if guid == nil { - return emptyGUID - } - - var c [38]byte - c[0] = '{' - putUint32Hex(c[1:9], guid.Data1) - c[9] = '-' - putUint16Hex(c[10:14], guid.Data2) - c[14] = '-' - putUint16Hex(c[15:19], guid.Data3) - c[19] = '-' - putByteHex(c[20:24], guid.Data4[0:2]) - c[24] = '-' - putByteHex(c[25:37], guid.Data4[2:8]) - c[37] = '}' - return string(c[:]) -} - -func putUint32Hex(b []byte, v uint32) { - b[0] = hextable[byte(v>>24)>>4] - b[1] = hextable[byte(v>>24)&0x0f] - b[2] = hextable[byte(v>>16)>>4] - b[3] = hextable[byte(v>>16)&0x0f] - b[4] = hextable[byte(v>>8)>>4] - b[5] = hextable[byte(v>>8)&0x0f] - b[6] = hextable[byte(v)>>4] - b[7] = hextable[byte(v)&0x0f] -} - -func putUint16Hex(b []byte, v uint16) { - b[0] = hextable[byte(v>>8)>>4] - b[1] = hextable[byte(v>>8)&0x0f] - b[2] = hextable[byte(v)>>4] - b[3] = hextable[byte(v)&0x0f] -} - -func putByteHex(dst, src []byte) { - for i := 0; i < len(src); i++ { - dst[i*2] = hextable[src[i]>>4] - dst[i*2+1] = hextable[src[i]&0x0f] - } -} - -// IsEqualGUID compares two GUID. -// -// Not constant time comparison. -func IsEqualGUID(guid1 *GUID, guid2 *GUID) bool { - return guid1.Data1 == guid2.Data1 && - guid1.Data2 == guid2.Data2 && - guid1.Data3 == guid2.Data3 && - guid1.Data4[0] == guid2.Data4[0] && - guid1.Data4[1] == guid2.Data4[1] && - guid1.Data4[2] == guid2.Data4[2] && - guid1.Data4[3] == guid2.Data4[3] && - guid1.Data4[4] == guid2.Data4[4] && - guid1.Data4[5] == guid2.Data4[5] && - guid1.Data4[6] == guid2.Data4[6] && - guid1.Data4[7] == guid2.Data4[7] -} diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpoint.go b/vendor/github.com/go-ole/go-ole/iconnectionpoint.go deleted file mode 100644 index 9e6c49f41..000000000 --- a/vendor/github.com/go-ole/go-ole/iconnectionpoint.go +++ /dev/null @@ -1,20 +0,0 @@ -package ole - -import "unsafe" - -type IConnectionPoint struct { - IUnknown -} - -type IConnectionPointVtbl struct { - IUnknownVtbl - GetConnectionInterface uintptr - GetConnectionPointContainer uintptr - Advise uintptr - Unadvise uintptr - EnumConnections uintptr -} - -func (v *IConnectionPoint) VTable() *IConnectionPointVtbl { - return (*IConnectionPointVtbl)(unsafe.Pointer(v.RawVTable)) -} diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpoint_func.go b/vendor/github.com/go-ole/go-ole/iconnectionpoint_func.go deleted file mode 100644 index 5414dc3cd..000000000 --- a/vendor/github.com/go-ole/go-ole/iconnectionpoint_func.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build !windows - -package ole - -import "unsafe" - -func (v *IConnectionPoint) GetConnectionInterface(piid **GUID) int32 { - return int32(0) -} - -func (v *IConnectionPoint) Advise(unknown *IUnknown) (uint32, error) { - return uint32(0), NewError(E_NOTIMPL) -} - -func (v *IConnectionPoint) Unadvise(cookie uint32) error { - return NewError(E_NOTIMPL) -} - -func (v *IConnectionPoint) EnumConnections(p *unsafe.Pointer) (err error) { - return NewError(E_NOTIMPL) -} diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpoint_windows.go b/vendor/github.com/go-ole/go-ole/iconnectionpoint_windows.go deleted file mode 100644 index 32bc18324..000000000 --- a/vendor/github.com/go-ole/go-ole/iconnectionpoint_windows.go +++ /dev/null @@ -1,43 +0,0 @@ -// +build windows - -package ole - -import ( - "syscall" - "unsafe" -) - -func (v 
*IConnectionPoint) GetConnectionInterface(piid **GUID) int32 { - // XXX: This doesn't look like it does what it's supposed to - return release((*IUnknown)(unsafe.Pointer(v))) -} - -func (v *IConnectionPoint) Advise(unknown *IUnknown) (cookie uint32, err error) { - hr, _, _ := syscall.Syscall( - v.VTable().Advise, - 3, - uintptr(unsafe.Pointer(v)), - uintptr(unsafe.Pointer(unknown)), - uintptr(unsafe.Pointer(&cookie))) - if hr != 0 { - err = NewError(hr) - } - return -} - -func (v *IConnectionPoint) Unadvise(cookie uint32) (err error) { - hr, _, _ := syscall.Syscall( - v.VTable().Unadvise, - 2, - uintptr(unsafe.Pointer(v)), - uintptr(cookie), - 0) - if hr != 0 { - err = NewError(hr) - } - return -} - -func (v *IConnectionPoint) EnumConnections(p *unsafe.Pointer) error { - return NewError(E_NOTIMPL) -} diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer.go b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer.go deleted file mode 100644 index 165860d19..000000000 --- a/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer.go +++ /dev/null @@ -1,17 +0,0 @@ -package ole - -import "unsafe" - -type IConnectionPointContainer struct { - IUnknown -} - -type IConnectionPointContainerVtbl struct { - IUnknownVtbl - EnumConnectionPoints uintptr - FindConnectionPoint uintptr -} - -func (v *IConnectionPointContainer) VTable() *IConnectionPointContainerVtbl { - return (*IConnectionPointContainerVtbl)(unsafe.Pointer(v.RawVTable)) -} diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_func.go b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_func.go deleted file mode 100644 index 5dfa42aae..000000000 --- a/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_func.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !windows - -package ole - -func (v *IConnectionPointContainer) EnumConnectionPoints(points interface{}) error { - return NewError(E_NOTIMPL) -} - -func (v *IConnectionPointContainer) FindConnectionPoint(iid *GUID, point **IConnectionPoint) error { - return NewError(E_NOTIMPL) -} diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_windows.go b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_windows.go deleted file mode 100644 index ad30d79ef..000000000 --- a/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_windows.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build windows - -package ole - -import ( - "syscall" - "unsafe" -) - -func (v *IConnectionPointContainer) EnumConnectionPoints(points interface{}) error { - return NewError(E_NOTIMPL) -} - -func (v *IConnectionPointContainer) FindConnectionPoint(iid *GUID, point **IConnectionPoint) (err error) { - hr, _, _ := syscall.Syscall( - v.VTable().FindConnectionPoint, - 3, - uintptr(unsafe.Pointer(v)), - uintptr(unsafe.Pointer(iid)), - uintptr(unsafe.Pointer(point))) - if hr != 0 { - err = NewError(hr) - } - return -} diff --git a/vendor/github.com/go-ole/go-ole/idispatch.go b/vendor/github.com/go-ole/go-ole/idispatch.go deleted file mode 100644 index d4af12409..000000000 --- a/vendor/github.com/go-ole/go-ole/idispatch.go +++ /dev/null @@ -1,94 +0,0 @@ -package ole - -import "unsafe" - -type IDispatch struct { - IUnknown -} - -type IDispatchVtbl struct { - IUnknownVtbl - GetTypeInfoCount uintptr - GetTypeInfo uintptr - GetIDsOfNames uintptr - Invoke uintptr -} - -func (v *IDispatch) VTable() *IDispatchVtbl { - return (*IDispatchVtbl)(unsafe.Pointer(v.RawVTable)) -} - -func (v *IDispatch) GetIDsOfName(names []string) (dispid []int32, err error) { - dispid, err = 
getIDsOfName(v, names) - return -} - -func (v *IDispatch) Invoke(dispid int32, dispatch int16, params ...interface{}) (result *VARIANT, err error) { - result, err = invoke(v, dispid, dispatch, params...) - return -} - -func (v *IDispatch) GetTypeInfoCount() (c uint32, err error) { - c, err = getTypeInfoCount(v) - return -} - -func (v *IDispatch) GetTypeInfo() (tinfo *ITypeInfo, err error) { - tinfo, err = getTypeInfo(v) - return -} - -// GetSingleIDOfName is a helper that returns single display ID for IDispatch name. -// -// This replaces the common pattern of attempting to get a single name from the list of available -// IDs. It gives the first ID, if it is available. -func (v *IDispatch) GetSingleIDOfName(name string) (displayID int32, err error) { - var displayIDs []int32 - displayIDs, err = v.GetIDsOfName([]string{name}) - if err != nil { - return - } - displayID = displayIDs[0] - return -} - -// InvokeWithOptionalArgs accepts arguments as an array, works like Invoke. -// -// Accepts name and will attempt to retrieve Display ID to pass to Invoke. -// -// Passing params as an array is a workaround that could be fixed in later versions of Go that -// prevent passing empty params. During testing it was discovered that this is an acceptable way of -// getting around not being able to pass params normally. -func (v *IDispatch) InvokeWithOptionalArgs(name string, dispatch int16, params []interface{}) (result *VARIANT, err error) { - displayID, err := v.GetSingleIDOfName(name) - if err != nil { - return - } - - if len(params) < 1 { - result, err = v.Invoke(displayID, dispatch) - } else { - result, err = v.Invoke(displayID, dispatch, params...) - } - - return -} - -// CallMethod invokes named function with arguments on object. -func (v *IDispatch) CallMethod(name string, params ...interface{}) (*VARIANT, error) { - return v.InvokeWithOptionalArgs(name, DISPATCH_METHOD, params) -} - -// GetProperty retrieves the property with the name with the ability to pass arguments. -// -// Most of the time you will not need to pass arguments as most objects do not allow for this -// feature. Or at least, should not allow for this feature. Some servers don't follow best practices -// and this is provided for those edge cases. -func (v *IDispatch) GetProperty(name string, params ...interface{}) (*VARIANT, error) { - return v.InvokeWithOptionalArgs(name, DISPATCH_PROPERTYGET, params) -} - -// PutProperty attempts to mutate a property in the object. 
-func (v *IDispatch) PutProperty(name string, params ...interface{}) (*VARIANT, error) { - return v.InvokeWithOptionalArgs(name, DISPATCH_PROPERTYPUT, params) -} diff --git a/vendor/github.com/go-ole/go-ole/idispatch_func.go b/vendor/github.com/go-ole/go-ole/idispatch_func.go deleted file mode 100644 index b8fbbe319..000000000 --- a/vendor/github.com/go-ole/go-ole/idispatch_func.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build !windows - -package ole - -func getIDsOfName(disp *IDispatch, names []string) ([]int32, error) { - return []int32{}, NewError(E_NOTIMPL) -} - -func getTypeInfoCount(disp *IDispatch) (uint32, error) { - return uint32(0), NewError(E_NOTIMPL) -} - -func getTypeInfo(disp *IDispatch) (*ITypeInfo, error) { - return nil, NewError(E_NOTIMPL) -} - -func invoke(disp *IDispatch, dispid int32, dispatch int16, params ...interface{}) (*VARIANT, error) { - return nil, NewError(E_NOTIMPL) -} diff --git a/vendor/github.com/go-ole/go-ole/idispatch_windows.go b/vendor/github.com/go-ole/go-ole/idispatch_windows.go deleted file mode 100644 index 6ec180b55..000000000 --- a/vendor/github.com/go-ole/go-ole/idispatch_windows.go +++ /dev/null @@ -1,200 +0,0 @@ -// +build windows - -package ole - -import ( - "math/big" - "syscall" - "time" - "unsafe" -) - -func getIDsOfName(disp *IDispatch, names []string) (dispid []int32, err error) { - wnames := make([]*uint16, len(names)) - for i := 0; i < len(names); i++ { - wnames[i] = syscall.StringToUTF16Ptr(names[i]) - } - dispid = make([]int32, len(names)) - namelen := uint32(len(names)) - hr, _, _ := syscall.Syscall6( - disp.VTable().GetIDsOfNames, - 6, - uintptr(unsafe.Pointer(disp)), - uintptr(unsafe.Pointer(IID_NULL)), - uintptr(unsafe.Pointer(&wnames[0])), - uintptr(namelen), - uintptr(GetUserDefaultLCID()), - uintptr(unsafe.Pointer(&dispid[0]))) - if hr != 0 { - err = NewError(hr) - } - return -} - -func getTypeInfoCount(disp *IDispatch) (c uint32, err error) { - hr, _, _ := syscall.Syscall( - disp.VTable().GetTypeInfoCount, - 2, - uintptr(unsafe.Pointer(disp)), - uintptr(unsafe.Pointer(&c)), - 0) - if hr != 0 { - err = NewError(hr) - } - return -} - -func getTypeInfo(disp *IDispatch) (tinfo *ITypeInfo, err error) { - hr, _, _ := syscall.Syscall( - disp.VTable().GetTypeInfo, - 3, - uintptr(unsafe.Pointer(disp)), - uintptr(GetUserDefaultLCID()), - uintptr(unsafe.Pointer(&tinfo))) - if hr != 0 { - err = NewError(hr) - } - return -} - -func invoke(disp *IDispatch, dispid int32, dispatch int16, params ...interface{}) (result *VARIANT, err error) { - var dispparams DISPPARAMS - - if dispatch&DISPATCH_PROPERTYPUT != 0 { - dispnames := [1]int32{DISPID_PROPERTYPUT} - dispparams.rgdispidNamedArgs = uintptr(unsafe.Pointer(&dispnames[0])) - dispparams.cNamedArgs = 1 - } else if dispatch&DISPATCH_PROPERTYPUTREF != 0 { - dispnames := [1]int32{DISPID_PROPERTYPUT} - dispparams.rgdispidNamedArgs = uintptr(unsafe.Pointer(&dispnames[0])) - dispparams.cNamedArgs = 1 - } - var vargs []VARIANT - if len(params) > 0 { - vargs = make([]VARIANT, len(params)) - for i, v := range params { - //n := len(params)-i-1 - n := len(params) - i - 1 - VariantInit(&vargs[n]) - switch vv := v.(type) { - case bool: - if vv { - vargs[n] = NewVariant(VT_BOOL, 0xffff) - } else { - vargs[n] = NewVariant(VT_BOOL, 0) - } - case *bool: - vargs[n] = NewVariant(VT_BOOL|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*bool))))) - case uint8: - vargs[n] = NewVariant(VT_I1, int64(v.(uint8))) - case *uint8: - vargs[n] = NewVariant(VT_I1|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint8))))) - case int8: - 
vargs[n] = NewVariant(VT_I1, int64(v.(int8))) - case *int8: - vargs[n] = NewVariant(VT_I1|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint8))))) - case int16: - vargs[n] = NewVariant(VT_I2, int64(v.(int16))) - case *int16: - vargs[n] = NewVariant(VT_I2|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int16))))) - case uint16: - vargs[n] = NewVariant(VT_UI2, int64(v.(uint16))) - case *uint16: - vargs[n] = NewVariant(VT_UI2|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint16))))) - case int32: - vargs[n] = NewVariant(VT_I4, int64(v.(int32))) - case *int32: - vargs[n] = NewVariant(VT_I4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int32))))) - case uint32: - vargs[n] = NewVariant(VT_UI4, int64(v.(uint32))) - case *uint32: - vargs[n] = NewVariant(VT_UI4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint32))))) - case int64: - vargs[n] = NewVariant(VT_I8, int64(v.(int64))) - case *int64: - vargs[n] = NewVariant(VT_I8|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int64))))) - case uint64: - vargs[n] = NewVariant(VT_UI8, int64(uintptr(v.(uint64)))) - case *uint64: - vargs[n] = NewVariant(VT_UI8|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint64))))) - case int: - vargs[n] = NewVariant(VT_I4, int64(v.(int))) - case *int: - vargs[n] = NewVariant(VT_I4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int))))) - case uint: - vargs[n] = NewVariant(VT_UI4, int64(v.(uint))) - case *uint: - vargs[n] = NewVariant(VT_UI4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint))))) - case float32: - vargs[n] = NewVariant(VT_R4, *(*int64)(unsafe.Pointer(&vv))) - case *float32: - vargs[n] = NewVariant(VT_R4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*float32))))) - case float64: - vargs[n] = NewVariant(VT_R8, *(*int64)(unsafe.Pointer(&vv))) - case *float64: - vargs[n] = NewVariant(VT_R8|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*float64))))) - case *big.Int: - vargs[n] = NewVariant(VT_DECIMAL, v.(*big.Int).Int64()) - case string: - vargs[n] = NewVariant(VT_BSTR, int64(uintptr(unsafe.Pointer(SysAllocStringLen(v.(string)))))) - case *string: - vargs[n] = NewVariant(VT_BSTR|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*string))))) - case time.Time: - s := vv.Format("2006-01-02 15:04:05") - vargs[n] = NewVariant(VT_BSTR, int64(uintptr(unsafe.Pointer(SysAllocStringLen(s))))) - case *time.Time: - s := vv.Format("2006-01-02 15:04:05") - vargs[n] = NewVariant(VT_BSTR|VT_BYREF, int64(uintptr(unsafe.Pointer(&s)))) - case *IDispatch: - vargs[n] = NewVariant(VT_DISPATCH, int64(uintptr(unsafe.Pointer(v.(*IDispatch))))) - case **IDispatch: - vargs[n] = NewVariant(VT_DISPATCH|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(**IDispatch))))) - case nil: - vargs[n] = NewVariant(VT_NULL, 0) - case *VARIANT: - vargs[n] = NewVariant(VT_VARIANT|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*VARIANT))))) - case []byte: - safeByteArray := safeArrayFromByteSlice(v.([]byte)) - vargs[n] = NewVariant(VT_ARRAY|VT_UI1, int64(uintptr(unsafe.Pointer(safeByteArray)))) - defer VariantClear(&vargs[n]) - case []string: - safeByteArray := safeArrayFromStringSlice(v.([]string)) - vargs[n] = NewVariant(VT_ARRAY|VT_BSTR, int64(uintptr(unsafe.Pointer(safeByteArray)))) - defer VariantClear(&vargs[n]) - default: - panic("unknown type") - } - } - dispparams.rgvarg = uintptr(unsafe.Pointer(&vargs[0])) - dispparams.cArgs = uint32(len(params)) - } - - result = new(VARIANT) - var excepInfo EXCEPINFO - VariantInit(result) - hr, _, _ := syscall.Syscall9( - disp.VTable().Invoke, - 9, - uintptr(unsafe.Pointer(disp)), - uintptr(dispid), - uintptr(unsafe.Pointer(IID_NULL)), - uintptr(GetUserDefaultLCID()), 
- uintptr(dispatch), - uintptr(unsafe.Pointer(&dispparams)), - uintptr(unsafe.Pointer(result)), - uintptr(unsafe.Pointer(&excepInfo)), - 0) - if hr != 0 { - err = NewErrorWithSubError(hr, BstrToString(excepInfo.bstrDescription), excepInfo) - } - for i, varg := range vargs { - n := len(params) - i - 1 - if varg.VT == VT_BSTR && varg.Val != 0 { - SysFreeString(((*int16)(unsafe.Pointer(uintptr(varg.Val))))) - } - if varg.VT == (VT_BSTR|VT_BYREF) && varg.Val != 0 { - *(params[n].(*string)) = LpOleStrToString(*(**uint16)(unsafe.Pointer(uintptr(varg.Val)))) - } - } - return -} diff --git a/vendor/github.com/go-ole/go-ole/ienumvariant.go b/vendor/github.com/go-ole/go-ole/ienumvariant.go deleted file mode 100644 index 243389754..000000000 --- a/vendor/github.com/go-ole/go-ole/ienumvariant.go +++ /dev/null @@ -1,19 +0,0 @@ -package ole - -import "unsafe" - -type IEnumVARIANT struct { - IUnknown -} - -type IEnumVARIANTVtbl struct { - IUnknownVtbl - Next uintptr - Skip uintptr - Reset uintptr - Clone uintptr -} - -func (v *IEnumVARIANT) VTable() *IEnumVARIANTVtbl { - return (*IEnumVARIANTVtbl)(unsafe.Pointer(v.RawVTable)) -} diff --git a/vendor/github.com/go-ole/go-ole/ienumvariant_func.go b/vendor/github.com/go-ole/go-ole/ienumvariant_func.go deleted file mode 100644 index c14848199..000000000 --- a/vendor/github.com/go-ole/go-ole/ienumvariant_func.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build !windows - -package ole - -func (enum *IEnumVARIANT) Clone() (*IEnumVARIANT, error) { - return nil, NewError(E_NOTIMPL) -} - -func (enum *IEnumVARIANT) Reset() error { - return NewError(E_NOTIMPL) -} - -func (enum *IEnumVARIANT) Skip(celt uint) error { - return NewError(E_NOTIMPL) -} - -func (enum *IEnumVARIANT) Next(celt uint) (VARIANT, uint, error) { - return NewVariant(VT_NULL, int64(0)), 0, NewError(E_NOTIMPL) -} diff --git a/vendor/github.com/go-ole/go-ole/ienumvariant_windows.go b/vendor/github.com/go-ole/go-ole/ienumvariant_windows.go deleted file mode 100644 index 4781f3b8b..000000000 --- a/vendor/github.com/go-ole/go-ole/ienumvariant_windows.go +++ /dev/null @@ -1,63 +0,0 @@ -// +build windows - -package ole - -import ( - "syscall" - "unsafe" -) - -func (enum *IEnumVARIANT) Clone() (cloned *IEnumVARIANT, err error) { - hr, _, _ := syscall.Syscall( - enum.VTable().Clone, - 2, - uintptr(unsafe.Pointer(enum)), - uintptr(unsafe.Pointer(&cloned)), - 0) - if hr != 0 { - err = NewError(hr) - } - return -} - -func (enum *IEnumVARIANT) Reset() (err error) { - hr, _, _ := syscall.Syscall( - enum.VTable().Reset, - 1, - uintptr(unsafe.Pointer(enum)), - 0, - 0) - if hr != 0 { - err = NewError(hr) - } - return -} - -func (enum *IEnumVARIANT) Skip(celt uint) (err error) { - hr, _, _ := syscall.Syscall( - enum.VTable().Skip, - 2, - uintptr(unsafe.Pointer(enum)), - uintptr(celt), - 0) - if hr != 0 { - err = NewError(hr) - } - return -} - -func (enum *IEnumVARIANT) Next(celt uint) (array VARIANT, length uint, err error) { - hr, _, _ := syscall.Syscall6( - enum.VTable().Next, - 4, - uintptr(unsafe.Pointer(enum)), - uintptr(celt), - uintptr(unsafe.Pointer(&array)), - uintptr(unsafe.Pointer(&length)), - 0, - 0) - if hr != 0 { - err = NewError(hr) - } - return -} diff --git a/vendor/github.com/go-ole/go-ole/iinspectable.go b/vendor/github.com/go-ole/go-ole/iinspectable.go deleted file mode 100644 index f4a19e253..000000000 --- a/vendor/github.com/go-ole/go-ole/iinspectable.go +++ /dev/null @@ -1,18 +0,0 @@ -package ole - -import "unsafe" - -type IInspectable struct { - IUnknown -} - -type IInspectableVtbl struct { - 
IUnknownVtbl - GetIIds uintptr - GetRuntimeClassName uintptr - GetTrustLevel uintptr -} - -func (v *IInspectable) VTable() *IInspectableVtbl { - return (*IInspectableVtbl)(unsafe.Pointer(v.RawVTable)) -} diff --git a/vendor/github.com/go-ole/go-ole/iinspectable_func.go b/vendor/github.com/go-ole/go-ole/iinspectable_func.go deleted file mode 100644 index 348829bf0..000000000 --- a/vendor/github.com/go-ole/go-ole/iinspectable_func.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !windows - -package ole - -func (v *IInspectable) GetIids() ([]*GUID, error) { - return []*GUID{}, NewError(E_NOTIMPL) -} - -func (v *IInspectable) GetRuntimeClassName() (string, error) { - return "", NewError(E_NOTIMPL) -} - -func (v *IInspectable) GetTrustLevel() (uint32, error) { - return uint32(0), NewError(E_NOTIMPL) -} diff --git a/vendor/github.com/go-ole/go-ole/iinspectable_windows.go b/vendor/github.com/go-ole/go-ole/iinspectable_windows.go deleted file mode 100644 index 4519a4aa4..000000000 --- a/vendor/github.com/go-ole/go-ole/iinspectable_windows.go +++ /dev/null @@ -1,72 +0,0 @@ -// +build windows - -package ole - -import ( - "bytes" - "encoding/binary" - "reflect" - "syscall" - "unsafe" -) - -func (v *IInspectable) GetIids() (iids []*GUID, err error) { - var count uint32 - var array uintptr - hr, _, _ := syscall.Syscall( - v.VTable().GetIIds, - 3, - uintptr(unsafe.Pointer(v)), - uintptr(unsafe.Pointer(&count)), - uintptr(unsafe.Pointer(&array))) - if hr != 0 { - err = NewError(hr) - return - } - defer CoTaskMemFree(array) - - iids = make([]*GUID, count) - byteCount := count * uint32(unsafe.Sizeof(GUID{})) - slicehdr := reflect.SliceHeader{Data: array, Len: int(byteCount), Cap: int(byteCount)} - byteSlice := *(*[]byte)(unsafe.Pointer(&slicehdr)) - reader := bytes.NewReader(byteSlice) - for i := range iids { - guid := GUID{} - err = binary.Read(reader, binary.LittleEndian, &guid) - if err != nil { - return - } - iids[i] = &guid - } - return -} - -func (v *IInspectable) GetRuntimeClassName() (s string, err error) { - var hstring HString - hr, _, _ := syscall.Syscall( - v.VTable().GetRuntimeClassName, - 2, - uintptr(unsafe.Pointer(v)), - uintptr(unsafe.Pointer(&hstring)), - 0) - if hr != 0 { - err = NewError(hr) - return - } - s = hstring.String() - DeleteHString(hstring) - return -} - -func (v *IInspectable) GetTrustLevel() (level uint32, err error) { - hr, _, _ := syscall.Syscall( - v.VTable().GetTrustLevel, - 2, - uintptr(unsafe.Pointer(v)), - uintptr(unsafe.Pointer(&level)), - 0) - if hr != 0 { - err = NewError(hr) - } - return -} diff --git a/vendor/github.com/go-ole/go-ole/iprovideclassinfo.go b/vendor/github.com/go-ole/go-ole/iprovideclassinfo.go deleted file mode 100644 index 25f3a6f24..000000000 --- a/vendor/github.com/go-ole/go-ole/iprovideclassinfo.go +++ /dev/null @@ -1,21 +0,0 @@ -package ole - -import "unsafe" - -type IProvideClassInfo struct { - IUnknown -} - -type IProvideClassInfoVtbl struct { - IUnknownVtbl - GetClassInfo uintptr -} - -func (v *IProvideClassInfo) VTable() *IProvideClassInfoVtbl { - return (*IProvideClassInfoVtbl)(unsafe.Pointer(v.RawVTable)) -} - -func (v *IProvideClassInfo) GetClassInfo() (cinfo *ITypeInfo, err error) { - cinfo, err = getClassInfo(v) - return -} diff --git a/vendor/github.com/go-ole/go-ole/iprovideclassinfo_func.go b/vendor/github.com/go-ole/go-ole/iprovideclassinfo_func.go deleted file mode 100644 index 7e3cb63ea..000000000 --- a/vendor/github.com/go-ole/go-ole/iprovideclassinfo_func.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !windows - -package ole - -func 
getClassInfo(disp *IProvideClassInfo) (tinfo *ITypeInfo, err error) { - return nil, NewError(E_NOTIMPL) -} diff --git a/vendor/github.com/go-ole/go-ole/iprovideclassinfo_windows.go b/vendor/github.com/go-ole/go-ole/iprovideclassinfo_windows.go deleted file mode 100644 index 2ad016394..000000000 --- a/vendor/github.com/go-ole/go-ole/iprovideclassinfo_windows.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build windows - -package ole - -import ( - "syscall" - "unsafe" -) - -func getClassInfo(disp *IProvideClassInfo) (tinfo *ITypeInfo, err error) { - hr, _, _ := syscall.Syscall( - disp.VTable().GetClassInfo, - 2, - uintptr(unsafe.Pointer(disp)), - uintptr(unsafe.Pointer(&tinfo)), - 0) - if hr != 0 { - err = NewError(hr) - } - return -} diff --git a/vendor/github.com/go-ole/go-ole/itypeinfo.go b/vendor/github.com/go-ole/go-ole/itypeinfo.go deleted file mode 100644 index dd3c5e21b..000000000 --- a/vendor/github.com/go-ole/go-ole/itypeinfo.go +++ /dev/null @@ -1,34 +0,0 @@ -package ole - -import "unsafe" - -type ITypeInfo struct { - IUnknown -} - -type ITypeInfoVtbl struct { - IUnknownVtbl - GetTypeAttr uintptr - GetTypeComp uintptr - GetFuncDesc uintptr - GetVarDesc uintptr - GetNames uintptr - GetRefTypeOfImplType uintptr - GetImplTypeFlags uintptr - GetIDsOfNames uintptr - Invoke uintptr - GetDocumentation uintptr - GetDllEntry uintptr - GetRefTypeInfo uintptr - AddressOfMember uintptr - CreateInstance uintptr - GetMops uintptr - GetContainingTypeLib uintptr - ReleaseTypeAttr uintptr - ReleaseFuncDesc uintptr - ReleaseVarDesc uintptr -} - -func (v *ITypeInfo) VTable() *ITypeInfoVtbl { - return (*ITypeInfoVtbl)(unsafe.Pointer(v.RawVTable)) -} diff --git a/vendor/github.com/go-ole/go-ole/itypeinfo_func.go b/vendor/github.com/go-ole/go-ole/itypeinfo_func.go deleted file mode 100644 index 8364a659b..000000000 --- a/vendor/github.com/go-ole/go-ole/itypeinfo_func.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !windows - -package ole - -func (v *ITypeInfo) GetTypeAttr() (*TYPEATTR, error) { - return nil, NewError(E_NOTIMPL) -} diff --git a/vendor/github.com/go-ole/go-ole/itypeinfo_windows.go b/vendor/github.com/go-ole/go-ole/itypeinfo_windows.go deleted file mode 100644 index 54782b3da..000000000 --- a/vendor/github.com/go-ole/go-ole/itypeinfo_windows.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build windows - -package ole - -import ( - "syscall" - "unsafe" -) - -func (v *ITypeInfo) GetTypeAttr() (tattr *TYPEATTR, err error) { - hr, _, _ := syscall.Syscall( - uintptr(v.VTable().GetTypeAttr), - 2, - uintptr(unsafe.Pointer(v)), - uintptr(unsafe.Pointer(&tattr)), - 0) - if hr != 0 { - err = NewError(hr) - } - return -} diff --git a/vendor/github.com/go-ole/go-ole/iunknown.go b/vendor/github.com/go-ole/go-ole/iunknown.go deleted file mode 100644 index 108f28ea6..000000000 --- a/vendor/github.com/go-ole/go-ole/iunknown.go +++ /dev/null @@ -1,57 +0,0 @@ -package ole - -import "unsafe" - -type IUnknown struct { - RawVTable *interface{} -} - -type IUnknownVtbl struct { - QueryInterface uintptr - AddRef uintptr - Release uintptr -} - -type UnknownLike interface { - QueryInterface(iid *GUID) (disp *IDispatch, err error) - AddRef() int32 - Release() int32 -} - -func (v *IUnknown) VTable() *IUnknownVtbl { - return (*IUnknownVtbl)(unsafe.Pointer(v.RawVTable)) -} - -func (v *IUnknown) PutQueryInterface(interfaceID *GUID, obj interface{}) error { - return reflectQueryInterface(v, v.VTable().QueryInterface, interfaceID, obj) -} - -func (v *IUnknown) IDispatch(interfaceID *GUID) (dispatch *IDispatch, err error) { - err = 
v.PutQueryInterface(interfaceID, &dispatch) - return -} - -func (v *IUnknown) IEnumVARIANT(interfaceID *GUID) (enum *IEnumVARIANT, err error) { - err = v.PutQueryInterface(interfaceID, &enum) - return -} - -func (v *IUnknown) QueryInterface(iid *GUID) (*IDispatch, error) { - return queryInterface(v, iid) -} - -func (v *IUnknown) MustQueryInterface(iid *GUID) (disp *IDispatch) { - unk, err := queryInterface(v, iid) - if err != nil { - panic(err) - } - return unk -} - -func (v *IUnknown) AddRef() int32 { - return addRef(v) -} - -func (v *IUnknown) Release() int32 { - return release(v) -} diff --git a/vendor/github.com/go-ole/go-ole/iunknown_func.go b/vendor/github.com/go-ole/go-ole/iunknown_func.go deleted file mode 100644 index d0a62cfd7..000000000 --- a/vendor/github.com/go-ole/go-ole/iunknown_func.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build !windows - -package ole - -func reflectQueryInterface(self interface{}, method uintptr, interfaceID *GUID, obj interface{}) (err error) { - return NewError(E_NOTIMPL) -} - -func queryInterface(unk *IUnknown, iid *GUID) (disp *IDispatch, err error) { - return nil, NewError(E_NOTIMPL) -} - -func addRef(unk *IUnknown) int32 { - return 0 -} - -func release(unk *IUnknown) int32 { - return 0 -} diff --git a/vendor/github.com/go-ole/go-ole/iunknown_windows.go b/vendor/github.com/go-ole/go-ole/iunknown_windows.go deleted file mode 100644 index ede5bb8c1..000000000 --- a/vendor/github.com/go-ole/go-ole/iunknown_windows.go +++ /dev/null @@ -1,58 +0,0 @@ -// +build windows - -package ole - -import ( - "reflect" - "syscall" - "unsafe" -) - -func reflectQueryInterface(self interface{}, method uintptr, interfaceID *GUID, obj interface{}) (err error) { - selfValue := reflect.ValueOf(self).Elem() - objValue := reflect.ValueOf(obj).Elem() - - hr, _, _ := syscall.Syscall( - method, - 3, - selfValue.UnsafeAddr(), - uintptr(unsafe.Pointer(interfaceID)), - objValue.Addr().Pointer()) - if hr != 0 { - err = NewError(hr) - } - return -} - -func queryInterface(unk *IUnknown, iid *GUID) (disp *IDispatch, err error) { - hr, _, _ := syscall.Syscall( - unk.VTable().QueryInterface, - 3, - uintptr(unsafe.Pointer(unk)), - uintptr(unsafe.Pointer(iid)), - uintptr(unsafe.Pointer(&disp))) - if hr != 0 { - err = NewError(hr) - } - return -} - -func addRef(unk *IUnknown) int32 { - ret, _, _ := syscall.Syscall( - unk.VTable().AddRef, - 1, - uintptr(unsafe.Pointer(unk)), - 0, - 0) - return int32(ret) -} - -func release(unk *IUnknown) int32 { - ret, _, _ := syscall.Syscall( - unk.VTable().Release, - 1, - uintptr(unsafe.Pointer(unk)), - 0, - 0) - return int32(ret) -} diff --git a/vendor/github.com/go-ole/go-ole/ole.go b/vendor/github.com/go-ole/go-ole/ole.go deleted file mode 100644 index e2ae4f4bb..000000000 --- a/vendor/github.com/go-ole/go-ole/ole.go +++ /dev/null @@ -1,157 +0,0 @@ -package ole - -import ( - "fmt" - "strings" -) - -// DISPPARAMS are the arguments that passed to methods or property. -type DISPPARAMS struct { - rgvarg uintptr - rgdispidNamedArgs uintptr - cArgs uint32 - cNamedArgs uint32 -} - -// EXCEPINFO defines exception info. -type EXCEPINFO struct { - wCode uint16 - wReserved uint16 - bstrSource *uint16 - bstrDescription *uint16 - bstrHelpFile *uint16 - dwHelpContext uint32 - pvReserved uintptr - pfnDeferredFillIn uintptr - scode uint32 -} - -// WCode return wCode in EXCEPINFO. -func (e EXCEPINFO) WCode() uint16 { - return e.wCode -} - -// SCODE return scode in EXCEPINFO. -func (e EXCEPINFO) SCODE() uint32 { - return e.scode -} - -// String convert EXCEPINFO to string. 
-func (e EXCEPINFO) String() string { - var src, desc, hlp string - if e.bstrSource == nil { - src = "" - } else { - src = BstrToString(e.bstrSource) - } - - if e.bstrDescription == nil { - desc = "" - } else { - desc = BstrToString(e.bstrDescription) - } - - if e.bstrHelpFile == nil { - hlp = "" - } else { - hlp = BstrToString(e.bstrHelpFile) - } - - return fmt.Sprintf( - "wCode: %#x, bstrSource: %v, bstrDescription: %v, bstrHelpFile: %v, dwHelpContext: %#x, scode: %#x", - e.wCode, src, desc, hlp, e.dwHelpContext, e.scode, - ) -} - -// Error implements error interface and returns error string. -func (e EXCEPINFO) Error() string { - if e.bstrDescription != nil { - return strings.TrimSpace(BstrToString(e.bstrDescription)) - } - - src := "Unknown" - if e.bstrSource != nil { - src = BstrToString(e.bstrSource) - } - - code := e.scode - if e.wCode != 0 { - code = uint32(e.wCode) - } - - return fmt.Sprintf("%v: %#x", src, code) -} - -// PARAMDATA defines parameter data type. -type PARAMDATA struct { - Name *int16 - Vt uint16 -} - -// METHODDATA defines method info. -type METHODDATA struct { - Name *uint16 - Data *PARAMDATA - Dispid int32 - Meth uint32 - CC int32 - CArgs uint32 - Flags uint16 - VtReturn uint32 -} - -// INTERFACEDATA defines interface info. -type INTERFACEDATA struct { - MethodData *METHODDATA - CMembers uint32 -} - -// Point is 2D vector type. -type Point struct { - X int32 - Y int32 -} - -// Msg is message between processes. -type Msg struct { - Hwnd uint32 - Message uint32 - Wparam int32 - Lparam int32 - Time uint32 - Pt Point -} - -// TYPEDESC defines data type. -type TYPEDESC struct { - Hreftype uint32 - VT uint16 -} - -// IDLDESC defines IDL info. -type IDLDESC struct { - DwReserved uint32 - WIDLFlags uint16 -} - -// TYPEATTR defines type info. 
-type TYPEATTR struct { - Guid GUID - Lcid uint32 - dwReserved uint32 - MemidConstructor int32 - MemidDestructor int32 - LpstrSchema *uint16 - CbSizeInstance uint32 - Typekind int32 - CFuncs uint16 - CVars uint16 - CImplTypes uint16 - CbSizeVft uint16 - CbAlignment uint16 - WTypeFlags uint16 - WMajorVerNum uint16 - WMinorVerNum uint16 - TdescAlias TYPEDESC - IdldescType IDLDESC -} diff --git a/vendor/github.com/go-ole/go-ole/oleutil/connection.go b/vendor/github.com/go-ole/go-ole/oleutil/connection.go deleted file mode 100644 index 60df73cda..000000000 --- a/vendor/github.com/go-ole/go-ole/oleutil/connection.go +++ /dev/null @@ -1,100 +0,0 @@ -// +build windows - -package oleutil - -import ( - "reflect" - "unsafe" - - ole "github.com/go-ole/go-ole" -) - -type stdDispatch struct { - lpVtbl *stdDispatchVtbl - ref int32 - iid *ole.GUID - iface interface{} - funcMap map[string]int32 -} - -type stdDispatchVtbl struct { - pQueryInterface uintptr - pAddRef uintptr - pRelease uintptr - pGetTypeInfoCount uintptr - pGetTypeInfo uintptr - pGetIDsOfNames uintptr - pInvoke uintptr -} - -func dispQueryInterface(this *ole.IUnknown, iid *ole.GUID, punk **ole.IUnknown) uint32 { - pthis := (*stdDispatch)(unsafe.Pointer(this)) - *punk = nil - if ole.IsEqualGUID(iid, ole.IID_IUnknown) || - ole.IsEqualGUID(iid, ole.IID_IDispatch) { - dispAddRef(this) - *punk = this - return ole.S_OK - } - if ole.IsEqualGUID(iid, pthis.iid) { - dispAddRef(this) - *punk = this - return ole.S_OK - } - return ole.E_NOINTERFACE -} - -func dispAddRef(this *ole.IUnknown) int32 { - pthis := (*stdDispatch)(unsafe.Pointer(this)) - pthis.ref++ - return pthis.ref -} - -func dispRelease(this *ole.IUnknown) int32 { - pthis := (*stdDispatch)(unsafe.Pointer(this)) - pthis.ref-- - return pthis.ref -} - -func dispGetIDsOfNames(this *ole.IUnknown, iid *ole.GUID, wnames []*uint16, namelen int, lcid int, pdisp []int32) uintptr { - pthis := (*stdDispatch)(unsafe.Pointer(this)) - names := make([]string, len(wnames)) - for i := 0; i < len(names); i++ { - names[i] = ole.LpOleStrToString(wnames[i]) - } - for n := 0; n < namelen; n++ { - if id, ok := pthis.funcMap[names[n]]; ok { - pdisp[n] = id - } - } - return ole.S_OK -} - -func dispGetTypeInfoCount(pcount *int) uintptr { - if pcount != nil { - *pcount = 0 - } - return ole.S_OK -} - -func dispGetTypeInfo(ptypeif *uintptr) uintptr { - return ole.E_NOTIMPL -} - -func dispInvoke(this *ole.IDispatch, dispid int32, riid *ole.GUID, lcid int, flags int16, dispparams *ole.DISPPARAMS, result *ole.VARIANT, pexcepinfo *ole.EXCEPINFO, nerr *uint) uintptr { - pthis := (*stdDispatch)(unsafe.Pointer(this)) - found := "" - for name, id := range pthis.funcMap { - if id == dispid { - found = name - } - } - if found != "" { - rv := reflect.ValueOf(pthis.iface).Elem() - rm := rv.MethodByName(found) - rr := rm.Call([]reflect.Value{}) - println(len(rr)) - return ole.S_OK - } - return ole.E_NOTIMPL -} diff --git a/vendor/github.com/go-ole/go-ole/oleutil/connection_func.go b/vendor/github.com/go-ole/go-ole/oleutil/connection_func.go deleted file mode 100644 index 8818fb827..000000000 --- a/vendor/github.com/go-ole/go-ole/oleutil/connection_func.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build !windows - -package oleutil - -import ole "github.com/go-ole/go-ole" - -// ConnectObject creates a connection point between two services for communication. 
-func ConnectObject(disp *ole.IDispatch, iid *ole.GUID, idisp interface{}) (uint32, error) { - return 0, ole.NewError(ole.E_NOTIMPL) -} diff --git a/vendor/github.com/go-ole/go-ole/oleutil/connection_windows.go b/vendor/github.com/go-ole/go-ole/oleutil/connection_windows.go deleted file mode 100644 index ab9c0d8dc..000000000 --- a/vendor/github.com/go-ole/go-ole/oleutil/connection_windows.go +++ /dev/null @@ -1,58 +0,0 @@ -// +build windows - -package oleutil - -import ( - "reflect" - "syscall" - "unsafe" - - ole "github.com/go-ole/go-ole" -) - -// ConnectObject creates a connection point between two services for communication. -func ConnectObject(disp *ole.IDispatch, iid *ole.GUID, idisp interface{}) (cookie uint32, err error) { - unknown, err := disp.QueryInterface(ole.IID_IConnectionPointContainer) - if err != nil { - return - } - - container := (*ole.IConnectionPointContainer)(unsafe.Pointer(unknown)) - var point *ole.IConnectionPoint - err = container.FindConnectionPoint(iid, &point) - if err != nil { - return - } - if edisp, ok := idisp.(*ole.IUnknown); ok { - cookie, err = point.Advise(edisp) - container.Release() - if err != nil { - return - } - } - rv := reflect.ValueOf(disp).Elem() - if rv.Type().Kind() == reflect.Struct { - dest := &stdDispatch{} - dest.lpVtbl = &stdDispatchVtbl{} - dest.lpVtbl.pQueryInterface = syscall.NewCallback(dispQueryInterface) - dest.lpVtbl.pAddRef = syscall.NewCallback(dispAddRef) - dest.lpVtbl.pRelease = syscall.NewCallback(dispRelease) - dest.lpVtbl.pGetTypeInfoCount = syscall.NewCallback(dispGetTypeInfoCount) - dest.lpVtbl.pGetTypeInfo = syscall.NewCallback(dispGetTypeInfo) - dest.lpVtbl.pGetIDsOfNames = syscall.NewCallback(dispGetIDsOfNames) - dest.lpVtbl.pInvoke = syscall.NewCallback(dispInvoke) - dest.iface = disp - dest.iid = iid - cookie, err = point.Advise((*ole.IUnknown)(unsafe.Pointer(dest))) - container.Release() - if err != nil { - point.Release() - return - } - return - } - - container.Release() - - return 0, ole.NewError(ole.E_INVALIDARG) -} diff --git a/vendor/github.com/go-ole/go-ole/oleutil/go-get.go b/vendor/github.com/go-ole/go-ole/oleutil/go-get.go deleted file mode 100644 index 58347628f..000000000 --- a/vendor/github.com/go-ole/go-ole/oleutil/go-get.go +++ /dev/null @@ -1,6 +0,0 @@ -// This file is here so go get succeeds as without it errors with: -// no buildable Go source files in ... -// -// +build !windows - -package oleutil diff --git a/vendor/github.com/go-ole/go-ole/oleutil/oleutil.go b/vendor/github.com/go-ole/go-ole/oleutil/oleutil.go deleted file mode 100644 index f7803c1e3..000000000 --- a/vendor/github.com/go-ole/go-ole/oleutil/oleutil.go +++ /dev/null @@ -1,127 +0,0 @@ -package oleutil - -import ole "github.com/go-ole/go-ole" - -// ClassIDFrom retrieves class ID whether given is program ID or application string. -func ClassIDFrom(programID string) (classID *ole.GUID, err error) { - return ole.ClassIDFrom(programID) -} - -// CreateObject creates object from programID based on interface type. -// -// Only supports IUnknown. -// -// Program ID can be either program ID or application string. -func CreateObject(programID string) (unknown *ole.IUnknown, err error) { - classID, err := ole.ClassIDFrom(programID) - if err != nil { - return - } - - unknown, err = ole.CreateInstance(classID, ole.IID_IUnknown) - if err != nil { - return - } - - return -} - -// GetActiveObject retrieves active object for program ID and interface ID based -// on interface type. -// -// Only supports IUnknown. 
-// -// Program ID can be either program ID or application string. -func GetActiveObject(programID string) (unknown *ole.IUnknown, err error) { - classID, err := ole.ClassIDFrom(programID) - if err != nil { - return - } - - unknown, err = ole.GetActiveObject(classID, ole.IID_IUnknown) - if err != nil { - return - } - - return -} - -// CallMethod calls method on IDispatch with parameters. -func CallMethod(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err error) { - return disp.InvokeWithOptionalArgs(name, ole.DISPATCH_METHOD, params) -} - -// MustCallMethod calls method on IDispatch with parameters or panics. -func MustCallMethod(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT) { - r, err := CallMethod(disp, name, params...) - if err != nil { - panic(err.Error()) - } - return r -} - -// GetProperty retrieves property from IDispatch. -func GetProperty(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err error) { - return disp.InvokeWithOptionalArgs(name, ole.DISPATCH_PROPERTYGET, params) -} - -// MustGetProperty retrieves property from IDispatch or panics. -func MustGetProperty(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT) { - r, err := GetProperty(disp, name, params...) - if err != nil { - panic(err.Error()) - } - return r -} - -// PutProperty mutates property. -func PutProperty(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err error) { - return disp.InvokeWithOptionalArgs(name, ole.DISPATCH_PROPERTYPUT, params) -} - -// MustPutProperty mutates property or panics. -func MustPutProperty(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT) { - r, err := PutProperty(disp, name, params...) - if err != nil { - panic(err.Error()) - } - return r -} - -// PutPropertyRef mutates property reference. -func PutPropertyRef(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err error) { - return disp.InvokeWithOptionalArgs(name, ole.DISPATCH_PROPERTYPUTREF, params) -} - -// MustPutPropertyRef mutates property reference or panics. -func MustPutPropertyRef(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT) { - r, err := PutPropertyRef(disp, name, params...) - if err != nil { - panic(err.Error()) - } - return r -} - -func ForEach(disp *ole.IDispatch, f func(v *ole.VARIANT) error) error { - newEnum, err := disp.GetProperty("_NewEnum") - if err != nil { - return err - } - defer newEnum.Clear() - - enum, err := newEnum.ToIUnknown().IEnumVARIANT(ole.IID_IEnumVariant) - if err != nil { - return err - } - defer enum.Release() - - for item, length, err := enum.Next(1); length > 0; item, length, err = enum.Next(1) { - if err != nil { - return err - } - if ferr := f(&item); ferr != nil { - return ferr - } - } - return nil -} diff --git a/vendor/github.com/go-ole/go-ole/safearray.go b/vendor/github.com/go-ole/go-ole/safearray.go deleted file mode 100644 index a5201b56c..000000000 --- a/vendor/github.com/go-ole/go-ole/safearray.go +++ /dev/null @@ -1,27 +0,0 @@ -// Package is meant to retrieve and process safe array data returned from COM. - -package ole - -// SafeArrayBound defines the SafeArray boundaries. -type SafeArrayBound struct { - Elements uint32 - LowerBound int32 -} - -// SafeArray is how COM handles arrays. 
-type SafeArray struct { - Dimensions uint16 - FeaturesFlag uint16 - ElementsSize uint32 - LocksAmount uint32 - Data uint32 - Bounds [16]byte -} - -// SAFEARRAY is obsolete, exists for backwards compatibility. -// Use SafeArray -type SAFEARRAY SafeArray - -// SAFEARRAYBOUND is obsolete, exists for backwards compatibility. -// Use SafeArrayBound -type SAFEARRAYBOUND SafeArrayBound diff --git a/vendor/github.com/go-ole/go-ole/safearray_func.go b/vendor/github.com/go-ole/go-ole/safearray_func.go deleted file mode 100644 index 0dee670ce..000000000 --- a/vendor/github.com/go-ole/go-ole/safearray_func.go +++ /dev/null @@ -1,211 +0,0 @@ -// +build !windows - -package ole - -import ( - "unsafe" -) - -// safeArrayAccessData returns raw array pointer. -// -// AKA: SafeArrayAccessData in Windows API. -func safeArrayAccessData(safearray *SafeArray) (uintptr, error) { - return uintptr(0), NewError(E_NOTIMPL) -} - -// safeArrayUnaccessData releases raw array. -// -// AKA: SafeArrayUnaccessData in Windows API. -func safeArrayUnaccessData(safearray *SafeArray) error { - return NewError(E_NOTIMPL) -} - -// safeArrayAllocData allocates SafeArray. -// -// AKA: SafeArrayAllocData in Windows API. -func safeArrayAllocData(safearray *SafeArray) error { - return NewError(E_NOTIMPL) -} - -// safeArrayAllocDescriptor allocates SafeArray. -// -// AKA: SafeArrayAllocDescriptor in Windows API. -func safeArrayAllocDescriptor(dimensions uint32) (*SafeArray, error) { - return nil, NewError(E_NOTIMPL) -} - -// safeArrayAllocDescriptorEx allocates SafeArray. -// -// AKA: SafeArrayAllocDescriptorEx in Windows API. -func safeArrayAllocDescriptorEx(variantType VT, dimensions uint32) (*SafeArray, error) { - return nil, NewError(E_NOTIMPL) -} - -// safeArrayCopy returns copy of SafeArray. -// -// AKA: SafeArrayCopy in Windows API. -func safeArrayCopy(original *SafeArray) (*SafeArray, error) { - return nil, NewError(E_NOTIMPL) -} - -// safeArrayCopyData duplicates SafeArray into another SafeArray object. -// -// AKA: SafeArrayCopyData in Windows API. -func safeArrayCopyData(original *SafeArray, duplicate *SafeArray) error { - return NewError(E_NOTIMPL) -} - -// safeArrayCreate creates SafeArray. -// -// AKA: SafeArrayCreate in Windows API. -func safeArrayCreate(variantType VT, dimensions uint32, bounds *SafeArrayBound) (*SafeArray, error) { - return nil, NewError(E_NOTIMPL) -} - -// safeArrayCreateEx creates SafeArray. -// -// AKA: SafeArrayCreateEx in Windows API. -func safeArrayCreateEx(variantType VT, dimensions uint32, bounds *SafeArrayBound, extra uintptr) (*SafeArray, error) { - return nil, NewError(E_NOTIMPL) -} - -// safeArrayCreateVector creates SafeArray. -// -// AKA: SafeArrayCreateVector in Windows API. -func safeArrayCreateVector(variantType VT, lowerBound int32, length uint32) (*SafeArray, error) { - return nil, NewError(E_NOTIMPL) -} - -// safeArrayCreateVectorEx creates SafeArray. -// -// AKA: SafeArrayCreateVectorEx in Windows API. -func safeArrayCreateVectorEx(variantType VT, lowerBound int32, length uint32, extra uintptr) (*SafeArray, error) { - return nil, NewError(E_NOTIMPL) -} - -// safeArrayDestroy destroys SafeArray object. -// -// AKA: SafeArrayDestroy in Windows API. -func safeArrayDestroy(safearray *SafeArray) error { - return NewError(E_NOTIMPL) -} - -// safeArrayDestroyData destroys SafeArray object. -// -// AKA: SafeArrayDestroyData in Windows API. -func safeArrayDestroyData(safearray *SafeArray) error { - return NewError(E_NOTIMPL) -} - -// safeArrayDestroyDescriptor destroys SafeArray object. 
-// -// AKA: SafeArrayDestroyDescriptor in Windows API. -func safeArrayDestroyDescriptor(safearray *SafeArray) error { - return NewError(E_NOTIMPL) -} - -// safeArrayGetDim is the amount of dimensions in the SafeArray. -// -// SafeArrays may have multiple dimensions. Meaning, it could be -// multidimensional array. -// -// AKA: SafeArrayGetDim in Windows API. -func safeArrayGetDim(safearray *SafeArray) (*uint32, error) { - u := uint32(0) - return &u, NewError(E_NOTIMPL) -} - -// safeArrayGetElementSize is the element size in bytes. -// -// AKA: SafeArrayGetElemsize in Windows API. -func safeArrayGetElementSize(safearray *SafeArray) (*uint32, error) { - u := uint32(0) - return &u, NewError(E_NOTIMPL) -} - -// safeArrayGetElement retrieves element at given index. -func safeArrayGetElement(safearray *SafeArray, index int32, pv unsafe.Pointer) error { - return NewError(E_NOTIMPL) -} - -// safeArrayGetElement retrieves element at given index and converts to string. -func safeArrayGetElementString(safearray *SafeArray, index int32) (string, error) { - return "", NewError(E_NOTIMPL) -} - -// safeArrayGetIID is the InterfaceID of the elements in the SafeArray. -// -// AKA: SafeArrayGetIID in Windows API. -func safeArrayGetIID(safearray *SafeArray) (*GUID, error) { - return nil, NewError(E_NOTIMPL) -} - -// safeArrayGetLBound returns lower bounds of SafeArray. -// -// SafeArrays may have multiple dimensions. Meaning, it could be -// multidimensional array. -// -// AKA: SafeArrayGetLBound in Windows API. -func safeArrayGetLBound(safearray *SafeArray, dimension uint32) (int32, error) { - return int32(0), NewError(E_NOTIMPL) -} - -// safeArrayGetUBound returns upper bounds of SafeArray. -// -// SafeArrays may have multiple dimensions. Meaning, it could be -// multidimensional array. -// -// AKA: SafeArrayGetUBound in Windows API. -func safeArrayGetUBound(safearray *SafeArray, dimension uint32) (int32, error) { - return int32(0), NewError(E_NOTIMPL) -} - -// safeArrayGetVartype returns data type of SafeArray. -// -// AKA: SafeArrayGetVartype in Windows API. -func safeArrayGetVartype(safearray *SafeArray) (uint16, error) { - return uint16(0), NewError(E_NOTIMPL) -} - -// safeArrayLock locks SafeArray for reading to modify SafeArray. -// -// This must be called during some calls to ensure that another process does not -// read or write to the SafeArray during editing. -// -// AKA: SafeArrayLock in Windows API. -func safeArrayLock(safearray *SafeArray) error { - return NewError(E_NOTIMPL) -} - -// safeArrayUnlock unlocks SafeArray for reading. -// -// AKA: SafeArrayUnlock in Windows API. -func safeArrayUnlock(safearray *SafeArray) error { - return NewError(E_NOTIMPL) -} - -// safeArrayPutElement stores the data element at the specified location in the -// array. -// -// AKA: SafeArrayPutElement in Windows API. -func safeArrayPutElement(safearray *SafeArray, index int64, element uintptr) error { - return NewError(E_NOTIMPL) -} - -// safeArrayGetRecordInfo accesses IRecordInfo info for custom types. -// -// AKA: SafeArrayGetRecordInfo in Windows API. -// -// XXX: Must implement IRecordInfo interface for this to return. -func safeArrayGetRecordInfo(safearray *SafeArray) (interface{}, error) { - return nil, NewError(E_NOTIMPL) -} - -// safeArraySetRecordInfo mutates IRecordInfo info for custom types. -// -// AKA: SafeArraySetRecordInfo in Windows API. -// -// XXX: Must implement IRecordInfo interface for this to return. 
-func safeArraySetRecordInfo(safearray *SafeArray, recordInfo interface{}) error { - return NewError(E_NOTIMPL) -} diff --git a/vendor/github.com/go-ole/go-ole/safearray_windows.go b/vendor/github.com/go-ole/go-ole/safearray_windows.go deleted file mode 100644 index b48a2394d..000000000 --- a/vendor/github.com/go-ole/go-ole/safearray_windows.go +++ /dev/null @@ -1,337 +0,0 @@ -// +build windows - -package ole - -import ( - "unsafe" -) - -var ( - procSafeArrayAccessData, _ = modoleaut32.FindProc("SafeArrayAccessData") - procSafeArrayAllocData, _ = modoleaut32.FindProc("SafeArrayAllocData") - procSafeArrayAllocDescriptor, _ = modoleaut32.FindProc("SafeArrayAllocDescriptor") - procSafeArrayAllocDescriptorEx, _ = modoleaut32.FindProc("SafeArrayAllocDescriptorEx") - procSafeArrayCopy, _ = modoleaut32.FindProc("SafeArrayCopy") - procSafeArrayCopyData, _ = modoleaut32.FindProc("SafeArrayCopyData") - procSafeArrayCreate, _ = modoleaut32.FindProc("SafeArrayCreate") - procSafeArrayCreateEx, _ = modoleaut32.FindProc("SafeArrayCreateEx") - procSafeArrayCreateVector, _ = modoleaut32.FindProc("SafeArrayCreateVector") - procSafeArrayCreateVectorEx, _ = modoleaut32.FindProc("SafeArrayCreateVectorEx") - procSafeArrayDestroy, _ = modoleaut32.FindProc("SafeArrayDestroy") - procSafeArrayDestroyData, _ = modoleaut32.FindProc("SafeArrayDestroyData") - procSafeArrayDestroyDescriptor, _ = modoleaut32.FindProc("SafeArrayDestroyDescriptor") - procSafeArrayGetDim, _ = modoleaut32.FindProc("SafeArrayGetDim") - procSafeArrayGetElement, _ = modoleaut32.FindProc("SafeArrayGetElement") - procSafeArrayGetElemsize, _ = modoleaut32.FindProc("SafeArrayGetElemsize") - procSafeArrayGetIID, _ = modoleaut32.FindProc("SafeArrayGetIID") - procSafeArrayGetLBound, _ = modoleaut32.FindProc("SafeArrayGetLBound") - procSafeArrayGetUBound, _ = modoleaut32.FindProc("SafeArrayGetUBound") - procSafeArrayGetVartype, _ = modoleaut32.FindProc("SafeArrayGetVartype") - procSafeArrayLock, _ = modoleaut32.FindProc("SafeArrayLock") - procSafeArrayPtrOfIndex, _ = modoleaut32.FindProc("SafeArrayPtrOfIndex") - procSafeArrayUnaccessData, _ = modoleaut32.FindProc("SafeArrayUnaccessData") - procSafeArrayUnlock, _ = modoleaut32.FindProc("SafeArrayUnlock") - procSafeArrayPutElement, _ = modoleaut32.FindProc("SafeArrayPutElement") - //procSafeArrayRedim, _ = modoleaut32.FindProc("SafeArrayRedim") // TODO - //procSafeArraySetIID, _ = modoleaut32.FindProc("SafeArraySetIID") // TODO - procSafeArrayGetRecordInfo, _ = modoleaut32.FindProc("SafeArrayGetRecordInfo") - procSafeArraySetRecordInfo, _ = modoleaut32.FindProc("SafeArraySetRecordInfo") -) - -// safeArrayAccessData returns raw array pointer. -// -// AKA: SafeArrayAccessData in Windows API. -// Todo: Test -func safeArrayAccessData(safearray *SafeArray) (element uintptr, err error) { - err = convertHresultToError( - procSafeArrayAccessData.Call( - uintptr(unsafe.Pointer(safearray)), - uintptr(unsafe.Pointer(&element)))) - return -} - -// safeArrayUnaccessData releases raw array. -// -// AKA: SafeArrayUnaccessData in Windows API. -func safeArrayUnaccessData(safearray *SafeArray) (err error) { - err = convertHresultToError(procSafeArrayUnaccessData.Call(uintptr(unsafe.Pointer(safearray)))) - return -} - -// safeArrayAllocData allocates SafeArray. -// -// AKA: SafeArrayAllocData in Windows API. 
-func safeArrayAllocData(safearray *SafeArray) (err error) { - err = convertHresultToError(procSafeArrayAllocData.Call(uintptr(unsafe.Pointer(safearray)))) - return -} - -// safeArrayAllocDescriptor allocates SafeArray. -// -// AKA: SafeArrayAllocDescriptor in Windows API. -func safeArrayAllocDescriptor(dimensions uint32) (safearray *SafeArray, err error) { - err = convertHresultToError( - procSafeArrayAllocDescriptor.Call(uintptr(dimensions), uintptr(unsafe.Pointer(&safearray)))) - return -} - -// safeArrayAllocDescriptorEx allocates SafeArray. -// -// AKA: SafeArrayAllocDescriptorEx in Windows API. -func safeArrayAllocDescriptorEx(variantType VT, dimensions uint32) (safearray *SafeArray, err error) { - err = convertHresultToError( - procSafeArrayAllocDescriptorEx.Call( - uintptr(variantType), - uintptr(dimensions), - uintptr(unsafe.Pointer(&safearray)))) - return -} - -// safeArrayCopy returns copy of SafeArray. -// -// AKA: SafeArrayCopy in Windows API. -func safeArrayCopy(original *SafeArray) (safearray *SafeArray, err error) { - err = convertHresultToError( - procSafeArrayCopy.Call( - uintptr(unsafe.Pointer(original)), - uintptr(unsafe.Pointer(&safearray)))) - return -} - -// safeArrayCopyData duplicates SafeArray into another SafeArray object. -// -// AKA: SafeArrayCopyData in Windows API. -func safeArrayCopyData(original *SafeArray, duplicate *SafeArray) (err error) { - err = convertHresultToError( - procSafeArrayCopyData.Call( - uintptr(unsafe.Pointer(original)), - uintptr(unsafe.Pointer(duplicate)))) - return -} - -// safeArrayCreate creates SafeArray. -// -// AKA: SafeArrayCreate in Windows API. -func safeArrayCreate(variantType VT, dimensions uint32, bounds *SafeArrayBound) (safearray *SafeArray, err error) { - sa, _, err := procSafeArrayCreate.Call( - uintptr(variantType), - uintptr(dimensions), - uintptr(unsafe.Pointer(bounds))) - safearray = (*SafeArray)(unsafe.Pointer(&sa)) - return -} - -// safeArrayCreateEx creates SafeArray. -// -// AKA: SafeArrayCreateEx in Windows API. -func safeArrayCreateEx(variantType VT, dimensions uint32, bounds *SafeArrayBound, extra uintptr) (safearray *SafeArray, err error) { - sa, _, err := procSafeArrayCreateEx.Call( - uintptr(variantType), - uintptr(dimensions), - uintptr(unsafe.Pointer(bounds)), - extra) - safearray = (*SafeArray)(unsafe.Pointer(sa)) - return -} - -// safeArrayCreateVector creates SafeArray. -// -// AKA: SafeArrayCreateVector in Windows API. -func safeArrayCreateVector(variantType VT, lowerBound int32, length uint32) (safearray *SafeArray, err error) { - sa, _, err := procSafeArrayCreateVector.Call( - uintptr(variantType), - uintptr(lowerBound), - uintptr(length)) - safearray = (*SafeArray)(unsafe.Pointer(sa)) - return -} - -// safeArrayCreateVectorEx creates SafeArray. -// -// AKA: SafeArrayCreateVectorEx in Windows API. -func safeArrayCreateVectorEx(variantType VT, lowerBound int32, length uint32, extra uintptr) (safearray *SafeArray, err error) { - sa, _, err := procSafeArrayCreateVectorEx.Call( - uintptr(variantType), - uintptr(lowerBound), - uintptr(length), - extra) - safearray = (*SafeArray)(unsafe.Pointer(sa)) - return -} - -// safeArrayDestroy destroys SafeArray object. -// -// AKA: SafeArrayDestroy in Windows API. -func safeArrayDestroy(safearray *SafeArray) (err error) { - err = convertHresultToError(procSafeArrayDestroy.Call(uintptr(unsafe.Pointer(safearray)))) - return -} - -// safeArrayDestroyData destroys SafeArray object. -// -// AKA: SafeArrayDestroyData in Windows API. 
-func safeArrayDestroyData(safearray *SafeArray) (err error) { - err = convertHresultToError(procSafeArrayDestroyData.Call(uintptr(unsafe.Pointer(safearray)))) - return -} - -// safeArrayDestroyDescriptor destroys SafeArray object. -// -// AKA: SafeArrayDestroyDescriptor in Windows API. -func safeArrayDestroyDescriptor(safearray *SafeArray) (err error) { - err = convertHresultToError(procSafeArrayDestroyDescriptor.Call(uintptr(unsafe.Pointer(safearray)))) - return -} - -// safeArrayGetDim is the amount of dimensions in the SafeArray. -// -// SafeArrays may have multiple dimensions. Meaning, it could be -// multidimensional array. -// -// AKA: SafeArrayGetDim in Windows API. -func safeArrayGetDim(safearray *SafeArray) (dimensions *uint32, err error) { - l, _, err := procSafeArrayGetDim.Call(uintptr(unsafe.Pointer(safearray))) - dimensions = (*uint32)(unsafe.Pointer(l)) - return -} - -// safeArrayGetElementSize is the element size in bytes. -// -// AKA: SafeArrayGetElemsize in Windows API. -func safeArrayGetElementSize(safearray *SafeArray) (length *uint32, err error) { - l, _, err := procSafeArrayGetElemsize.Call(uintptr(unsafe.Pointer(safearray))) - length = (*uint32)(unsafe.Pointer(l)) - return -} - -// safeArrayGetElement retrieves element at given index. -func safeArrayGetElement(safearray *SafeArray, index int32, pv unsafe.Pointer) error { - return convertHresultToError( - procSafeArrayGetElement.Call( - uintptr(unsafe.Pointer(safearray)), - uintptr(unsafe.Pointer(&index)), - uintptr(pv))) -} - -// safeArrayGetElementString retrieves element at given index and converts to string. -func safeArrayGetElementString(safearray *SafeArray, index int32) (str string, err error) { - var element *int16 - err = convertHresultToError( - procSafeArrayGetElement.Call( - uintptr(unsafe.Pointer(safearray)), - uintptr(unsafe.Pointer(&index)), - uintptr(unsafe.Pointer(&element)))) - str = BstrToString(*(**uint16)(unsafe.Pointer(&element))) - SysFreeString(element) - return -} - -// safeArrayGetIID is the InterfaceID of the elements in the SafeArray. -// -// AKA: SafeArrayGetIID in Windows API. -func safeArrayGetIID(safearray *SafeArray) (guid *GUID, err error) { - err = convertHresultToError( - procSafeArrayGetIID.Call( - uintptr(unsafe.Pointer(safearray)), - uintptr(unsafe.Pointer(&guid)))) - return -} - -// safeArrayGetLBound returns lower bounds of SafeArray. -// -// SafeArrays may have multiple dimensions. Meaning, it could be -// multidimensional array. -// -// AKA: SafeArrayGetLBound in Windows API. -func safeArrayGetLBound(safearray *SafeArray, dimension uint32) (lowerBound int32, err error) { - err = convertHresultToError( - procSafeArrayGetLBound.Call( - uintptr(unsafe.Pointer(safearray)), - uintptr(dimension), - uintptr(unsafe.Pointer(&lowerBound)))) - return -} - -// safeArrayGetUBound returns upper bounds of SafeArray. -// -// SafeArrays may have multiple dimensions. Meaning, it could be -// multidimensional array. -// -// AKA: SafeArrayGetUBound in Windows API. -func safeArrayGetUBound(safearray *SafeArray, dimension uint32) (upperBound int32, err error) { - err = convertHresultToError( - procSafeArrayGetUBound.Call( - uintptr(unsafe.Pointer(safearray)), - uintptr(dimension), - uintptr(unsafe.Pointer(&upperBound)))) - return -} - -// safeArrayGetVartype returns data type of SafeArray. -// -// AKA: SafeArrayGetVartype in Windows API. 
-func safeArrayGetVartype(safearray *SafeArray) (varType uint16, err error) { - err = convertHresultToError( - procSafeArrayGetVartype.Call( - uintptr(unsafe.Pointer(safearray)), - uintptr(unsafe.Pointer(&varType)))) - return -} - -// safeArrayLock locks SafeArray for reading to modify SafeArray. -// -// This must be called during some calls to ensure that another process does not -// read or write to the SafeArray during editing. -// -// AKA: SafeArrayLock in Windows API. -func safeArrayLock(safearray *SafeArray) (err error) { - err = convertHresultToError(procSafeArrayLock.Call(uintptr(unsafe.Pointer(safearray)))) - return -} - -// safeArrayUnlock unlocks SafeArray for reading. -// -// AKA: SafeArrayUnlock in Windows API. -func safeArrayUnlock(safearray *SafeArray) (err error) { - err = convertHresultToError(procSafeArrayUnlock.Call(uintptr(unsafe.Pointer(safearray)))) - return -} - -// safeArrayPutElement stores the data element at the specified location in the -// array. -// -// AKA: SafeArrayPutElement in Windows API. -func safeArrayPutElement(safearray *SafeArray, index int64, element uintptr) (err error) { - err = convertHresultToError( - procSafeArrayPutElement.Call( - uintptr(unsafe.Pointer(safearray)), - uintptr(unsafe.Pointer(&index)), - uintptr(unsafe.Pointer(element)))) - return -} - -// safeArrayGetRecordInfo accesses IRecordInfo info for custom types. -// -// AKA: SafeArrayGetRecordInfo in Windows API. -// -// XXX: Must implement IRecordInfo interface for this to return. -func safeArrayGetRecordInfo(safearray *SafeArray) (recordInfo interface{}, err error) { - err = convertHresultToError( - procSafeArrayGetRecordInfo.Call( - uintptr(unsafe.Pointer(safearray)), - uintptr(unsafe.Pointer(&recordInfo)))) - return -} - -// safeArraySetRecordInfo mutates IRecordInfo info for custom types. -// -// AKA: SafeArraySetRecordInfo in Windows API. -// -// XXX: Must implement IRecordInfo interface for this to return. -func safeArraySetRecordInfo(safearray *SafeArray, recordInfo interface{}) (err error) { - err = convertHresultToError( - procSafeArraySetRecordInfo.Call( - uintptr(unsafe.Pointer(safearray)), - uintptr(unsafe.Pointer(&recordInfo)))) - return -} diff --git a/vendor/github.com/go-ole/go-ole/safearrayconversion.go b/vendor/github.com/go-ole/go-ole/safearrayconversion.go deleted file mode 100644 index 259f488ec..000000000 --- a/vendor/github.com/go-ole/go-ole/safearrayconversion.go +++ /dev/null @@ -1,140 +0,0 @@ -// Helper for converting SafeArray to array of objects. 
- -package ole - -import ( - "unsafe" -) - -type SafeArrayConversion struct { - Array *SafeArray -} - -func (sac *SafeArrayConversion) ToStringArray() (strings []string) { - totalElements, _ := sac.TotalElements(0) - strings = make([]string, totalElements) - - for i := int32(0); i < totalElements; i++ { - strings[int32(i)], _ = safeArrayGetElementString(sac.Array, i) - } - - return -} - -func (sac *SafeArrayConversion) ToByteArray() (bytes []byte) { - totalElements, _ := sac.TotalElements(0) - bytes = make([]byte, totalElements) - - for i := int32(0); i < totalElements; i++ { - safeArrayGetElement(sac.Array, i, unsafe.Pointer(&bytes[int32(i)])) - } - - return -} - -func (sac *SafeArrayConversion) ToValueArray() (values []interface{}) { - totalElements, _ := sac.TotalElements(0) - values = make([]interface{}, totalElements) - vt, _ := safeArrayGetVartype(sac.Array) - - for i := int32(0); i < totalElements; i++ { - switch VT(vt) { - case VT_BOOL: - var v bool - safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) - values[i] = v - case VT_I1: - var v int8 - safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) - values[i] = v - case VT_I2: - var v int16 - safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) - values[i] = v - case VT_I4: - var v int32 - safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) - values[i] = v - case VT_I8: - var v int64 - safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) - values[i] = v - case VT_UI1: - var v uint8 - safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) - values[i] = v - case VT_UI2: - var v uint16 - safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) - values[i] = v - case VT_UI4: - var v uint32 - safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) - values[i] = v - case VT_UI8: - var v uint64 - safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) - values[i] = v - case VT_R4: - var v float32 - safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) - values[i] = v - case VT_R8: - var v float64 - safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) - values[i] = v - case VT_BSTR: - var v string - safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) - values[i] = v - case VT_VARIANT: - var v VARIANT - safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) - values[i] = v.Value() - default: - // TODO - } - } - - return -} - -func (sac *SafeArrayConversion) GetType() (varType uint16, err error) { - return safeArrayGetVartype(sac.Array) -} - -func (sac *SafeArrayConversion) GetDimensions() (dimensions *uint32, err error) { - return safeArrayGetDim(sac.Array) -} - -func (sac *SafeArrayConversion) GetSize() (length *uint32, err error) { - return safeArrayGetElementSize(sac.Array) -} - -func (sac *SafeArrayConversion) TotalElements(index uint32) (totalElements int32, err error) { - if index < 1 { - index = 1 - } - - // Get array bounds - var LowerBounds int32 - var UpperBounds int32 - - LowerBounds, err = safeArrayGetLBound(sac.Array, index) - if err != nil { - return - } - - UpperBounds, err = safeArrayGetUBound(sac.Array, index) - if err != nil { - return - } - - totalElements = UpperBounds - LowerBounds + 1 - return -} - -// Release Safe Array memory -func (sac *SafeArrayConversion) Release() { - safeArrayDestroy(sac.Array) -} diff --git a/vendor/github.com/go-ole/go-ole/safearrayslices.go b/vendor/github.com/go-ole/go-ole/safearrayslices.go deleted file mode 100644 index a9fa885f1..000000000 --- a/vendor/github.com/go-ole/go-ole/safearrayslices.go +++ /dev/null @@ -1,33 +0,0 @@ -// +build windows - -package ole - -import ( - "unsafe" -) - 
-func safeArrayFromByteSlice(slice []byte) *SafeArray { - array, _ := safeArrayCreateVector(VT_UI1, 0, uint32(len(slice))) - - if array == nil { - panic("Could not convert []byte to SAFEARRAY") - } - - for i, v := range slice { - safeArrayPutElement(array, int64(i), uintptr(unsafe.Pointer(&v))) - } - return array -} - -func safeArrayFromStringSlice(slice []string) *SafeArray { - array, _ := safeArrayCreateVector(VT_BSTR, 0, uint32(len(slice))) - - if array == nil { - panic("Could not convert []string to SAFEARRAY") - } - // SysAllocStringLen(s) - for i, v := range slice { - safeArrayPutElement(array, int64(i), uintptr(unsafe.Pointer(SysAllocStringLen(v)))) - } - return array -} diff --git a/vendor/github.com/go-ole/go-ole/utility.go b/vendor/github.com/go-ole/go-ole/utility.go deleted file mode 100644 index 99ee82dc3..000000000 --- a/vendor/github.com/go-ole/go-ole/utility.go +++ /dev/null @@ -1,101 +0,0 @@ -package ole - -import ( - "unicode/utf16" - "unsafe" -) - -// ClassIDFrom retrieves class ID whether given is program ID or application string. -// -// Helper that provides check against both Class ID from Program ID and Class ID from string. It is -// faster, if you know which you are using, to use the individual functions, but this will check -// against available functions for you. -func ClassIDFrom(programID string) (classID *GUID, err error) { - classID, err = CLSIDFromProgID(programID) - if err != nil { - classID, err = CLSIDFromString(programID) - if err != nil { - return - } - } - return -} - -// BytePtrToString converts byte pointer to a Go string. -func BytePtrToString(p *byte) string { - a := (*[10000]uint8)(unsafe.Pointer(p)) - i := 0 - for a[i] != 0 { - i++ - } - return string(a[:i]) -} - -// UTF16PtrToString is alias for LpOleStrToString. -// -// Kept for compatibility reasons. -func UTF16PtrToString(p *uint16) string { - return LpOleStrToString(p) -} - -// LpOleStrToString converts COM Unicode to Go string. -func LpOleStrToString(p *uint16) string { - if p == nil { - return "" - } - - length := lpOleStrLen(p) - a := make([]uint16, length) - - ptr := unsafe.Pointer(p) - - for i := 0; i < int(length); i++ { - a[i] = *(*uint16)(ptr) - ptr = unsafe.Pointer(uintptr(ptr) + 2) - } - - return string(utf16.Decode(a)) -} - -// BstrToString converts COM binary string to Go string. -func BstrToString(p *uint16) string { - if p == nil { - return "" - } - length := SysStringLen((*int16)(unsafe.Pointer(p))) - a := make([]uint16, length) - - ptr := unsafe.Pointer(p) - - for i := 0; i < int(length); i++ { - a[i] = *(*uint16)(ptr) - ptr = unsafe.Pointer(uintptr(ptr) + 2) - } - return string(utf16.Decode(a)) -} - -// lpOleStrLen returns the length of Unicode string. -func lpOleStrLen(p *uint16) (length int64) { - if p == nil { - return 0 - } - - ptr := unsafe.Pointer(p) - - for i := 0; ; i++ { - if 0 == *(*uint16)(ptr) { - length = int64(i) - break - } - ptr = unsafe.Pointer(uintptr(ptr) + 2) - } - return -} - -// convertHresultToError converts syscall to error, if call is unsuccessful. 
-func convertHresultToError(hr uintptr, r2 uintptr, ignore error) (err error) { - if hr != 0 { - err = NewError(hr) - } - return -} diff --git a/vendor/github.com/go-ole/go-ole/variables.go b/vendor/github.com/go-ole/go-ole/variables.go deleted file mode 100644 index ebe00f1cf..000000000 --- a/vendor/github.com/go-ole/go-ole/variables.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build windows - -package ole - -import ( - "syscall" -) - -var ( - modcombase = syscall.NewLazyDLL("combase.dll") - modkernel32, _ = syscall.LoadDLL("kernel32.dll") - modole32, _ = syscall.LoadDLL("ole32.dll") - modoleaut32, _ = syscall.LoadDLL("oleaut32.dll") - modmsvcrt, _ = syscall.LoadDLL("msvcrt.dll") - moduser32, _ = syscall.LoadDLL("user32.dll") -) diff --git a/vendor/github.com/go-ole/go-ole/variant.go b/vendor/github.com/go-ole/go-ole/variant.go deleted file mode 100644 index 967a23fea..000000000 --- a/vendor/github.com/go-ole/go-ole/variant.go +++ /dev/null @@ -1,105 +0,0 @@ -package ole - -import "unsafe" - -// NewVariant returns new variant based on type and value. -func NewVariant(vt VT, val int64) VARIANT { - return VARIANT{VT: vt, Val: val} -} - -// ToIUnknown converts Variant to Unknown object. -func (v *VARIANT) ToIUnknown() *IUnknown { - if v.VT != VT_UNKNOWN { - return nil - } - return (*IUnknown)(unsafe.Pointer(uintptr(v.Val))) -} - -// ToIDispatch converts variant to dispatch object. -func (v *VARIANT) ToIDispatch() *IDispatch { - if v.VT != VT_DISPATCH { - return nil - } - return (*IDispatch)(unsafe.Pointer(uintptr(v.Val))) -} - -// ToArray converts variant to SafeArray helper. -func (v *VARIANT) ToArray() *SafeArrayConversion { - if v.VT != VT_SAFEARRAY { - if v.VT&VT_ARRAY == 0 { - return nil - } - } - var safeArray *SafeArray = (*SafeArray)(unsafe.Pointer(uintptr(v.Val))) - return &SafeArrayConversion{safeArray} -} - -// ToString converts variant to Go string. -func (v *VARIANT) ToString() string { - if v.VT != VT_BSTR { - return "" - } - return BstrToString(*(**uint16)(unsafe.Pointer(&v.Val))) -} - -// Clear the memory of variant object. -func (v *VARIANT) Clear() error { - return VariantClear(v) -} - -// Value returns variant value based on its type. -// -// Currently supported types: 2- and 4-byte integers, strings, bools. -// Note that 64-bit integers, datetimes, and other types are stored as strings -// and will be returned as strings. -// -// Needs to be further converted, because this returns an interface{}. -func (v *VARIANT) Value() interface{} { - switch v.VT { - case VT_I1: - return int8(v.Val) - case VT_UI1: - return uint8(v.Val) - case VT_I2: - return int16(v.Val) - case VT_UI2: - return uint16(v.Val) - case VT_I4: - return int32(v.Val) - case VT_UI4: - return uint32(v.Val) - case VT_I8: - return int64(v.Val) - case VT_UI8: - return uint64(v.Val) - case VT_INT: - return int(v.Val) - case VT_UINT: - return uint(v.Val) - case VT_INT_PTR: - return uintptr(v.Val) // TODO - case VT_UINT_PTR: - return uintptr(v.Val) - case VT_R4: - return *(*float32)(unsafe.Pointer(&v.Val)) - case VT_R8: - return *(*float64)(unsafe.Pointer(&v.Val)) - case VT_BSTR: - return v.ToString() - case VT_DATE: - // VT_DATE type will either return float64 or time.Time. 
- d := uint64(v.Val) - date, err := GetVariantDate(d) - if err != nil { - return float64(v.Val) - } - return date - case VT_UNKNOWN: - return v.ToIUnknown() - case VT_DISPATCH: - return v.ToIDispatch() - case VT_BOOL: - return v.Val != 0 - } - return nil -} diff --git a/vendor/github.com/go-ole/go-ole/variant_386.go b/vendor/github.com/go-ole/go-ole/variant_386.go deleted file mode 100644 index e73736bf3..000000000 --- a/vendor/github.com/go-ole/go-ole/variant_386.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build 386 - -package ole - -type VARIANT struct { - VT VT // 2 - wReserved1 uint16 // 4 - wReserved2 uint16 // 6 - wReserved3 uint16 // 8 - Val int64 // 16 -} diff --git a/vendor/github.com/go-ole/go-ole/variant_amd64.go b/vendor/github.com/go-ole/go-ole/variant_amd64.go deleted file mode 100644 index dccdde132..000000000 --- a/vendor/github.com/go-ole/go-ole/variant_amd64.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build amd64 - -package ole - -type VARIANT struct { - VT VT // 2 - wReserved1 uint16 // 4 - wReserved2 uint16 // 6 - wReserved3 uint16 // 8 - Val int64 // 16 - _ [8]byte // 24 -} diff --git a/vendor/github.com/go-ole/go-ole/variant_date_386.go b/vendor/github.com/go-ole/go-ole/variant_date_386.go deleted file mode 100644 index 1b970f63f..000000000 --- a/vendor/github.com/go-ole/go-ole/variant_date_386.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build windows,386 - -package ole - -import ( - "errors" - "syscall" - "time" - "unsafe" -) - -// GetVariantDate converts COM Variant Time value to Go time.Time. -func GetVariantDate(value uint64) (time.Time, error) { - var st syscall.Systemtime - v1 := uint32(value) - v2 := uint32(value >> 32) - r, _, _ := procVariantTimeToSystemTime.Call(uintptr(v1), uintptr(v2), uintptr(unsafe.Pointer(&st))) - if r != 0 { - return time.Date(int(st.Year), time.Month(st.Month), int(st.Day), int(st.Hour), int(st.Minute), int(st.Second), int(st.Milliseconds/1000), time.UTC), nil - } - return time.Now(), errors.New("Could not convert to time, passing current time.") -} diff --git a/vendor/github.com/go-ole/go-ole/variant_date_amd64.go b/vendor/github.com/go-ole/go-ole/variant_date_amd64.go deleted file mode 100644 index 6952f1f0d..000000000 --- a/vendor/github.com/go-ole/go-ole/variant_date_amd64.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build windows,amd64 - -package ole - -import ( - "errors" - "syscall" - "time" - "unsafe" -) - -// GetVariantDate converts COM Variant Time value to Go time.Time. 
-func GetVariantDate(value uint64) (time.Time, error) { - var st syscall.Systemtime - r, _, _ := procVariantTimeToSystemTime.Call(uintptr(value), uintptr(unsafe.Pointer(&st))) - if r != 0 { - return time.Date(int(st.Year), time.Month(st.Month), int(st.Day), int(st.Hour), int(st.Minute), int(st.Second), int(st.Milliseconds/1000), time.UTC), nil - } - return time.Now(), errors.New("Could not convert to time, passing current time.") -} diff --git a/vendor/github.com/go-ole/go-ole/variant_ppc64le.go b/vendor/github.com/go-ole/go-ole/variant_ppc64le.go deleted file mode 100644 index 326427a7d..000000000 --- a/vendor/github.com/go-ole/go-ole/variant_ppc64le.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build ppc64le - -package ole - -type VARIANT struct { - VT VT // 2 - wReserved1 uint16 // 4 - wReserved2 uint16 // 6 - wReserved3 uint16 // 8 - Val int64 // 16 - _ [8]byte // 24 -} diff --git a/vendor/github.com/go-ole/go-ole/variant_s390x.go b/vendor/github.com/go-ole/go-ole/variant_s390x.go deleted file mode 100644 index 9874ca66b..000000000 --- a/vendor/github.com/go-ole/go-ole/variant_s390x.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build s390x - -package ole - -type VARIANT struct { - VT VT // 2 - wReserved1 uint16 // 4 - wReserved2 uint16 // 6 - wReserved3 uint16 // 8 - Val int64 // 16 - _ [8]byte // 24 -} diff --git a/vendor/github.com/go-ole/go-ole/vt_string.go b/vendor/github.com/go-ole/go-ole/vt_string.go deleted file mode 100644 index 729b4a04d..000000000 --- a/vendor/github.com/go-ole/go-ole/vt_string.go +++ /dev/null @@ -1,58 +0,0 @@ -// generated by stringer -output vt_string.go -type VT; DO NOT EDIT - -package ole - -import "fmt" - -const ( - _VT_name_0 = "VT_EMPTYVT_NULLVT_I2VT_I4VT_R4VT_R8VT_CYVT_DATEVT_BSTRVT_DISPATCHVT_ERRORVT_BOOLVT_VARIANTVT_UNKNOWNVT_DECIMAL" - _VT_name_1 = "VT_I1VT_UI1VT_UI2VT_UI4VT_I8VT_UI8VT_INTVT_UINTVT_VOIDVT_HRESULTVT_PTRVT_SAFEARRAYVT_CARRAYVT_USERDEFINEDVT_LPSTRVT_LPWSTR" - _VT_name_2 = "VT_RECORDVT_INT_PTRVT_UINT_PTR" - _VT_name_3 = "VT_FILETIMEVT_BLOBVT_STREAMVT_STORAGEVT_STREAMED_OBJECTVT_STORED_OBJECTVT_BLOB_OBJECTVT_CFVT_CLSID" - _VT_name_4 = "VT_BSTR_BLOBVT_VECTOR" - _VT_name_5 = "VT_ARRAY" - _VT_name_6 = "VT_BYREF" - _VT_name_7 = "VT_RESERVED" - _VT_name_8 = "VT_ILLEGAL" -) - -var ( - _VT_index_0 = [...]uint8{0, 8, 15, 20, 25, 30, 35, 40, 47, 54, 65, 73, 80, 90, 100, 110} - _VT_index_1 = [...]uint8{0, 5, 11, 17, 23, 28, 34, 40, 47, 54, 64, 70, 82, 91, 105, 113, 122} - _VT_index_2 = [...]uint8{0, 9, 19, 30} - _VT_index_3 = [...]uint8{0, 11, 18, 27, 37, 55, 71, 85, 90, 98} - _VT_index_4 = [...]uint8{0, 12, 21} - _VT_index_5 = [...]uint8{0, 8} - _VT_index_6 = [...]uint8{0, 8} - _VT_index_7 = [...]uint8{0, 11} - _VT_index_8 = [...]uint8{0, 10} -) - -func (i VT) String() string { - switch { - case 0 <= i && i <= 14: - return _VT_name_0[_VT_index_0[i]:_VT_index_0[i+1]] - case 16 <= i && i <= 31: - i -= 16 - return _VT_name_1[_VT_index_1[i]:_VT_index_1[i+1]] - case 36 <= i && i <= 38: - i -= 36 - return _VT_name_2[_VT_index_2[i]:_VT_index_2[i+1]] - case 64 <= i && i <= 72: - i -= 64 - return _VT_name_3[_VT_index_3[i]:_VT_index_3[i+1]] - case 4095 <= i && i <= 4096: - i -= 4095 - return _VT_name_4[_VT_index_4[i]:_VT_index_4[i+1]] - case i == 8192: - return _VT_name_5 - case i == 16384: - return _VT_name_6 - case i == 32768: - return _VT_name_7 - case i == 65535: - return _VT_name_8 - default: - return fmt.Sprintf("VT(%d)", i) - } -} diff --git a/vendor/github.com/go-ole/go-ole/winrt.go b/vendor/github.com/go-ole/go-ole/winrt.go deleted file mode 100644 index 
4e9eca732..000000000 --- a/vendor/github.com/go-ole/go-ole/winrt.go +++ /dev/null @@ -1,99 +0,0 @@ -// +build windows - -package ole - -import ( - "reflect" - "syscall" - "unicode/utf8" - "unsafe" -) - -var ( - procRoInitialize = modcombase.NewProc("RoInitialize") - procRoActivateInstance = modcombase.NewProc("RoActivateInstance") - procRoGetActivationFactory = modcombase.NewProc("RoGetActivationFactory") - procWindowsCreateString = modcombase.NewProc("WindowsCreateString") - procWindowsDeleteString = modcombase.NewProc("WindowsDeleteString") - procWindowsGetStringRawBuffer = modcombase.NewProc("WindowsGetStringRawBuffer") -) - -func RoInitialize(thread_type uint32) (err error) { - hr, _, _ := procRoInitialize.Call(uintptr(thread_type)) - if hr != 0 { - err = NewError(hr) - } - return -} - -func RoActivateInstance(clsid string) (ins *IInspectable, err error) { - hClsid, err := NewHString(clsid) - if err != nil { - return nil, err - } - defer DeleteHString(hClsid) - - hr, _, _ := procRoActivateInstance.Call( - uintptr(unsafe.Pointer(hClsid)), - uintptr(unsafe.Pointer(&ins))) - if hr != 0 { - err = NewError(hr) - } - return -} - -func RoGetActivationFactory(clsid string, iid *GUID) (ins *IInspectable, err error) { - hClsid, err := NewHString(clsid) - if err != nil { - return nil, err - } - defer DeleteHString(hClsid) - - hr, _, _ := procRoGetActivationFactory.Call( - uintptr(unsafe.Pointer(hClsid)), - uintptr(unsafe.Pointer(iid)), - uintptr(unsafe.Pointer(&ins))) - if hr != 0 { - err = NewError(hr) - } - return -} - -// HString is handle string for pointers. -type HString uintptr - -// NewHString returns a new HString for Go string. -func NewHString(s string) (hstring HString, err error) { - u16 := syscall.StringToUTF16Ptr(s) - len := uint32(utf8.RuneCountInString(s)) - hr, _, _ := procWindowsCreateString.Call( - uintptr(unsafe.Pointer(u16)), - uintptr(len), - uintptr(unsafe.Pointer(&hstring))) - if hr != 0 { - err = NewError(hr) - } - return -} - -// DeleteHString deletes HString. -func DeleteHString(hstring HString) (err error) { - hr, _, _ := procWindowsDeleteString.Call(uintptr(hstring)) - if hr != 0 { - err = NewError(hr) - } - return -} - -// String returns Go string value of HString. -func (h HString) String() string { - var u16buf uintptr - var u16len uint32 - u16buf, _, _ = procWindowsGetStringRawBuffer.Call( - uintptr(h), - uintptr(unsafe.Pointer(&u16len))) - - u16hdr := reflect.SliceHeader{Data: u16buf, Len: int(u16len), Cap: int(u16len)} - u16 := *(*[]uint16)(unsafe.Pointer(&u16hdr)) - return syscall.UTF16ToString(u16) -} diff --git a/vendor/github.com/go-ole/go-ole/winrt_doc.go b/vendor/github.com/go-ole/go-ole/winrt_doc.go deleted file mode 100644 index 52e6d74c9..000000000 --- a/vendor/github.com/go-ole/go-ole/winrt_doc.go +++ /dev/null @@ -1,36 +0,0 @@ -// +build !windows - -package ole - -// RoInitialize -func RoInitialize(thread_type uint32) (err error) { - return NewError(E_NOTIMPL) -} - -// RoActivateInstance -func RoActivateInstance(clsid string) (ins *IInspectable, err error) { - return nil, NewError(E_NOTIMPL) -} - -// RoGetActivationFactory -func RoGetActivationFactory(clsid string, iid *GUID) (ins *IInspectable, err error) { - return nil, NewError(E_NOTIMPL) -} - -// HString is handle string for pointers. -type HString uintptr - -// NewHString returns a new HString for Go string. -func NewHString(s string) (hstring HString, err error) { - return HString(uintptr(0)), NewError(E_NOTIMPL) -} - -// DeleteHString deletes HString. 
-func DeleteHString(hstring HString) (err error) { - return NewError(E_NOTIMPL) -} - -// String returns Go string value of HString. -func (h HString) String() string { - return "" -} diff --git a/vendor/github.com/go-openapi/analysis/.codecov.yml b/vendor/github.com/go-openapi/analysis/.codecov.yml deleted file mode 100644 index 841c4281e..000000000 --- a/vendor/github.com/go-openapi/analysis/.codecov.yml +++ /dev/null @@ -1,5 +0,0 @@ -coverage: - status: - patch: - default: - target: 80% diff --git a/vendor/github.com/go-openapi/analysis/.gitattributes b/vendor/github.com/go-openapi/analysis/.gitattributes deleted file mode 100644 index d020be8ea..000000000 --- a/vendor/github.com/go-openapi/analysis/.gitattributes +++ /dev/null @@ -1,2 +0,0 @@ -*.go text eol=lf - diff --git a/vendor/github.com/go-openapi/analysis/.gitignore b/vendor/github.com/go-openapi/analysis/.gitignore deleted file mode 100644 index 87c3bd3e6..000000000 --- a/vendor/github.com/go-openapi/analysis/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -secrets.yml -coverage.out -coverage.txt -*.cov -.idea diff --git a/vendor/github.com/go-openapi/analysis/.golangci.yml b/vendor/github.com/go-openapi/analysis/.golangci.yml deleted file mode 100644 index 8cad29879..000000000 --- a/vendor/github.com/go-openapi/analysis/.golangci.yml +++ /dev/null @@ -1,53 +0,0 @@ -linters-settings: - govet: - check-shadowing: true - golint: - min-confidence: 0 - gocyclo: - min-complexity: 40 - gocognit: - min-complexity: 40 - maligned: - suggest-new: true - dupl: - threshold: 150 - goconst: - min-len: 2 - min-occurrences: 4 - -linters: - enable-all: true - disable: - - maligned - - lll - - gochecknoglobals - - gochecknoinits - # scopelint is useful, but also reports false positives - # that unfortunately can't be disabled. So we disable the - # linter rather than changing code that works. - # see: https://github.com/kyoh86/scopelint/issues/4 - - scopelint - - godox - - gocognit - #- whitespace - - wsl - - funlen - - testpackage - - wrapcheck - #- nlreturn - - gomnd - - goerr113 - - exhaustivestruct - #- errorlint - #- nestif - - gofumpt - - godot - - gci - - dogsled - - paralleltest - - tparallel - - thelper - - ifshort - - forbidigo - - cyclop - - varnamelen diff --git a/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065e..000000000 --- a/vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. 
- -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/analysis/LICENSE b/vendor/github.com/go-openapi/analysis/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/go-openapi/analysis/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/go-openapi/analysis/README.md b/vendor/github.com/go-openapi/analysis/README.md deleted file mode 100644 index aad6da10f..000000000 --- a/vendor/github.com/go-openapi/analysis/README.md +++ /dev/null @@ -1,31 +0,0 @@ -# OpenAPI initiative analysis - -[![Build Status](https://travis-ci.org/go-openapi/analysis.svg?branch=master)](https://travis-ci.org/go-openapi/analysis) -[![Build status](https://ci.appveyor.com/api/projects/status/x377t5o9ennm847o/branch/master?svg=true)](https://ci.appveyor.com/project/casualjim/go-openapi/analysis/branch/master) -[![codecov](https://codecov.io/gh/go-openapi/analysis/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/analysis) -[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/analysis/master/LICENSE) -[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/analysis.svg)](https://pkg.go.dev/github.com/go-openapi/analysis) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/analysis)](https://goreportcard.com/report/github.com/go-openapi/analysis) - - -A foundational library to analyze an OAI specification document for easier reasoning about the content. - -## What's inside? - -* A analyzer providing methods to walk the functional content of a specification -* A spec flattener producing a self-contained document bundle, while preserving `$ref`s -* A spec merger ("mixin") to merge several spec documents into a primary spec -* A spec "fixer" ensuring that response descriptions are non empty - -[Documentation](https://godoc.org/github.com/go-openapi/analysis) - -## FAQ - -* Does this library support OpenAPI 3? - -> No. -> This package currently only supports OpenAPI 2.0 (aka Swagger 2.0). -> There is no plan to make it evolve toward supporting OpenAPI 3.x. -> This [discussion thread](https://github.com/go-openapi/spec/issues/21) relates the full story. -> diff --git a/vendor/github.com/go-openapi/analysis/analyzer.go b/vendor/github.com/go-openapi/analysis/analyzer.go deleted file mode 100644 index c17aee1b6..000000000 --- a/vendor/github.com/go-openapi/analysis/analyzer.go +++ /dev/null @@ -1,1064 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package analysis - -import ( - "fmt" - slashpath "path" - "strconv" - "strings" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/spec" - "github.com/go-openapi/swag" -) - -type referenceAnalysis struct { - schemas map[string]spec.Ref - responses map[string]spec.Ref - parameters map[string]spec.Ref - items map[string]spec.Ref - headerItems map[string]spec.Ref - parameterItems map[string]spec.Ref - allRefs map[string]spec.Ref - pathItems map[string]spec.Ref -} - -func (r *referenceAnalysis) addRef(key string, ref spec.Ref) { - r.allRefs["#"+key] = ref -} - -func (r *referenceAnalysis) addItemsRef(key string, items *spec.Items, location string) { - r.items["#"+key] = items.Ref - r.addRef(key, items.Ref) - if location == "header" { - // NOTE: in swagger 2.0, headers and parameters (but not body param schemas) are simple schemas - // and $ref are not supported here. However it is possible to analyze this. - r.headerItems["#"+key] = items.Ref - } else { - r.parameterItems["#"+key] = items.Ref - } -} - -func (r *referenceAnalysis) addSchemaRef(key string, ref SchemaRef) { - r.schemas["#"+key] = ref.Schema.Ref - r.addRef(key, ref.Schema.Ref) -} - -func (r *referenceAnalysis) addResponseRef(key string, resp *spec.Response) { - r.responses["#"+key] = resp.Ref - r.addRef(key, resp.Ref) -} - -func (r *referenceAnalysis) addParamRef(key string, param *spec.Parameter) { - r.parameters["#"+key] = param.Ref - r.addRef(key, param.Ref) -} - -func (r *referenceAnalysis) addPathItemRef(key string, pathItem *spec.PathItem) { - r.pathItems["#"+key] = pathItem.Ref - r.addRef(key, pathItem.Ref) -} - -type patternAnalysis struct { - parameters map[string]string - headers map[string]string - items map[string]string - schemas map[string]string - allPatterns map[string]string -} - -func (p *patternAnalysis) addPattern(key, pattern string) { - p.allPatterns["#"+key] = pattern -} - -func (p *patternAnalysis) addParameterPattern(key, pattern string) { - p.parameters["#"+key] = pattern - p.addPattern(key, pattern) -} - -func (p *patternAnalysis) addHeaderPattern(key, pattern string) { - p.headers["#"+key] = pattern - p.addPattern(key, pattern) -} - -func (p *patternAnalysis) addItemsPattern(key, pattern string) { - p.items["#"+key] = pattern - p.addPattern(key, pattern) -} - -func (p *patternAnalysis) addSchemaPattern(key, pattern string) { - p.schemas["#"+key] = pattern - p.addPattern(key, pattern) -} - -type enumAnalysis struct { - parameters map[string][]interface{} - headers map[string][]interface{} - items map[string][]interface{} - schemas map[string][]interface{} - allEnums map[string][]interface{} -} - -func (p *enumAnalysis) addEnum(key string, enum []interface{}) { - p.allEnums["#"+key] = enum -} - -func (p *enumAnalysis) addParameterEnum(key string, enum []interface{}) { - p.parameters["#"+key] = enum - p.addEnum(key, enum) -} - -func (p *enumAnalysis) addHeaderEnum(key string, enum []interface{}) { - p.headers["#"+key] = enum - p.addEnum(key, enum) -} - -func (p *enumAnalysis) addItemsEnum(key string, enum []interface{}) { - p.items["#"+key] = enum - p.addEnum(key, enum) -} - -func (p *enumAnalysis) addSchemaEnum(key string, enum []interface{}) { - p.schemas["#"+key] = enum - p.addEnum(key, enum) -} - -// New takes a swagger spec object and returns an analyzed spec document. -// The analyzed document contains a number of indices that make it easier to -// reason about semantics of a swagger specification for use in code generation -// or validation etc. 
-func New(doc *spec.Swagger) *Spec { - a := &Spec{ - spec: doc, - references: referenceAnalysis{}, - patterns: patternAnalysis{}, - enums: enumAnalysis{}, - } - a.reset() - a.initialize() - - return a -} - -// Spec is an analyzed specification object. It takes a swagger spec object and turns it into a registry -// with a bunch of utility methods to act on the information in the spec. -type Spec struct { - spec *spec.Swagger - consumes map[string]struct{} - produces map[string]struct{} - authSchemes map[string]struct{} - operations map[string]map[string]*spec.Operation - references referenceAnalysis - patterns patternAnalysis - enums enumAnalysis - allSchemas map[string]SchemaRef - allOfs map[string]SchemaRef -} - -func (s *Spec) reset() { - s.consumes = make(map[string]struct{}, 150) - s.produces = make(map[string]struct{}, 150) - s.authSchemes = make(map[string]struct{}, 150) - s.operations = make(map[string]map[string]*spec.Operation, 150) - s.allSchemas = make(map[string]SchemaRef, 150) - s.allOfs = make(map[string]SchemaRef, 150) - s.references.schemas = make(map[string]spec.Ref, 150) - s.references.pathItems = make(map[string]spec.Ref, 150) - s.references.responses = make(map[string]spec.Ref, 150) - s.references.parameters = make(map[string]spec.Ref, 150) - s.references.items = make(map[string]spec.Ref, 150) - s.references.headerItems = make(map[string]spec.Ref, 150) - s.references.parameterItems = make(map[string]spec.Ref, 150) - s.references.allRefs = make(map[string]spec.Ref, 150) - s.patterns.parameters = make(map[string]string, 150) - s.patterns.headers = make(map[string]string, 150) - s.patterns.items = make(map[string]string, 150) - s.patterns.schemas = make(map[string]string, 150) - s.patterns.allPatterns = make(map[string]string, 150) - s.enums.parameters = make(map[string][]interface{}, 150) - s.enums.headers = make(map[string][]interface{}, 150) - s.enums.items = make(map[string][]interface{}, 150) - s.enums.schemas = make(map[string][]interface{}, 150) - s.enums.allEnums = make(map[string][]interface{}, 150) -} - -func (s *Spec) reload() { - s.reset() - s.initialize() -} - -func (s *Spec) initialize() { - for _, c := range s.spec.Consumes { - s.consumes[c] = struct{}{} - } - for _, c := range s.spec.Produces { - s.produces[c] = struct{}{} - } - for _, ss := range s.spec.Security { - for k := range ss { - s.authSchemes[k] = struct{}{} - } - } - for path, pathItem := range s.AllPaths() { - s.analyzeOperations(path, &pathItem) //#nosec - } - - for name, parameter := range s.spec.Parameters { - refPref := slashpath.Join("/parameters", jsonpointer.Escape(name)) - if parameter.Items != nil { - s.analyzeItems("items", parameter.Items, refPref, "parameter") - } - if parameter.In == "body" && parameter.Schema != nil { - s.analyzeSchema("schema", parameter.Schema, refPref) - } - if parameter.Pattern != "" { - s.patterns.addParameterPattern(refPref, parameter.Pattern) - } - if len(parameter.Enum) > 0 { - s.enums.addParameterEnum(refPref, parameter.Enum) - } - } - - for name, response := range s.spec.Responses { - refPref := slashpath.Join("/responses", jsonpointer.Escape(name)) - for k, v := range response.Headers { - hRefPref := slashpath.Join(refPref, "headers", k) - if v.Items != nil { - s.analyzeItems("items", v.Items, hRefPref, "header") - } - if v.Pattern != "" { - s.patterns.addHeaderPattern(hRefPref, v.Pattern) - } - if len(v.Enum) > 0 { - s.enums.addHeaderEnum(hRefPref, v.Enum) - } - } - if response.Schema != nil { - s.analyzeSchema("schema", response.Schema, refPref) - } - 
} - - for name := range s.spec.Definitions { - schema := s.spec.Definitions[name] - s.analyzeSchema(name, &schema, "/definitions") - } - // TODO: after analyzing all things and flattening schemas etc - // resolve all the collected references to their final representations - // best put in a separate method because this could get expensive -} - -func (s *Spec) analyzeOperations(path string, pi *spec.PathItem) { - // TODO: resolve refs here? - // Currently, operations declared via pathItem $ref are known only after expansion - op := pi - if pi.Ref.String() != "" { - key := slashpath.Join("/paths", jsonpointer.Escape(path)) - s.references.addPathItemRef(key, pi) - } - s.analyzeOperation("GET", path, op.Get) - s.analyzeOperation("PUT", path, op.Put) - s.analyzeOperation("POST", path, op.Post) - s.analyzeOperation("PATCH", path, op.Patch) - s.analyzeOperation("DELETE", path, op.Delete) - s.analyzeOperation("HEAD", path, op.Head) - s.analyzeOperation("OPTIONS", path, op.Options) - for i, param := range op.Parameters { - refPref := slashpath.Join("/paths", jsonpointer.Escape(path), "parameters", strconv.Itoa(i)) - if param.Ref.String() != "" { - s.references.addParamRef(refPref, ¶m) //#nosec - } - if param.Pattern != "" { - s.patterns.addParameterPattern(refPref, param.Pattern) - } - if len(param.Enum) > 0 { - s.enums.addParameterEnum(refPref, param.Enum) - } - if param.Items != nil { - s.analyzeItems("items", param.Items, refPref, "parameter") - } - if param.Schema != nil { - s.analyzeSchema("schema", param.Schema, refPref) - } - } -} - -func (s *Spec) analyzeItems(name string, items *spec.Items, prefix, location string) { - if items == nil { - return - } - refPref := slashpath.Join(prefix, name) - s.analyzeItems(name, items.Items, refPref, location) - if items.Ref.String() != "" { - s.references.addItemsRef(refPref, items, location) - } - if items.Pattern != "" { - s.patterns.addItemsPattern(refPref, items.Pattern) - } - if len(items.Enum) > 0 { - s.enums.addItemsEnum(refPref, items.Enum) - } -} - -func (s *Spec) analyzeParameter(prefix string, i int, param spec.Parameter) { - refPref := slashpath.Join(prefix, "parameters", strconv.Itoa(i)) - if param.Ref.String() != "" { - s.references.addParamRef(refPref, ¶m) //#nosec - } - - if param.Pattern != "" { - s.patterns.addParameterPattern(refPref, param.Pattern) - } - - if len(param.Enum) > 0 { - s.enums.addParameterEnum(refPref, param.Enum) - } - - s.analyzeItems("items", param.Items, refPref, "parameter") - if param.In == "body" && param.Schema != nil { - s.analyzeSchema("schema", param.Schema, refPref) - } -} - -func (s *Spec) analyzeOperation(method, path string, op *spec.Operation) { - if op == nil { - return - } - - for _, c := range op.Consumes { - s.consumes[c] = struct{}{} - } - - for _, c := range op.Produces { - s.produces[c] = struct{}{} - } - - for _, ss := range op.Security { - for k := range ss { - s.authSchemes[k] = struct{}{} - } - } - - if _, ok := s.operations[method]; !ok { - s.operations[method] = make(map[string]*spec.Operation) - } - - s.operations[method][path] = op - prefix := slashpath.Join("/paths", jsonpointer.Escape(path), strings.ToLower(method)) - for i, param := range op.Parameters { - s.analyzeParameter(prefix, i, param) - } - - if op.Responses == nil { - return - } - - if op.Responses.Default != nil { - s.analyzeDefaultResponse(prefix, op.Responses.Default) - } - - for k, res := range op.Responses.StatusCodeResponses { - s.analyzeResponse(prefix, k, res) - } -} - -func (s *Spec) analyzeDefaultResponse(prefix 
string, res *spec.Response) { - refPref := slashpath.Join(prefix, "responses", "default") - if res.Ref.String() != "" { - s.references.addResponseRef(refPref, res) - } - - for k, v := range res.Headers { - hRefPref := slashpath.Join(refPref, "headers", k) - s.analyzeItems("items", v.Items, hRefPref, "header") - if v.Pattern != "" { - s.patterns.addHeaderPattern(hRefPref, v.Pattern) - } - } - - if res.Schema != nil { - s.analyzeSchema("schema", res.Schema, refPref) - } -} - -func (s *Spec) analyzeResponse(prefix string, k int, res spec.Response) { - refPref := slashpath.Join(prefix, "responses", strconv.Itoa(k)) - if res.Ref.String() != "" { - s.references.addResponseRef(refPref, &res) //#nosec - } - - for k, v := range res.Headers { - hRefPref := slashpath.Join(refPref, "headers", k) - s.analyzeItems("items", v.Items, hRefPref, "header") - if v.Pattern != "" { - s.patterns.addHeaderPattern(hRefPref, v.Pattern) - } - - if len(v.Enum) > 0 { - s.enums.addHeaderEnum(hRefPref, v.Enum) - } - } - - if res.Schema != nil { - s.analyzeSchema("schema", res.Schema, refPref) - } -} - -func (s *Spec) analyzeSchema(name string, schema *spec.Schema, prefix string) { - refURI := slashpath.Join(prefix, jsonpointer.Escape(name)) - schRef := SchemaRef{ - Name: name, - Schema: schema, - Ref: spec.MustCreateRef("#" + refURI), - TopLevel: prefix == "/definitions", - } - - s.allSchemas["#"+refURI] = schRef - - if schema.Ref.String() != "" { - s.references.addSchemaRef(refURI, schRef) - } - - if schema.Pattern != "" { - s.patterns.addSchemaPattern(refURI, schema.Pattern) - } - - if len(schema.Enum) > 0 { - s.enums.addSchemaEnum(refURI, schema.Enum) - } - - for k, v := range schema.Definitions { - v := v - s.analyzeSchema(k, &v, slashpath.Join(refURI, "definitions")) - } - - for k, v := range schema.Properties { - v := v - s.analyzeSchema(k, &v, slashpath.Join(refURI, "properties")) - } - - for k, v := range schema.PatternProperties { - v := v - // NOTE: swagger 2.0 does not support PatternProperties. - // However it is possible to analyze this in a schema - s.analyzeSchema(k, &v, slashpath.Join(refURI, "patternProperties")) - } - - for i := range schema.AllOf { - v := &schema.AllOf[i] - s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "allOf")) - } - - if len(schema.AllOf) > 0 { - s.allOfs["#"+refURI] = schRef - } - - for i := range schema.AnyOf { - v := &schema.AnyOf[i] - // NOTE: swagger 2.0 does not support anyOf constructs. - // However it is possible to analyze this in a schema - s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "anyOf")) - } - - for i := range schema.OneOf { - v := &schema.OneOf[i] - // NOTE: swagger 2.0 does not support oneOf constructs. - // However it is possible to analyze this in a schema - s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "oneOf")) - } - - if schema.Not != nil { - // NOTE: swagger 2.0 does not support "not" constructs. - // However it is possible to analyze this in a schema - s.analyzeSchema("not", schema.Not, refURI) - } - - if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil { - s.analyzeSchema("additionalProperties", schema.AdditionalProperties.Schema, refURI) - } - - if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil { - // NOTE: swagger 2.0 does not support AdditionalItems. 
- // However it is possible to analyze this in a schema - s.analyzeSchema("additionalItems", schema.AdditionalItems.Schema, refURI) - } - - if schema.Items != nil { - if schema.Items.Schema != nil { - s.analyzeSchema("items", schema.Items.Schema, refURI) - } - - for i := range schema.Items.Schemas { - sch := &schema.Items.Schemas[i] - s.analyzeSchema(strconv.Itoa(i), sch, slashpath.Join(refURI, "items")) - } - } -} - -// SecurityRequirement is a representation of a security requirement for an operation -type SecurityRequirement struct { - Name string - Scopes []string -} - -// SecurityRequirementsFor gets the security requirements for the operation -func (s *Spec) SecurityRequirementsFor(operation *spec.Operation) [][]SecurityRequirement { - if s.spec.Security == nil && operation.Security == nil { - return nil - } - - schemes := s.spec.Security - if operation.Security != nil { - schemes = operation.Security - } - - result := [][]SecurityRequirement{} - for _, scheme := range schemes { - if len(scheme) == 0 { - // append a zero object for anonymous - result = append(result, []SecurityRequirement{{}}) - - continue - } - - var reqs []SecurityRequirement - for k, v := range scheme { - if v == nil { - v = []string{} - } - reqs = append(reqs, SecurityRequirement{Name: k, Scopes: v}) - } - - result = append(result, reqs) - } - - return result -} - -// SecurityDefinitionsForRequirements gets the matching security definitions for a set of requirements -func (s *Spec) SecurityDefinitionsForRequirements(requirements []SecurityRequirement) map[string]spec.SecurityScheme { - result := make(map[string]spec.SecurityScheme) - - for _, v := range requirements { - if definition, ok := s.spec.SecurityDefinitions[v.Name]; ok { - if definition != nil { - result[v.Name] = *definition - } - } - } - - return result -} - -// SecurityDefinitionsFor gets the matching security definitions for a set of requirements -func (s *Spec) SecurityDefinitionsFor(operation *spec.Operation) map[string]spec.SecurityScheme { - requirements := s.SecurityRequirementsFor(operation) - if len(requirements) == 0 { - return nil - } - - result := make(map[string]spec.SecurityScheme) - for _, reqs := range requirements { - for _, v := range reqs { - if v.Name == "" { - // optional requirement - continue - } - - if _, ok := result[v.Name]; ok { - // duplicate requirement - continue - } - - if definition, ok := s.spec.SecurityDefinitions[v.Name]; ok { - if definition != nil { - result[v.Name] = *definition - } - } - } - } - - return result -} - -// ConsumesFor gets the mediatypes for the operation -func (s *Spec) ConsumesFor(operation *spec.Operation) []string { - if len(operation.Consumes) == 0 { - cons := make(map[string]struct{}, len(s.spec.Consumes)) - for _, k := range s.spec.Consumes { - cons[k] = struct{}{} - } - - return s.structMapKeys(cons) - } - - cons := make(map[string]struct{}, len(operation.Consumes)) - for _, c := range operation.Consumes { - cons[c] = struct{}{} - } - - return s.structMapKeys(cons) -} - -// ProducesFor gets the mediatypes for the operation -func (s *Spec) ProducesFor(operation *spec.Operation) []string { - if len(operation.Produces) == 0 { - prod := make(map[string]struct{}, len(s.spec.Produces)) - for _, k := range s.spec.Produces { - prod[k] = struct{}{} - } - - return s.structMapKeys(prod) - } - - prod := make(map[string]struct{}, len(operation.Produces)) - for _, c := range operation.Produces { - prod[c] = struct{}{} - } - - return s.structMapKeys(prod) -} - -func mapKeyFromParam(param *spec.Parameter) 
string { - return fmt.Sprintf("%s#%s", param.In, fieldNameFromParam(param)) -} - -func fieldNameFromParam(param *spec.Parameter) string { - // TODO: this should be x-go-name - if nm, ok := param.Extensions.GetString("go-name"); ok { - return nm - } - - return swag.ToGoName(param.Name) -} - -// ErrorOnParamFunc is a callback function to be invoked -// whenever an error is encountered while resolving references -// on parameters. -// -// This function takes as input the spec.Parameter which triggered the -// error and the error itself. -// -// If the callback function returns false, the calling function should bail. -// -// If it returns true, the calling function should continue evaluating parameters. -// A nil ErrorOnParamFunc must be evaluated as equivalent to panic(). -type ErrorOnParamFunc func(spec.Parameter, error) bool - -func (s *Spec) paramsAsMap(parameters []spec.Parameter, res map[string]spec.Parameter, callmeOnError ErrorOnParamFunc) { - for _, param := range parameters { - pr := param - if pr.Ref.String() == "" { - res[mapKeyFromParam(&pr)] = pr - - continue - } - - // resolve $ref - if callmeOnError == nil { - callmeOnError = func(_ spec.Parameter, err error) bool { - panic(err) - } - } - - obj, _, err := pr.Ref.GetPointer().Get(s.spec) - if err != nil { - if callmeOnError(param, fmt.Errorf("invalid reference: %q", pr.Ref.String())) { - continue - } - - break - } - - objAsParam, ok := obj.(spec.Parameter) - if !ok { - if callmeOnError(param, fmt.Errorf("resolved reference is not a parameter: %q", pr.Ref.String())) { - continue - } - - break - } - - pr = objAsParam - res[mapKeyFromParam(&pr)] = pr - } -} - -// ParametersFor the specified operation id. -// -// Assumes parameters properly resolve references if any and that -// such references actually resolve to a parameter object. -// Otherwise, panics. -func (s *Spec) ParametersFor(operationID string) []spec.Parameter { - return s.SafeParametersFor(operationID, nil) -} - -// SafeParametersFor the specified operation id. -// -// Does not assume parameters properly resolve references or that -// such references actually resolve to a parameter object. -// -// Upon error, invoke a ErrorOnParamFunc callback with the erroneous -// parameters. If the callback is set to nil, panics upon errors. -func (s *Spec) SafeParametersFor(operationID string, callmeOnError ErrorOnParamFunc) []spec.Parameter { - gatherParams := func(pi *spec.PathItem, op *spec.Operation) []spec.Parameter { - bag := make(map[string]spec.Parameter) - s.paramsAsMap(pi.Parameters, bag, callmeOnError) - s.paramsAsMap(op.Parameters, bag, callmeOnError) - - var res []spec.Parameter - for _, v := range bag { - res = append(res, v) - } - - return res - } - - for _, pi := range s.spec.Paths.Paths { - if pi.Get != nil && pi.Get.ID == operationID { - return gatherParams(&pi, pi.Get) //#nosec - } - if pi.Head != nil && pi.Head.ID == operationID { - return gatherParams(&pi, pi.Head) //#nosec - } - if pi.Options != nil && pi.Options.ID == operationID { - return gatherParams(&pi, pi.Options) //#nosec - } - if pi.Post != nil && pi.Post.ID == operationID { - return gatherParams(&pi, pi.Post) //#nosec - } - if pi.Patch != nil && pi.Patch.ID == operationID { - return gatherParams(&pi, pi.Patch) //#nosec - } - if pi.Put != nil && pi.Put.ID == operationID { - return gatherParams(&pi, pi.Put) //#nosec - } - if pi.Delete != nil && pi.Delete.ID == operationID { - return gatherParams(&pi, pi.Delete) //#nosec - } - } - - return nil -} - -// ParamsFor the specified method and path. 
Aggregates them with the defaults etc, so it's all the params that -// apply for the method and path. -// -// Assumes parameters properly resolve references if any and that -// such references actually resolve to a parameter object. -// Otherwise, panics. -func (s *Spec) ParamsFor(method, path string) map[string]spec.Parameter { - return s.SafeParamsFor(method, path, nil) -} - -// SafeParamsFor the specified method and path. Aggregates them with the defaults etc, so it's all the params that -// apply for the method and path. -// -// Does not assume parameters properly resolve references or that -// such references actually resolve to a parameter object. -// -// Upon error, invoke a ErrorOnParamFunc callback with the erroneous -// parameters. If the callback is set to nil, panics upon errors. -func (s *Spec) SafeParamsFor(method, path string, callmeOnError ErrorOnParamFunc) map[string]spec.Parameter { - res := make(map[string]spec.Parameter) - if pi, ok := s.spec.Paths.Paths[path]; ok { - s.paramsAsMap(pi.Parameters, res, callmeOnError) - s.paramsAsMap(s.operations[strings.ToUpper(method)][path].Parameters, res, callmeOnError) - } - - return res -} - -// OperationForName gets the operation for the given id -func (s *Spec) OperationForName(operationID string) (string, string, *spec.Operation, bool) { - for method, pathItem := range s.operations { - for path, op := range pathItem { - if operationID == op.ID { - return method, path, op, true - } - } - } - - return "", "", nil, false -} - -// OperationFor the given method and path -func (s *Spec) OperationFor(method, path string) (*spec.Operation, bool) { - if mp, ok := s.operations[strings.ToUpper(method)]; ok { - op, fn := mp[path] - - return op, fn - } - - return nil, false -} - -// Operations gathers all the operations specified in the spec document -func (s *Spec) Operations() map[string]map[string]*spec.Operation { - return s.operations -} - -func (s *Spec) structMapKeys(mp map[string]struct{}) []string { - if len(mp) == 0 { - return nil - } - - result := make([]string, 0, len(mp)) - for k := range mp { - result = append(result, k) - } - - return result -} - -// AllPaths returns all the paths in the swagger spec -func (s *Spec) AllPaths() map[string]spec.PathItem { - if s.spec == nil || s.spec.Paths == nil { - return nil - } - - return s.spec.Paths.Paths -} - -// OperationIDs gets all the operation ids based on method an dpath -func (s *Spec) OperationIDs() []string { - if len(s.operations) == 0 { - return nil - } - - result := make([]string, 0, len(s.operations)) - for method, v := range s.operations { - for p, o := range v { - if o.ID != "" { - result = append(result, o.ID) - } else { - result = append(result, fmt.Sprintf("%s %s", strings.ToUpper(method), p)) - } - } - } - - return result -} - -// OperationMethodPaths gets all the operation ids based on method an dpath -func (s *Spec) OperationMethodPaths() []string { - if len(s.operations) == 0 { - return nil - } - - result := make([]string, 0, len(s.operations)) - for method, v := range s.operations { - for p := range v { - result = append(result, fmt.Sprintf("%s %s", strings.ToUpper(method), p)) - } - } - - return result -} - -// RequiredConsumes gets all the distinct consumes that are specified in the specification document -func (s *Spec) RequiredConsumes() []string { - return s.structMapKeys(s.consumes) -} - -// RequiredProduces gets all the distinct produces that are specified in the specification document -func (s *Spec) RequiredProduces() []string { - return 
s.structMapKeys(s.produces) -} - -// RequiredSecuritySchemes gets all the distinct security schemes that are specified in the swagger spec -func (s *Spec) RequiredSecuritySchemes() []string { - return s.structMapKeys(s.authSchemes) -} - -// SchemaRef is a reference to a schema -type SchemaRef struct { - Name string - Ref spec.Ref - Schema *spec.Schema - TopLevel bool -} - -// SchemasWithAllOf returns schema references to all schemas that are defined -// with an allOf key -func (s *Spec) SchemasWithAllOf() (result []SchemaRef) { - for _, v := range s.allOfs { - result = append(result, v) - } - - return -} - -// AllDefinitions returns schema references for all the definitions that were discovered -func (s *Spec) AllDefinitions() (result []SchemaRef) { - for _, v := range s.allSchemas { - result = append(result, v) - } - - return -} - -// AllDefinitionReferences returns json refs for all the discovered schemas -func (s *Spec) AllDefinitionReferences() (result []string) { - for _, v := range s.references.schemas { - result = append(result, v.String()) - } - - return -} - -// AllParameterReferences returns json refs for all the discovered parameters -func (s *Spec) AllParameterReferences() (result []string) { - for _, v := range s.references.parameters { - result = append(result, v.String()) - } - - return -} - -// AllResponseReferences returns json refs for all the discovered responses -func (s *Spec) AllResponseReferences() (result []string) { - for _, v := range s.references.responses { - result = append(result, v.String()) - } - - return -} - -// AllPathItemReferences returns the references for all the items -func (s *Spec) AllPathItemReferences() (result []string) { - for _, v := range s.references.pathItems { - result = append(result, v.String()) - } - - return -} - -// AllItemsReferences returns the references for all the items in simple schemas (parameters or headers). -// -// NOTE: since Swagger 2.0 forbids $ref in simple params, this should always yield an empty slice for a valid -// Swagger 2.0 spec. 
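For orientation, the exported surface of this removed analyzer file (New plus the registry accessors above) is typically driven as in the minimal sketch below; the inline swagger document and printed fields are illustrative assumptions, not code from this repository.

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/go-openapi/analysis"
	"github.com/go-openapi/spec"
)

// illustrative document; any valid Swagger 2.0 JSON works here
const swaggerJSON = `{
  "swagger": "2.0",
  "info": {"title": "petstore", "version": "1.0.0"},
  "consumes": ["application/json"],
  "produces": ["application/json"],
  "paths": {
    "/pets": {
      "get": {"operationId": "listPets", "responses": {"200": {"description": "ok"}}}
    }
  },
  "definitions": {
    "Pet": {"type": "object", "properties": {"name": {"type": "string"}}}
  }
}`

func main() {
	var doc spec.Swagger
	if err := json.Unmarshal([]byte(swaggerJSON), &doc); err != nil {
		log.Fatal(err)
	}

	// New builds the registry maps (operations, references, patterns, enums)
	an := analysis.New(&doc)

	fmt.Println("operation ids:", an.OperationIDs())
	fmt.Println("required consumes:", an.RequiredConsumes())
	fmt.Println("definitions discovered:", len(an.AllDefinitions()))
	fmt.Println("allOf composites:", len(an.SchemasWithAllOf()))
}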
-func (s *Spec) AllItemsReferences() (result []string) { - for _, v := range s.references.items { - result = append(result, v.String()) - } - - return -} - -// AllReferences returns all the references found in the document, with possible duplicates -func (s *Spec) AllReferences() (result []string) { - for _, v := range s.references.allRefs { - result = append(result, v.String()) - } - - return -} - -// AllRefs returns all the unique references found in the document -func (s *Spec) AllRefs() (result []spec.Ref) { - set := make(map[string]struct{}) - for _, v := range s.references.allRefs { - a := v.String() - if a == "" { - continue - } - - if _, ok := set[a]; !ok { - set[a] = struct{}{} - result = append(result, v) - } - } - - return -} - -func cloneStringMap(source map[string]string) map[string]string { - res := make(map[string]string, len(source)) - for k, v := range source { - res[k] = v - } - - return res -} - -func cloneEnumMap(source map[string][]interface{}) map[string][]interface{} { - res := make(map[string][]interface{}, len(source)) - for k, v := range source { - res[k] = v - } - - return res -} - -// ParameterPatterns returns all the patterns found in parameters -// the map is cloned to avoid accidental changes -func (s *Spec) ParameterPatterns() map[string]string { - return cloneStringMap(s.patterns.parameters) -} - -// HeaderPatterns returns all the patterns found in response headers -// the map is cloned to avoid accidental changes -func (s *Spec) HeaderPatterns() map[string]string { - return cloneStringMap(s.patterns.headers) -} - -// ItemsPatterns returns all the patterns found in simple array items -// the map is cloned to avoid accidental changes -func (s *Spec) ItemsPatterns() map[string]string { - return cloneStringMap(s.patterns.items) -} - -// SchemaPatterns returns all the patterns found in schemas -// the map is cloned to avoid accidental changes -func (s *Spec) SchemaPatterns() map[string]string { - return cloneStringMap(s.patterns.schemas) -} - -// AllPatterns returns all the patterns found in the spec -// the map is cloned to avoid accidental changes -func (s *Spec) AllPatterns() map[string]string { - return cloneStringMap(s.patterns.allPatterns) -} - -// ParameterEnums returns all the enums found in parameters -// the map is cloned to avoid accidental changes -func (s *Spec) ParameterEnums() map[string][]interface{} { - return cloneEnumMap(s.enums.parameters) -} - -// HeaderEnums returns all the enums found in response headers -// the map is cloned to avoid accidental changes -func (s *Spec) HeaderEnums() map[string][]interface{} { - return cloneEnumMap(s.enums.headers) -} - -// ItemsEnums returns all the enums found in simple array items -// the map is cloned to avoid accidental changes -func (s *Spec) ItemsEnums() map[string][]interface{} { - return cloneEnumMap(s.enums.items) -} - -// SchemaEnums returns all the enums found in schemas -// the map is cloned to avoid accidental changes -func (s *Spec) SchemaEnums() map[string][]interface{} { - return cloneEnumMap(s.enums.schemas) -} - -// AllEnums returns all the enums found in the spec -// the map is cloned to avoid accidental changes -func (s *Spec) AllEnums() map[string][]interface{} { - return cloneEnumMap(s.enums.allEnums) -} diff --git a/vendor/github.com/go-openapi/analysis/appveyor.yml b/vendor/github.com/go-openapi/analysis/appveyor.yml deleted file mode 100644 index c2f6fd733..000000000 --- a/vendor/github.com/go-openapi/analysis/appveyor.yml +++ /dev/null @@ -1,32 +0,0 @@ -version: "0.1.{build}" - 
-clone_folder: C:\go-openapi\analysis -shallow_clone: true # for startup speed -pull_requests: - do_not_increment_build_number: true - -#skip_tags: true -#skip_branch_with_pr: true - -# appveyor.yml -build: off - -environment: - GOPATH: c:\gopath - -stack: go 1.16 - -test_script: - - go test -v -timeout 20m ./... - -deploy: off - -notifications: - - provider: Slack - incoming_webhook: https://hooks.slack.com/services/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 - auth_token: - secure: Sf7kZf7ZGbnwWUMpffHwMu5A0cHkLK2MYY32LNTPj4+/3qC3Ghl7+9v4TSLOqOlCwdRNjOGblAq7s+GDJed6/xgRQl1JtCi1klzZNrYX4q01pgTPvvGcwbBkIYgeMaPeIRcK9OZnud7sRXdttozgTOpytps2U6Js32ip7uj5mHSg2ub0FwoSJwlS6dbezZ8+eDhoha0F/guY99BEwx8Bd+zROrT2TFGsSGOFGN6wFc7moCqTHO/YkWib13a2QNXqOxCCVBy/lt76Wp+JkeFppjHlzs/2lP3EAk13RIUAaesdEUHvIHrzCyNJEd3/+KO2DzsWOYfpktd+KBCvgaYOsoo7ubdT3IROeAegZdCgo/6xgCEsmFc9ZcqCfN5yNx2A+BZ2Vwmpws+bQ1E1+B5HDzzaiLcYfG4X2O210QVGVDLWsv1jqD+uPYeHY2WRfh5ZsIUFvaqgUEnwHwrK44/8REAhQavt1QAj5uJpsRd7CkRVPWRNK+yIky+wgbVUFEchRNmS55E7QWf+W4+4QZkQi7vUTMc9nbTUu2Es9NfvfudOpM2wZbn98fjpb/qq/nRv6Bk+ca+7XD5/IgNLMbWp2ouDdzbiHLCOfDUiHiDJhLfFZx9Bwo7ZwfzeOlbrQX66bx7xRKYmOe4DLrXhNcpbsMa8qbfxlZRCmYbubB/Y8h4= - channel: bots - on_build_success: false - on_build_failure: true - on_build_status_changed: true diff --git a/vendor/github.com/go-openapi/analysis/debug.go b/vendor/github.com/go-openapi/analysis/debug.go deleted file mode 100644 index 33c15704e..000000000 --- a/vendor/github.com/go-openapi/analysis/debug.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package analysis - -import ( - "os" - - "github.com/go-openapi/analysis/internal/debug" -) - -var debugLog = debug.GetLogger("analysis", os.Getenv("SWAGGER_DEBUG") != "") diff --git a/vendor/github.com/go-openapi/analysis/doc.go b/vendor/github.com/go-openapi/analysis/doc.go deleted file mode 100644 index d5294c095..000000000 --- a/vendor/github.com/go-openapi/analysis/doc.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package analysis provides methods to work with a Swagger specification document from -package go-openapi/spec. - -Analyzing a specification - -An analysed specification object (type Spec) provides methods to work with swagger definition. - -Flattening or expanding a specification - -Flattening a specification bundles all remote $ref in the main spec document. 
-Depending on flattening options, additional preprocessing may take place: - - full flattening: replacing all inline complex constructs by a named entry in #/definitions - - expand: replace all $ref's in the document by their expanded content - -Merging several specifications - -Mixin several specifications merges all Swagger constructs, and warns about found conflicts. - -Fixing a specification - -Unmarshalling a specification with golang json unmarshalling may lead to -some unwanted result on present but empty fields. - -Analyzing a Swagger schema - -Swagger schemas are analyzed to determine their complexity and qualify their content. -*/ -package analysis diff --git a/vendor/github.com/go-openapi/analysis/fixer.go b/vendor/github.com/go-openapi/analysis/fixer.go deleted file mode 100644 index 7c2ca0841..000000000 --- a/vendor/github.com/go-openapi/analysis/fixer.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package analysis - -import "github.com/go-openapi/spec" - -// FixEmptyResponseDescriptions replaces empty ("") response -// descriptions in the input with "(empty)" to ensure that the -// resulting Swagger is stays valid. The problem appears to arise -// from reading in valid specs that have a explicit response -// description of "" (valid, response.description is required), but -// due to zero values being omitted upon re-serializing (omitempty) we -// lose them unless we stick some chars in there. -func FixEmptyResponseDescriptions(s *spec.Swagger) { - for k, v := range s.Responses { - FixEmptyDesc(&v) //#nosec - s.Responses[k] = v - } - - if s.Paths == nil { - return - } - - for _, v := range s.Paths.Paths { - if v.Get != nil { - FixEmptyDescs(v.Get.Responses) - } - if v.Put != nil { - FixEmptyDescs(v.Put.Responses) - } - if v.Post != nil { - FixEmptyDescs(v.Post.Responses) - } - if v.Delete != nil { - FixEmptyDescs(v.Delete.Responses) - } - if v.Options != nil { - FixEmptyDescs(v.Options.Responses) - } - if v.Head != nil { - FixEmptyDescs(v.Head.Responses) - } - if v.Patch != nil { - FixEmptyDescs(v.Patch.Responses) - } - } -} - -// FixEmptyDescs adds "(empty)" as the description for any Response in -// the given Responses object that doesn't already have one. -func FixEmptyDescs(rs *spec.Responses) { - FixEmptyDesc(rs.Default) - for k, v := range rs.StatusCodeResponses { - FixEmptyDesc(&v) //#nosec - rs.StatusCodeResponses[k] = v - } -} - -// FixEmptyDesc adds "(empty)" as the description to the given -// Response object if it doesn't already have one and isn't a -// ref. No-op on nil input. 
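As a hedged sketch of the fixer described above: FixEmptyResponseDescriptions is usually applied after unmarshalling and before re-serializing, so that explicitly empty response descriptions survive the omitempty round trip. The inline JSON below is an assumed example, not taken from this patch.

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/go-openapi/analysis"
	"github.com/go-openapi/spec"
)

func main() {
	// a response with an explicitly empty description: valid on input,
	// but dropped on re-marshalling because of omitempty
	raw := []byte(`{
	  "swagger": "2.0",
	  "info": {"title": "t", "version": "1"},
	  "paths": {"/x": {"get": {"responses": {"204": {"description": ""}}}}}
	}`)

	var doc spec.Swagger
	if err := json.Unmarshal(raw, &doc); err != nil {
		log.Fatal(err)
	}

	// empty descriptions are rewritten to "(empty)" so the output stays valid
	analysis.FixEmptyResponseDescriptions(&doc)

	out, _ := json.Marshal(&doc)
	fmt.Println(string(out))
}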
-func FixEmptyDesc(rs *spec.Response) { - if rs == nil || rs.Description != "" || rs.Ref.Ref.GetURL() != nil { - return - } - rs.Description = "(empty)" -} diff --git a/vendor/github.com/go-openapi/analysis/flatten.go b/vendor/github.com/go-openapi/analysis/flatten.go deleted file mode 100644 index 0576220fb..000000000 --- a/vendor/github.com/go-openapi/analysis/flatten.go +++ /dev/null @@ -1,802 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package analysis - -import ( - "fmt" - "log" - "path" - "sort" - "strings" - - "github.com/go-openapi/analysis/internal/flatten/normalize" - "github.com/go-openapi/analysis/internal/flatten/operations" - "github.com/go-openapi/analysis/internal/flatten/replace" - "github.com/go-openapi/analysis/internal/flatten/schutils" - "github.com/go-openapi/analysis/internal/flatten/sortref" - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/spec" -) - -const definitionsPath = "#/definitions" - -// newRef stores information about refs created during the flattening process -type newRef struct { - key string - newName string - path string - isOAIGen bool - resolved bool - schema *spec.Schema - parents []string -} - -// context stores intermediary results from flatten -type context struct { - newRefs map[string]*newRef - warnings []string - resolved map[string]string -} - -func newContext() *context { - return &context{ - newRefs: make(map[string]*newRef, 150), - warnings: make([]string, 0), - resolved: make(map[string]string, 50), - } -} - -// Flatten an analyzed spec and produce a self-contained spec bundle. -// -// There is a minimal and a full flattening mode. -// -// -// Minimally flattening a spec means: -// - Expanding parameters, responses, path items, parameter items and header items (references to schemas are left -// unscathed) -// - Importing external (http, file) references so they become internal to the document -// - Moving every JSON pointer to a $ref to a named definition (i.e. the reworked spec does not contain pointers -// like "$ref": "#/definitions/myObject/allOfs/1") -// -// A minimally flattened spec thus guarantees the following properties: -// - all $refs point to a local definition (i.e. '#/definitions/...') -// - definitions are unique -// -// NOTE: arbitrary JSON pointers (other than $refs to top level definitions) are rewritten as definitions if they -// represent a complex schema or express commonality in the spec. -// Otherwise, they are simply expanded. -// Self-referencing JSON pointers cannot resolve to a type and trigger an error. -// -// -// Minimal flattening is necessary and sufficient for codegen rendering using go-swagger. -// -// Fully flattening a spec means: -// - Moving every complex inline schema to be a definition with an auto-generated name in a depth-first fashion. -// -// By complex, we mean every JSON object with some properties. 
-// Arrays, when they do not define a tuple, -// or empty objects with or without additionalProperties, are not considered complex and remain inline. -// -// NOTE: rewritten schemas get a vendor extension x-go-gen-location so we know from which part of the spec definitions -// have been created. -// -// Available flattening options: -// - Minimal: stops flattening after minimal $ref processing, leaving schema constructs untouched -// - Expand: expand all $ref's in the document (inoperant if Minimal set to true) -// - Verbose: croaks about name conflicts detected -// - RemoveUnused: removes unused parameters, responses and definitions after expansion/flattening -// -// NOTE: expansion removes all $ref save circular $ref, which remain in place -// -// TODO: additional options -// - ProgagateNameExtensions: ensure that created entries properly follow naming rules when their parent have set a -// x-go-name extension -// - LiftAllOfs: -// - limit the flattening of allOf members when simple objects -// - merge allOf with validation only -// - merge allOf with extensions only -// - ... -// -func Flatten(opts FlattenOpts) error { - debugLog("FlattenOpts: %#v", opts) - - opts.flattenContext = newContext() - - // 1. Recursively expand responses, parameters, path items and items in simple schemas. - // - // This simplifies the spec and leaves only the $ref's in schema objects. - if err := expand(&opts); err != nil { - return err - } - - // 2. Strip the current document from absolute $ref's that actually a in the root, - // so we can recognize them as proper definitions - // - // In particular, this works around issue go-openapi/spec#76: leading absolute file in $ref is stripped - if err := normalizeRef(&opts); err != nil { - return err - } - - // 3. Optionally remove shared parameters and responses already expanded (now unused). - // - // Operation parameters (i.e. under paths) remain. - if opts.RemoveUnused { - removeUnusedShared(&opts) - } - - // 4. Import all remote references. - if err := importReferences(&opts); err != nil { - return err - } - - // 5. full flattening: rewrite inline schemas (schemas that aren't simple types or arrays or maps) - if !opts.Minimal && !opts.Expand { - if err := nameInlinedSchemas(&opts); err != nil { - return err - } - } - - // 6. Rewrite JSON pointers other than $ref to named definitions - // and attempt to resolve conflicting names whenever possible. - if err := stripPointersAndOAIGen(&opts); err != nil { - return err - } - - // 7. Strip the spec from unused definitions - if opts.RemoveUnused { - removeUnused(&opts) - } - - // 8. Issue warning notifications, if any - opts.croak() - - // TODO: simplify known schema patterns to flat objects with properties - // examples: - // - lift simple allOf object, - // - empty allOf with validation only or extensions only - // - rework allOf arrays - // - rework allOf additionalProperties - - return nil -} - -func expand(opts *FlattenOpts) error { - if err := spec.ExpandSpec(opts.Swagger(), opts.ExpandOpts(!opts.Expand)); err != nil { - return err - } - - opts.Spec.reload() // re-analyze - - return nil -} - -// normalizeRef strips the current file from any absolute file $ref. 
This works around issue go-openapi/spec#76: -// leading absolute file in $ref is stripped -func normalizeRef(opts *FlattenOpts) error { - debugLog("normalizeRef") - - altered := false - for k, w := range opts.Spec.references.allRefs { - if !strings.HasPrefix(w.String(), opts.BasePath+definitionsPath) { // may be a mix of / and \, depending on OS - continue - } - - altered = true - debugLog("stripping absolute path for: %s", w.String()) - - // strip the base path from definition - if err := replace.UpdateRef(opts.Swagger(), k, - spec.MustCreateRef(path.Join(definitionsPath, path.Base(w.String())))); err != nil { - return err - } - } - - if altered { - opts.Spec.reload() // re-analyze - } - - return nil -} - -func removeUnusedShared(opts *FlattenOpts) { - opts.Swagger().Parameters = nil - opts.Swagger().Responses = nil - - opts.Spec.reload() // re-analyze -} - -func importReferences(opts *FlattenOpts) error { - var ( - imported bool - err error - ) - - for !imported && err == nil { - // iteratively import remote references until none left. - // This inlining deals with name conflicts by introducing auto-generated names ("OAIGen") - imported, err = importExternalReferences(opts) - - opts.Spec.reload() // re-analyze - } - - return err -} - -// nameInlinedSchemas replaces every complex inline construct by a named definition. -func nameInlinedSchemas(opts *FlattenOpts) error { - debugLog("nameInlinedSchemas") - - namer := &InlineSchemaNamer{ - Spec: opts.Swagger(), - Operations: operations.AllOpRefsByRef(opts.Spec, nil), - flattenContext: opts.flattenContext, - opts: opts, - } - - depthFirst := sortref.DepthFirst(opts.Spec.allSchemas) - for _, key := range depthFirst { - sch := opts.Spec.allSchemas[key] - if sch.Schema == nil || sch.Schema.Ref.String() != "" || sch.TopLevel { - continue - } - - asch, err := Schema(SchemaOpts{Schema: sch.Schema, Root: opts.Swagger(), BasePath: opts.BasePath}) - if err != nil { - return fmt.Errorf("schema analysis [%s]: %w", key, err) - } - - if asch.isAnalyzedAsComplex() { // move complex schemas to definitions - if err := namer.Name(key, sch.Schema, asch); err != nil { - return err - } - } - } - - opts.Spec.reload() // re-analyze - - return nil -} - -func removeUnused(opts *FlattenOpts) { - expected := make(map[string]struct{}) - for k := range opts.Swagger().Definitions { - expected[path.Join(definitionsPath, jsonpointer.Escape(k))] = struct{}{} - } - - for _, k := range opts.Spec.AllDefinitionReferences() { - delete(expected, k) - } - - for k := range expected { - debugLog("removing unused definition %s", path.Base(k)) - if opts.Verbose { - log.Printf("info: removing unused definition: %s", path.Base(k)) - } - delete(opts.Swagger().Definitions, path.Base(k)) - } - - opts.Spec.reload() // re-analyze -} - -func importKnownRef(entry sortref.RefRevIdx, refStr, newName string, opts *FlattenOpts) error { - // rewrite ref with already resolved external ref (useful for cyclical refs): - // rewrite external refs to local ones - debugLog("resolving known ref [%s] to %s", refStr, newName) - - for _, key := range entry.Keys { - if err := replace.UpdateRef(opts.Swagger(), key, spec.MustCreateRef(path.Join(definitionsPath, newName))); err != nil { - return err - } - } - - return nil -} - -func importNewRef(entry sortref.RefRevIdx, refStr string, opts *FlattenOpts) error { - var ( - isOAIGen bool - newName string - ) - - debugLog("resolving schema from remote $ref [%s]", refStr) - - sch, err := spec.ResolveRefWithBase(opts.Swagger(), &entry.Ref, opts.ExpandOpts(false)) - if 
err != nil { - return fmt.Errorf("could not resolve schema: %w", err) - } - - // at this stage only $ref analysis matters - partialAnalyzer := &Spec{ - references: referenceAnalysis{}, - patterns: patternAnalysis{}, - enums: enumAnalysis{}, - } - partialAnalyzer.reset() - partialAnalyzer.analyzeSchema("", sch, "/") - - // now rewrite those refs with rebase - for key, ref := range partialAnalyzer.references.allRefs { - if err := replace.UpdateRef(sch, key, spec.MustCreateRef(normalize.RebaseRef(entry.Ref.String(), ref.String()))); err != nil { - return fmt.Errorf("failed to rewrite ref for key %q at %s: %w", key, entry.Ref.String(), err) - } - } - - // generate a unique name - isOAIGen means that a naming conflict was resolved by changing the name - newName, isOAIGen = uniqifyName(opts.Swagger().Definitions, nameFromRef(entry.Ref)) - debugLog("new name for [%s]: %s - with name conflict:%t", strings.Join(entry.Keys, ", "), newName, isOAIGen) - - opts.flattenContext.resolved[refStr] = newName - - // rewrite the external refs to local ones - for _, key := range entry.Keys { - if err := replace.UpdateRef(opts.Swagger(), key, - spec.MustCreateRef(path.Join(definitionsPath, newName))); err != nil { - return err - } - - // keep track of created refs - resolved := false - if _, ok := opts.flattenContext.newRefs[key]; ok { - resolved = opts.flattenContext.newRefs[key].resolved - } - - debugLog("keeping track of ref: %s (%s), resolved: %t", key, newName, resolved) - opts.flattenContext.newRefs[key] = &newRef{ - key: key, - newName: newName, - path: path.Join(definitionsPath, newName), - isOAIGen: isOAIGen, - resolved: resolved, - schema: sch, - } - } - - // add the resolved schema to the definitions - schutils.Save(opts.Swagger(), newName, sch) - - return nil -} - -// importExternalReferences iteratively digs remote references and imports them into the main schema. -// -// At every iteration, new remotes may be found when digging deeper: they are rebased to the current schema before being imported. -// -// This returns true when no more remote references can be found. 
-func importExternalReferences(opts *FlattenOpts) (bool, error) { - debugLog("importExternalReferences") - - groupedRefs := sortref.ReverseIndex(opts.Spec.references.schemas, opts.BasePath) - sortedRefStr := make([]string, 0, len(groupedRefs)) - if opts.flattenContext == nil { - opts.flattenContext = newContext() - } - - // sort $ref resolution to ensure deterministic name conflict resolution - for refStr := range groupedRefs { - sortedRefStr = append(sortedRefStr, refStr) - } - sort.Strings(sortedRefStr) - - complete := true - - for _, refStr := range sortedRefStr { - entry := groupedRefs[refStr] - if entry.Ref.HasFragmentOnly { - continue - } - - complete = false - - newName := opts.flattenContext.resolved[refStr] - if newName != "" { - if err := importKnownRef(entry, refStr, newName, opts); err != nil { - return false, err - } - - continue - } - - // resolve schemas - if err := importNewRef(entry, refStr, opts); err != nil { - return false, err - } - } - - // maintains ref index entries - for k := range opts.flattenContext.newRefs { - r := opts.flattenContext.newRefs[k] - - // update tracking with resolved schemas - if r.schema.Ref.String() != "" { - ref := spec.MustCreateRef(r.path) - sch, err := spec.ResolveRefWithBase(opts.Swagger(), &ref, opts.ExpandOpts(false)) - if err != nil { - return false, fmt.Errorf("could not resolve schema: %w", err) - } - - r.schema = sch - } - - if r.path == k { - continue - } - - // update tracking with renamed keys: got a cascade of refs - renamed := *r - renamed.key = r.path - opts.flattenContext.newRefs[renamed.path] = &renamed - - // indirect ref - r.newName = path.Base(k) - r.schema = spec.RefSchema(r.path) - r.path = k - r.isOAIGen = strings.Contains(k, "OAIGen") - } - - return complete, nil -} - -// stripPointersAndOAIGen removes anonymous JSON pointers from spec and chain with name conflicts handler. -// This loops until the spec has no such pointer and all name conflicts have been reduced as much as possible. -func stripPointersAndOAIGen(opts *FlattenOpts) error { - // name all JSON pointers to anonymous documents - if err := namePointers(opts); err != nil { - return err - } - - // remove unnecessary OAIGen ref (created when flattening external refs creates name conflicts) - hasIntroducedPointerOrInline, ers := stripOAIGen(opts) - if ers != nil { - return ers - } - - // iterate as pointer or OAIGen resolution may introduce inline schemas or pointers - for hasIntroducedPointerOrInline { - if !opts.Minimal { - opts.Spec.reload() // re-analyze - if err := nameInlinedSchemas(opts); err != nil { - return err - } - } - - if err := namePointers(opts); err != nil { - return err - } - - // restrip and re-analyze - var err error - if hasIntroducedPointerOrInline, err = stripOAIGen(opts); err != nil { - return err - } - } - - return nil -} - -// stripOAIGen strips the spec from unnecessary OAIGen constructs, initially created to dedupe flattened definitions. -// -// A dedupe is deemed unnecessary whenever: -// - the only conflict is with its (single) parent: OAIGen is merged into its parent (reinlining) -// - there is a conflict with multiple parents: merge OAIGen in first parent, the rewrite other parents to point to -// the first parent. -// -// This function returns true whenever it re-inlined a complex schema, so the caller may chose to iterate -// pointer and name resolution again. 
-func stripOAIGen(opts *FlattenOpts) (bool, error) { - debugLog("stripOAIGen") - replacedWithComplex := false - - // figure out referers of OAIGen definitions (doing it before the ref start mutating) - for _, r := range opts.flattenContext.newRefs { - updateRefParents(opts.Spec.references.allRefs, r) - } - - for k := range opts.flattenContext.newRefs { - r := opts.flattenContext.newRefs[k] - debugLog("newRefs[%s]: isOAIGen: %t, resolved: %t, name: %s, path:%s, #parents: %d, parents: %v, ref: %s", - k, r.isOAIGen, r.resolved, r.newName, r.path, len(r.parents), r.parents, r.schema.Ref.String()) - - if !r.isOAIGen || len(r.parents) == 0 { - continue - } - - hasReplacedWithComplex, err := stripOAIGenForRef(opts, k, r) - if err != nil { - return replacedWithComplex, err - } - - replacedWithComplex = replacedWithComplex || hasReplacedWithComplex - } - - debugLog("replacedWithComplex: %t", replacedWithComplex) - opts.Spec.reload() // re-analyze - - return replacedWithComplex, nil -} - -// updateRefParents updates all parents of an updated $ref -func updateRefParents(allRefs map[string]spec.Ref, r *newRef) { - if !r.isOAIGen || r.resolved { // bail on already resolved entries (avoid looping) - return - } - for k, v := range allRefs { - if r.path != v.String() { - continue - } - - found := false - for _, p := range r.parents { - if p == k { - found = true - - break - } - } - if !found { - r.parents = append(r.parents, k) - } - } -} - -func stripOAIGenForRef(opts *FlattenOpts, k string, r *newRef) (bool, error) { - replacedWithComplex := false - - pr := sortref.TopmostFirst(r.parents) - - // rewrite first parent schema in hierarchical then lexicographical order - debugLog("rewrite first parent %s with schema", pr[0]) - if err := replace.UpdateRefWithSchema(opts.Swagger(), pr[0], r.schema); err != nil { - return false, err - } - - if pa, ok := opts.flattenContext.newRefs[pr[0]]; ok && pa.isOAIGen { - // update parent in ref index entry - debugLog("update parent entry: %s", pr[0]) - pa.schema = r.schema - pa.resolved = false - replacedWithComplex = true - } - - // rewrite other parents to point to first parent - if len(pr) > 1 { - for _, p := range pr[1:] { - replacingRef := spec.MustCreateRef(pr[0]) - - // set complex when replacing ref is an anonymous jsonpointer: further processing may be required - replacedWithComplex = replacedWithComplex || path.Dir(replacingRef.String()) != definitionsPath - debugLog("rewrite parent with ref: %s", replacingRef.String()) - - // NOTE: it is possible at this stage to introduce json pointers (to non-definitions places). - // Those are stripped later on. 
- if err := replace.UpdateRef(opts.Swagger(), p, replacingRef); err != nil { - return false, err - } - - if pa, ok := opts.flattenContext.newRefs[p]; ok && pa.isOAIGen { - // update parent in ref index - debugLog("update parent entry: %s", p) - pa.schema = r.schema - pa.resolved = false - replacedWithComplex = true - } - } - } - - // remove OAIGen definition - debugLog("removing definition %s", path.Base(r.path)) - delete(opts.Swagger().Definitions, path.Base(r.path)) - - // propagate changes in ref index for keys which have this one as a parent - for kk, value := range opts.flattenContext.newRefs { - if kk == k || !value.isOAIGen || value.resolved { - continue - } - - found := false - newParents := make([]string, 0, len(value.parents)) - for _, parent := range value.parents { - switch { - case parent == r.path: - found = true - parent = pr[0] - case strings.HasPrefix(parent, r.path+"/"): - found = true - parent = path.Join(pr[0], strings.TrimPrefix(parent, r.path)) - } - - newParents = append(newParents, parent) - } - - if found { - value.parents = newParents - } - } - - // mark naming conflict as resolved - debugLog("marking naming conflict resolved for key: %s", r.key) - opts.flattenContext.newRefs[r.key].isOAIGen = false - opts.flattenContext.newRefs[r.key].resolved = true - - // determine if the previous substitution did inline a complex schema - if r.schema != nil && r.schema.Ref.String() == "" { // inline schema - asch, err := Schema(SchemaOpts{Schema: r.schema, Root: opts.Swagger(), BasePath: opts.BasePath}) - if err != nil { - return false, err - } - - debugLog("re-inlined schema: parent: %s, %t", pr[0], asch.isAnalyzedAsComplex()) - replacedWithComplex = replacedWithComplex || !(path.Dir(pr[0]) == definitionsPath) && asch.isAnalyzedAsComplex() - } - - return replacedWithComplex, nil -} - -// namePointers replaces all JSON pointers to anonymous documents by a $ref to a new named definitions. -// -// This is carried on depth-first. Pointers to $refs which are top level definitions are replaced by the $ref itself. -// Pointers to simple types are expanded, unless they express commonality (i.e. several such $ref are used). -func namePointers(opts *FlattenOpts) error { - debugLog("name pointers") - - refsToReplace := make(map[string]SchemaRef, len(opts.Spec.references.schemas)) - for k, ref := range opts.Spec.references.allRefs { - if path.Dir(ref.String()) == definitionsPath { - // this a ref to a top-level definition: ok - continue - } - - result, err := replace.DeepestRef(opts.Swagger(), opts.ExpandOpts(false), ref) - if err != nil { - return fmt.Errorf("at %s, %w", k, err) - } - - replacingRef := result.Ref - sch := result.Schema - if opts.flattenContext != nil { - opts.flattenContext.warnings = append(opts.flattenContext.warnings, result.Warnings...) 
- } - - debugLog("planning pointer to replace at %s: %s, resolved to: %s", k, ref.String(), replacingRef.String()) - refsToReplace[k] = SchemaRef{ - Name: k, // caller - Ref: replacingRef, // called - Schema: sch, - TopLevel: path.Dir(replacingRef.String()) == definitionsPath, - } - } - - depthFirst := sortref.DepthFirst(refsToReplace) - namer := &InlineSchemaNamer{ - Spec: opts.Swagger(), - Operations: operations.AllOpRefsByRef(opts.Spec, nil), - flattenContext: opts.flattenContext, - opts: opts, - } - - for _, key := range depthFirst { - v := refsToReplace[key] - // update current replacement, which may have been updated by previous changes of deeper elements - result, erd := replace.DeepestRef(opts.Swagger(), opts.ExpandOpts(false), v.Ref) - if erd != nil { - return fmt.Errorf("at %s, %w", key, erd) - } - - if opts.flattenContext != nil { - opts.flattenContext.warnings = append(opts.flattenContext.warnings, result.Warnings...) - } - - v.Ref = result.Ref - v.Schema = result.Schema - v.TopLevel = path.Dir(result.Ref.String()) == definitionsPath - debugLog("replacing pointer at %s: resolved to: %s", key, v.Ref.String()) - - if v.TopLevel { - debugLog("replace pointer %s by canonical definition: %s", key, v.Ref.String()) - - // if the schema is a $ref to a top level definition, just rewrite the pointer to this $ref - if err := replace.UpdateRef(opts.Swagger(), key, v.Ref); err != nil { - return err - } - - continue - } - - if err := flattenAnonPointer(key, v, refsToReplace, namer, opts); err != nil { - return err - } - } - - opts.Spec.reload() // re-analyze - - return nil -} - -func flattenAnonPointer(key string, v SchemaRef, refsToReplace map[string]SchemaRef, namer *InlineSchemaNamer, opts *FlattenOpts) error { - // this is a JSON pointer to an anonymous document (internal or external): - // create a definition for this schema when: - // - it is a complex schema - // - or it is pointed by more than one $ref (i.e. expresses commonality) - // otherwise, expand the pointer (single reference to a simple type) - // - // The named definition for this follows the target's key, not the caller's - debugLog("namePointers at %s for %s", key, v.Ref.String()) - - // qualify the expanded schema - asch, ers := Schema(SchemaOpts{Schema: v.Schema, Root: opts.Swagger(), BasePath: opts.BasePath}) - if ers != nil { - return fmt.Errorf("schema analysis [%s]: %w", key, ers) - } - callers := make([]string, 0, 64) - - debugLog("looking for callers") - - an := New(opts.Swagger()) - for k, w := range an.references.allRefs { - r, err := replace.DeepestRef(opts.Swagger(), opts.ExpandOpts(false), w) - if err != nil { - return fmt.Errorf("at %s, %w", key, err) - } - - if opts.flattenContext != nil { - opts.flattenContext.warnings = append(opts.flattenContext.warnings, r.Warnings...) 
- } - - if r.Ref.String() == v.Ref.String() { - callers = append(callers, k) - } - } - - debugLog("callers for %s: %d", v.Ref.String(), len(callers)) - if len(callers) == 0 { - // has already been updated and resolved - return nil - } - - parts := sortref.KeyParts(v.Ref.String()) - debugLog("number of callers for %s: %d", v.Ref.String(), len(callers)) - - // identifying edge case when the namer did nothing because we point to a non-schema object - // no definition is created and we expand the $ref for all callers - if (!asch.IsSimpleSchema || len(callers) > 1) && !parts.IsSharedParam() && !parts.IsSharedResponse() { - debugLog("replace JSON pointer at [%s] by definition: %s", key, v.Ref.String()) - if err := namer.Name(v.Ref.String(), v.Schema, asch); err != nil { - return err - } - - // regular case: we named the $ref as a definition, and we move all callers to this new $ref - for _, caller := range callers { - if caller == key { - continue - } - - // move $ref for next to resolve - debugLog("identified caller of %s at [%s]", v.Ref.String(), caller) - c := refsToReplace[caller] - c.Ref = v.Ref - refsToReplace[caller] = c - } - - return nil - } - - debugLog("expand JSON pointer for key=%s", key) - - if err := replace.UpdateRefWithSchema(opts.Swagger(), key, v.Schema); err != nil { - return err - } - // NOTE: there is no other caller to update - - return nil -} diff --git a/vendor/github.com/go-openapi/analysis/flatten_name.go b/vendor/github.com/go-openapi/analysis/flatten_name.go deleted file mode 100644 index 3ad2ccfbf..000000000 --- a/vendor/github.com/go-openapi/analysis/flatten_name.go +++ /dev/null @@ -1,293 +0,0 @@ -package analysis - -import ( - "fmt" - "path" - "sort" - "strings" - - "github.com/go-openapi/analysis/internal/flatten/operations" - "github.com/go-openapi/analysis/internal/flatten/replace" - "github.com/go-openapi/analysis/internal/flatten/schutils" - "github.com/go-openapi/analysis/internal/flatten/sortref" - "github.com/go-openapi/spec" - "github.com/go-openapi/swag" -) - -// InlineSchemaNamer finds a new name for an inlined type -type InlineSchemaNamer struct { - Spec *spec.Swagger - Operations map[string]operations.OpRef - flattenContext *context - opts *FlattenOpts -} - -// Name yields a new name for the inline schema -func (isn *InlineSchemaNamer) Name(key string, schema *spec.Schema, aschema *AnalyzedSchema) error { - debugLog("naming inlined schema at %s", key) - - parts := sortref.KeyParts(key) - for _, name := range namesFromKey(parts, aschema, isn.Operations) { - if name == "" { - continue - } - - // create unique name - newName, isOAIGen := uniqifyName(isn.Spec.Definitions, swag.ToJSONName(name)) - - // clone schema - sch := schutils.Clone(schema) - - // replace values on schema - if err := replace.RewriteSchemaToRef(isn.Spec, key, - spec.MustCreateRef(path.Join(definitionsPath, newName))); err != nil { - return fmt.Errorf("error while creating definition %q from inline schema: %w", newName, err) - } - - // rewrite any dependent $ref pointing to this place, - // when not already pointing to a top-level definition. - // - // NOTE: this is important if such referers use arbitrary JSON pointers. - an := New(isn.Spec) - for k, v := range an.references.allRefs { - r, erd := replace.DeepestRef(isn.opts.Swagger(), isn.opts.ExpandOpts(false), v) - if erd != nil { - return fmt.Errorf("at %s, %w", k, erd) - } - - if isn.opts.flattenContext != nil { - isn.opts.flattenContext.warnings = append(isn.opts.flattenContext.warnings, r.Warnings...) 
- } - - if r.Ref.String() != key && (r.Ref.String() != path.Join(definitionsPath, newName) || path.Dir(v.String()) == definitionsPath) { - continue - } - - debugLog("found a $ref to a rewritten schema: %s points to %s", k, v.String()) - - // rewrite $ref to the new target - if err := replace.UpdateRef(isn.Spec, k, - spec.MustCreateRef(path.Join(definitionsPath, newName))); err != nil { - return err - } - } - - // NOTE: this extension is currently not used by go-swagger (provided for information only) - sch.AddExtension("x-go-gen-location", GenLocation(parts)) - - // save cloned schema to definitions - schutils.Save(isn.Spec, newName, sch) - - // keep track of created refs - if isn.flattenContext == nil { - continue - } - - debugLog("track created ref: key=%s, newName=%s, isOAIGen=%t", key, newName, isOAIGen) - resolved := false - - if _, ok := isn.flattenContext.newRefs[key]; ok { - resolved = isn.flattenContext.newRefs[key].resolved - } - - isn.flattenContext.newRefs[key] = &newRef{ - key: key, - newName: newName, - path: path.Join(definitionsPath, newName), - isOAIGen: isOAIGen, - resolved: resolved, - schema: sch, - } - } - - return nil -} - -// uniqifyName yields a unique name for a definition -func uniqifyName(definitions spec.Definitions, name string) (string, bool) { - isOAIGen := false - if name == "" { - name = "oaiGen" - isOAIGen = true - } - - if len(definitions) == 0 { - return name, isOAIGen - } - - unq := true - for k := range definitions { - if strings.EqualFold(k, name) { - unq = false - - break - } - } - - if unq { - return name, isOAIGen - } - - name += "OAIGen" - isOAIGen = true - var idx int - unique := name - _, known := definitions[unique] - - for known { - idx++ - unique = fmt.Sprintf("%s%d", name, idx) - _, known = definitions[unique] - } - - return unique, isOAIGen -} - -func namesFromKey(parts sortref.SplitKey, aschema *AnalyzedSchema, operations map[string]operations.OpRef) []string { - var ( - baseNames [][]string - startIndex int - ) - - if parts.IsOperation() { - baseNames, startIndex = namesForOperation(parts, operations) - } - - // definitions - if parts.IsDefinition() { - baseNames, startIndex = namesForDefinition(parts) - } - - result := make([]string, 0, len(baseNames)) - for _, segments := range baseNames { - nm := parts.BuildName(segments, startIndex, partAdder(aschema)) - if nm == "" { - continue - } - - result = append(result, nm) - } - sort.Strings(result) - - return result -} - -func namesForParam(parts sortref.SplitKey, operations map[string]operations.OpRef) ([][]string, int) { - var ( - baseNames [][]string - startIndex int - ) - - piref := parts.PathItemRef() - if piref.String() != "" && parts.IsOperationParam() { - if op, ok := operations[piref.String()]; ok { - startIndex = 5 - baseNames = append(baseNames, []string{op.ID, "params", "body"}) - } - } else if parts.IsSharedOperationParam() { - pref := parts.PathRef() - for k, v := range operations { - if strings.HasPrefix(k, pref.String()) { - startIndex = 4 - baseNames = append(baseNames, []string{v.ID, "params", "body"}) - } - } - } - - return baseNames, startIndex -} - -func namesForOperation(parts sortref.SplitKey, operations map[string]operations.OpRef) ([][]string, int) { - var ( - baseNames [][]string - startIndex int - ) - - // params - if parts.IsOperationParam() || parts.IsSharedOperationParam() { - baseNames, startIndex = namesForParam(parts, operations) - } - - // responses - if parts.IsOperationResponse() { - piref := parts.PathItemRef() - if piref.String() != "" { - if op, ok := 
operations[piref.String()]; ok { - startIndex = 6 - baseNames = append(baseNames, []string{op.ID, parts.ResponseName(), "body"}) - } - } - } - - return baseNames, startIndex -} - -func namesForDefinition(parts sortref.SplitKey) ([][]string, int) { - nm := parts.DefinitionName() - if nm != "" { - return [][]string{{parts.DefinitionName()}}, 2 - } - - return [][]string{}, 0 -} - -// partAdder knows how to interpret a schema when it comes to build a name from parts -func partAdder(aschema *AnalyzedSchema) sortref.PartAdder { - return func(part string) []string { - segments := make([]string, 0, 2) - - if part == "items" || part == "additionalItems" { - if aschema.IsTuple || aschema.IsTupleWithExtra { - segments = append(segments, "tuple") - } else { - segments = append(segments, "items") - } - - if part == "additionalItems" { - segments = append(segments, part) - } - - return segments - } - - segments = append(segments, part) - - return segments - } -} - -func nameFromRef(ref spec.Ref) string { - u := ref.GetURL() - if u.Fragment != "" { - return swag.ToJSONName(path.Base(u.Fragment)) - } - - if u.Path != "" { - bn := path.Base(u.Path) - if bn != "" && bn != "/" { - ext := path.Ext(bn) - if ext != "" { - return swag.ToJSONName(bn[:len(bn)-len(ext)]) - } - - return swag.ToJSONName(bn) - } - } - - return swag.ToJSONName(strings.ReplaceAll(u.Host, ".", " ")) -} - -// GenLocation indicates from which section of the specification (models or operations) a definition has been created. -// -// This is reflected in the output spec with a "x-go-gen-location" extension. At the moment, this is is provided -// for information only. -func GenLocation(parts sortref.SplitKey) string { - switch { - case parts.IsOperation(): - return "operations" - case parts.IsDefinition(): - return "models" - default: - return "" - } -} diff --git a/vendor/github.com/go-openapi/analysis/flatten_options.go b/vendor/github.com/go-openapi/analysis/flatten_options.go deleted file mode 100644 index c5bb97b0a..000000000 --- a/vendor/github.com/go-openapi/analysis/flatten_options.go +++ /dev/null @@ -1,78 +0,0 @@ -package analysis - -import ( - "log" - - "github.com/go-openapi/spec" -) - -// FlattenOpts configuration for flattening a swagger specification. -// -// The BasePath parameter is used to locate remote relative $ref found in the specification. -// This path is a file: it points to the location of the root document and may be either a local -// file path or a URL. -// -// If none specified, relative references (e.g. "$ref": "folder/schema.yaml#/definitions/...") -// found in the spec are searched from the current working directory. -type FlattenOpts struct { - Spec *Spec // The analyzed spec to work with - flattenContext *context // Internal context to track flattening activity - - BasePath string // The location of the root document for this spec to resolve relative $ref - - // Flattening options - Expand bool // When true, skip flattening the spec and expand it instead (if Minimal is false) - Minimal bool // When true, do not decompose complex structures such as allOf - Verbose bool // enable some reporting on possible name conflicts detected - RemoveUnused bool // When true, remove unused parameters, responses and definitions after expansion/flattening - ContinueOnError bool // Continue when spec expansion issues are found - - /* Extra keys */ - _ struct{} // require keys -} - -// ExpandOpts creates a spec.ExpandOptions to configure expanding a specification document. 
-func (f *FlattenOpts) ExpandOpts(skipSchemas bool) *spec.ExpandOptions { - return &spec.ExpandOptions{ - RelativeBase: f.BasePath, - SkipSchemas: skipSchemas, - ContinueOnError: f.ContinueOnError, - } -} - -// Swagger gets the swagger specification for this flatten operation -func (f *FlattenOpts) Swagger() *spec.Swagger { - return f.Spec.spec -} - -// croak logs notifications and warnings about valid, but possibly unwanted constructs resulting -// from flattening a spec -func (f *FlattenOpts) croak() { - if !f.Verbose { - return - } - - reported := make(map[string]bool, len(f.flattenContext.newRefs)) - for _, v := range f.Spec.references.allRefs { - // warns about duplicate handling - for _, r := range f.flattenContext.newRefs { - if r.isOAIGen && r.path == v.String() { - reported[r.newName] = true - } - } - } - - for k := range reported { - log.Printf("warning: duplicate flattened definition name resolved as %s", k) - } - - // warns about possible type mismatches - uniqueMsg := make(map[string]bool) - for _, msg := range f.flattenContext.warnings { - if _, ok := uniqueMsg[msg]; ok { - continue - } - log.Printf("warning: %s", msg) - uniqueMsg[msg] = true - } -} diff --git a/vendor/github.com/go-openapi/analysis/internal/debug/debug.go b/vendor/github.com/go-openapi/analysis/internal/debug/debug.go deleted file mode 100644 index ec0fec022..000000000 --- a/vendor/github.com/go-openapi/analysis/internal/debug/debug.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package debug - -import ( - "fmt" - "log" - "os" - "path/filepath" - "runtime" -) - -var ( - output = os.Stdout -) - -// GetLogger provides a prefix debug logger -func GetLogger(prefix string, debug bool) func(string, ...interface{}) { - if debug { - logger := log.New(output, fmt.Sprintf("%s:", prefix), log.LstdFlags) - - return func(msg string, args ...interface{}) { - _, file1, pos1, _ := runtime.Caller(1) - logger.Printf("%s:%d: %s", filepath.Base(file1), pos1, fmt.Sprintf(msg, args...)) - } - } - - return func(msg string, args ...interface{}) {} -} diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/normalize/normalize.go b/vendor/github.com/go-openapi/analysis/internal/flatten/normalize/normalize.go deleted file mode 100644 index 8c9df0580..000000000 --- a/vendor/github.com/go-openapi/analysis/internal/flatten/normalize/normalize.go +++ /dev/null @@ -1,87 +0,0 @@ -package normalize - -import ( - "net/url" - "path" - "path/filepath" - "strings" - - "github.com/go-openapi/spec" -) - -// RebaseRef rebases a remote ref relative to a base ref. -// -// NOTE: does not support JSONschema ID for $ref (we assume we are working with swagger specs here). 
-// -// NOTE(windows): -// * refs are assumed to have been normalized with drive letter lower cased (from go-openapi/spec) -// * "/ in paths may appear as escape sequences -func RebaseRef(baseRef string, ref string) string { - baseRef, _ = url.PathUnescape(baseRef) - ref, _ = url.PathUnescape(ref) - - if baseRef == "" || baseRef == "." || strings.HasPrefix(baseRef, "#") { - return ref - } - - parts := strings.Split(ref, "#") - - baseParts := strings.Split(baseRef, "#") - baseURL, _ := url.Parse(baseParts[0]) - if strings.HasPrefix(ref, "#") { - if baseURL.Host == "" { - return strings.Join([]string{baseParts[0], parts[1]}, "#") - } - - return strings.Join([]string{baseParts[0], parts[1]}, "#") - } - - refURL, _ := url.Parse(parts[0]) - if refURL.Host != "" || filepath.IsAbs(parts[0]) { - // not rebasing an absolute path - return ref - } - - // there is a relative path - var basePath string - if baseURL.Host != "" { - // when there is a host, standard URI rules apply (with "/") - baseURL.Path = path.Dir(baseURL.Path) - baseURL.Path = path.Join(baseURL.Path, "/"+parts[0]) - - return baseURL.String() - } - - // this is a local relative path - // basePart[0] and parts[0] are local filesystem directories/files - basePath = filepath.Dir(baseParts[0]) - relPath := filepath.Join(basePath, string(filepath.Separator)+parts[0]) - if len(parts) > 1 { - return strings.Join([]string{relPath, parts[1]}, "#") - } - - return relPath -} - -// Path renders absolute path on remote file refs -// -// NOTE(windows): -// * refs are assumed to have been normalized with drive letter lower cased (from go-openapi/spec) -// * "/ in paths may appear as escape sequences -func Path(ref spec.Ref, basePath string) string { - uri, _ := url.PathUnescape(ref.String()) - if ref.HasFragmentOnly || filepath.IsAbs(uri) { - return uri - } - - refURL, _ := url.Parse(uri) - if refURL.Host != "" { - return uri - } - - parts := strings.Split(uri, "#") - // BasePath, parts[0] are local filesystem directories, guaranteed to be absolute at this stage - parts[0] = filepath.Join(filepath.Dir(basePath), parts[0]) - - return strings.Join(parts, "#") -} diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/operations/operations.go b/vendor/github.com/go-openapi/analysis/internal/flatten/operations/operations.go deleted file mode 100644 index 7f3a2b871..000000000 --- a/vendor/github.com/go-openapi/analysis/internal/flatten/operations/operations.go +++ /dev/null @@ -1,90 +0,0 @@ -package operations - -import ( - "path" - "sort" - "strings" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/spec" - "github.com/go-openapi/swag" -) - -// AllOpRefsByRef returns an index of sortable operations -func AllOpRefsByRef(specDoc Provider, operationIDs []string) map[string]OpRef { - return OpRefsByRef(GatherOperations(specDoc, operationIDs)) -} - -// OpRefsByRef indexes a map of sortable operations -func OpRefsByRef(oprefs map[string]OpRef) map[string]OpRef { - result := make(map[string]OpRef, len(oprefs)) - for _, v := range oprefs { - result[v.Ref.String()] = v - } - - return result -} - -// OpRef is an indexable, sortable operation -type OpRef struct { - Method string - Path string - Key string - ID string - Op *spec.Operation - Ref spec.Ref -} - -// OpRefs is a sortable collection of operations -type OpRefs []OpRef - -func (o OpRefs) Len() int { return len(o) } -func (o OpRefs) Swap(i, j int) { o[i], o[j] = o[j], o[i] } -func (o OpRefs) Less(i, j int) bool { return o[i].Key < o[j].Key } - -// Provider knows how to collect 
operations from a spec -type Provider interface { - Operations() map[string]map[string]*spec.Operation -} - -// GatherOperations builds a map of sorted operations from a spec -func GatherOperations(specDoc Provider, operationIDs []string) map[string]OpRef { - var oprefs OpRefs - - for method, pathItem := range specDoc.Operations() { - for pth, operation := range pathItem { - vv := *operation - oprefs = append(oprefs, OpRef{ - Key: swag.ToGoName(strings.ToLower(method) + " " + pth), - Method: method, - Path: pth, - ID: vv.ID, - Op: &vv, - Ref: spec.MustCreateRef("#" + path.Join("/paths", jsonpointer.Escape(pth), method)), - }) - } - } - - sort.Sort(oprefs) - - operations := make(map[string]OpRef) - for _, opr := range oprefs { - nm := opr.ID - if nm == "" { - nm = opr.Key - } - - oo, found := operations[nm] - if found && oo.Method != opr.Method && oo.Path != opr.Path { - nm = opr.Key - } - - if len(operationIDs) == 0 || swag.ContainsStrings(operationIDs, opr.ID) || swag.ContainsStrings(operationIDs, nm) { - opr.ID = nm - opr.Op.ID = nm - operations[nm] = opr - } - } - - return operations -} diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go b/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go deleted file mode 100644 index 26c2a05a3..000000000 --- a/vendor/github.com/go-openapi/analysis/internal/flatten/replace/replace.go +++ /dev/null @@ -1,434 +0,0 @@ -package replace - -import ( - "fmt" - "net/url" - "os" - "path" - "strconv" - - "github.com/go-openapi/analysis/internal/debug" - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/spec" -) - -const definitionsPath = "#/definitions" - -var debugLog = debug.GetLogger("analysis/flatten/replace", os.Getenv("SWAGGER_DEBUG") != "") - -// RewriteSchemaToRef replaces a schema with a Ref -func RewriteSchemaToRef(sp *spec.Swagger, key string, ref spec.Ref) error { - debugLog("rewriting schema to ref for %s with %s", key, ref.String()) - _, value, err := getPointerFromKey(sp, key) - if err != nil { - return err - } - - switch refable := value.(type) { - case *spec.Schema: - return rewriteParentRef(sp, key, ref) - - case spec.Schema: - return rewriteParentRef(sp, key, ref) - - case *spec.SchemaOrArray: - if refable.Schema != nil { - refable.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - } - - case *spec.SchemaOrBool: - if refable.Schema != nil { - refable.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - } - default: - return fmt.Errorf("no schema with ref found at %s for %T", key, value) - } - - return nil -} - -func rewriteParentRef(sp *spec.Swagger, key string, ref spec.Ref) error { - parent, entry, pvalue, err := getParentFromKey(sp, key) - if err != nil { - return err - } - - debugLog("rewriting holder for %T", pvalue) - switch container := pvalue.(type) { - case spec.Response: - if err := rewriteParentRef(sp, "#"+parent, ref); err != nil { - return err - } - - case *spec.Response: - container.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - - case *spec.Responses: - statusCode, err := strconv.Atoi(entry) - if err != nil { - return fmt.Errorf("%s not a number: %w", key[1:], err) - } - resp := container.StatusCodeResponses[statusCode] - resp.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - container.StatusCodeResponses[statusCode] = resp - - case map[string]spec.Response: - resp := container[entry] - resp.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - container[entry] = resp - - case 
spec.Parameter: - if err := rewriteParentRef(sp, "#"+parent, ref); err != nil { - return err - } - - case map[string]spec.Parameter: - param := container[entry] - param.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - container[entry] = param - - case []spec.Parameter: - idx, err := strconv.Atoi(entry) - if err != nil { - return fmt.Errorf("%s not a number: %w", key[1:], err) - } - param := container[idx] - param.Schema = &spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - container[idx] = param - - case spec.Definitions: - container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - - case map[string]spec.Schema: - container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - - case []spec.Schema: - idx, err := strconv.Atoi(entry) - if err != nil { - return fmt.Errorf("%s not a number: %w", key[1:], err) - } - container[idx] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - - case *spec.SchemaOrArray: - // NOTE: this is necessarily an array - otherwise, the parent would be *Schema - idx, err := strconv.Atoi(entry) - if err != nil { - return fmt.Errorf("%s not a number: %w", key[1:], err) - } - container.Schemas[idx] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - - case spec.SchemaProperties: - container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - - // NOTE: can't have case *spec.SchemaOrBool = parent in this case is *Schema - - default: - return fmt.Errorf("unhandled parent schema rewrite %s (%T)", key, pvalue) - } - - return nil -} - -// getPointerFromKey retrieves the content of the JSON pointer "key" -func getPointerFromKey(sp interface{}, key string) (string, interface{}, error) { - switch sp.(type) { - case *spec.Schema: - case *spec.Swagger: - default: - panic("unexpected type used in getPointerFromKey") - } - if key == "#/" { - return "", sp, nil - } - // unescape chars in key, e.g. "{}" from path params - pth, _ := url.PathUnescape(key[1:]) - ptr, err := jsonpointer.New(pth) - if err != nil { - return "", nil, err - } - - value, _, err := ptr.Get(sp) - if err != nil { - debugLog("error when getting key: %s with path: %s", key, pth) - - return "", nil, err - } - - return pth, value, nil -} - -// getParentFromKey retrieves the container of the JSON pointer "key" -func getParentFromKey(sp interface{}, key string) (string, string, interface{}, error) { - switch sp.(type) { - case *spec.Schema: - case *spec.Swagger: - default: - panic("unexpected type used in getPointerFromKey") - } - // unescape chars in key, e.g. 
"{}" from path params - pth, _ := url.PathUnescape(key[1:]) - - parent, entry := path.Dir(pth), path.Base(pth) - debugLog("getting schema holder at: %s, with entry: %s", parent, entry) - - pptr, err := jsonpointer.New(parent) - if err != nil { - return "", "", nil, err - } - pvalue, _, err := pptr.Get(sp) - if err != nil { - return "", "", nil, fmt.Errorf("can't get parent for %s: %w", parent, err) - } - - return parent, entry, pvalue, nil -} - -// UpdateRef replaces a ref by another one -func UpdateRef(sp interface{}, key string, ref spec.Ref) error { - switch sp.(type) { - case *spec.Schema: - case *spec.Swagger: - default: - panic("unexpected type used in getPointerFromKey") - } - debugLog("updating ref for %s with %s", key, ref.String()) - pth, value, err := getPointerFromKey(sp, key) - if err != nil { - return err - } - - switch refable := value.(type) { - case *spec.Schema: - refable.Ref = ref - case *spec.SchemaOrArray: - if refable.Schema != nil { - refable.Schema.Ref = ref - } - case *spec.SchemaOrBool: - if refable.Schema != nil { - refable.Schema.Ref = ref - } - case spec.Schema: - debugLog("rewriting holder for %T", refable) - _, entry, pvalue, erp := getParentFromKey(sp, key) - if erp != nil { - return err - } - switch container := pvalue.(type) { - case spec.Definitions: - container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - - case map[string]spec.Schema: - container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - - case []spec.Schema: - idx, err := strconv.Atoi(entry) - if err != nil { - return fmt.Errorf("%s not a number: %w", pth, err) - } - container[idx] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - - case *spec.SchemaOrArray: - // NOTE: this is necessarily an array - otherwise, the parent would be *Schema - idx, err := strconv.Atoi(entry) - if err != nil { - return fmt.Errorf("%s not a number: %w", pth, err) - } - container.Schemas[idx] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - - case spec.SchemaProperties: - container[entry] = spec.Schema{SchemaProps: spec.SchemaProps{Ref: ref}} - - // NOTE: can't have case *spec.SchemaOrBool = parent in this case is *Schema - - default: - return fmt.Errorf("unhandled container type at %s: %T", key, value) - } - - default: - return fmt.Errorf("no schema with ref found at %s for %T", key, value) - } - - return nil -} - -// UpdateRefWithSchema replaces a ref with a schema (i.e. 
re-inline schema) -func UpdateRefWithSchema(sp *spec.Swagger, key string, sch *spec.Schema) error { - debugLog("updating ref for %s with schema", key) - pth, value, err := getPointerFromKey(sp, key) - if err != nil { - return err - } - - switch refable := value.(type) { - case *spec.Schema: - *refable = *sch - case spec.Schema: - _, entry, pvalue, erp := getParentFromKey(sp, key) - if erp != nil { - return err - } - switch container := pvalue.(type) { - case spec.Definitions: - container[entry] = *sch - - case map[string]spec.Schema: - container[entry] = *sch - - case []spec.Schema: - idx, err := strconv.Atoi(entry) - if err != nil { - return fmt.Errorf("%s not a number: %w", pth, err) - } - container[idx] = *sch - - case *spec.SchemaOrArray: - // NOTE: this is necessarily an array - otherwise, the parent would be *Schema - idx, err := strconv.Atoi(entry) - if err != nil { - return fmt.Errorf("%s not a number: %w", pth, err) - } - container.Schemas[idx] = *sch - - case spec.SchemaProperties: - container[entry] = *sch - - // NOTE: can't have case *spec.SchemaOrBool = parent in this case is *Schema - - default: - return fmt.Errorf("unhandled type for parent of [%s]: %T", key, value) - } - case *spec.SchemaOrArray: - *refable.Schema = *sch - // NOTE: can't have case *spec.SchemaOrBool = parent in this case is *Schema - case *spec.SchemaOrBool: - *refable.Schema = *sch - default: - return fmt.Errorf("no schema with ref found at %s for %T", key, value) - } - - return nil -} - -// DeepestRefResult holds the results from DeepestRef analysis -type DeepestRefResult struct { - Ref spec.Ref - Schema *spec.Schema - Warnings []string -} - -// DeepestRef finds the first definition ref, from a cascade of nested refs which are not definitions. -// - if no definition is found, returns the deepest ref. -// - pointers to external files are expanded -// -// NOTE: all external $ref's are assumed to be already expanded at this stage. -func DeepestRef(sp *spec.Swagger, opts *spec.ExpandOptions, ref spec.Ref) (*DeepestRefResult, error) { - if !ref.HasFragmentOnly { - // we found an external $ref, which is odd at this stage: - // do nothing on external $refs - return &DeepestRefResult{Ref: ref}, nil - } - - currentRef := ref - visited := make(map[string]bool, 64) - warnings := make([]string, 0, 2) - -DOWNREF: - for currentRef.String() != "" { - if path.Dir(currentRef.String()) == definitionsPath { - // this is a top-level definition: stop here and return this ref - return &DeepestRefResult{Ref: currentRef}, nil - } - - if _, beenThere := visited[currentRef.String()]; beenThere { - return nil, - fmt.Errorf("cannot resolve cyclic chain of pointers under %s", currentRef.String()) - } - - visited[currentRef.String()] = true - value, _, err := currentRef.GetPointer().Get(sp) - if err != nil { - return nil, err - } - - switch refable := value.(type) { - case *spec.Schema: - if refable.Ref.String() == "" { - break DOWNREF - } - currentRef = refable.Ref - - case spec.Schema: - if refable.Ref.String() == "" { - break DOWNREF - } - currentRef = refable.Ref - - case *spec.SchemaOrArray: - if refable.Schema == nil || refable.Schema != nil && refable.Schema.Ref.String() == "" { - break DOWNREF - } - currentRef = refable.Schema.Ref - - case *spec.SchemaOrBool: - if refable.Schema == nil || refable.Schema != nil && refable.Schema.Ref.String() == "" { - break DOWNREF - } - currentRef = refable.Schema.Ref - - case spec.Response: - // a pointer points to a schema initially marshalled in responses section... 
- // Attempt to convert this to a schema. If this fails, the spec is invalid - asJSON, _ := refable.MarshalJSON() - var asSchema spec.Schema - - err := asSchema.UnmarshalJSON(asJSON) - if err != nil { - return nil, - fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema a, got: %T", - currentRef.String(), value) - } - warnings = append(warnings, fmt.Sprintf("found $ref %q (response) interpreted as schema", currentRef.String())) - - if asSchema.Ref.String() == "" { - break DOWNREF - } - currentRef = asSchema.Ref - - case spec.Parameter: - // a pointer points to a schema initially marshalled in parameters section... - // Attempt to convert this to a schema. If this fails, the spec is invalid - asJSON, _ := refable.MarshalJSON() - var asSchema spec.Schema - if err := asSchema.UnmarshalJSON(asJSON); err != nil { - return nil, - fmt.Errorf("invalid type for resolved JSON pointer %s. Expected a schema a, got: %T", - currentRef.String(), value) - } - - warnings = append(warnings, fmt.Sprintf("found $ref %q (parameter) interpreted as schema", currentRef.String())) - - if asSchema.Ref.String() == "" { - break DOWNREF - } - currentRef = asSchema.Ref - - default: - return nil, - fmt.Errorf("unhandled type to resolve JSON pointer %s. Expected a Schema, got: %T", - currentRef.String(), value) - } - } - - // assess what schema we're ending with - sch, erv := spec.ResolveRefWithBase(sp, ¤tRef, opts) - if erv != nil { - return nil, erv - } - - if sch == nil { - return nil, fmt.Errorf("no schema found at %s", currentRef.String()) - } - - return &DeepestRefResult{Ref: currentRef, Schema: sch, Warnings: warnings}, nil -} diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/schutils/flatten_schema.go b/vendor/github.com/go-openapi/analysis/internal/flatten/schutils/flatten_schema.go deleted file mode 100644 index 4590236e6..000000000 --- a/vendor/github.com/go-openapi/analysis/internal/flatten/schutils/flatten_schema.go +++ /dev/null @@ -1,29 +0,0 @@ -// Package schutils provides tools to save or clone a schema -// when flattening a spec. 
-package schutils - -import ( - "github.com/go-openapi/spec" - "github.com/go-openapi/swag" -) - -// Save registers a schema as an entry in spec #/definitions -func Save(sp *spec.Swagger, name string, schema *spec.Schema) { - if schema == nil { - return - } - - if sp.Definitions == nil { - sp.Definitions = make(map[string]spec.Schema, 150) - } - - sp.Definitions[name] = *schema -} - -// Clone deep-clones a schema -func Clone(schema *spec.Schema) *spec.Schema { - var sch spec.Schema - _ = swag.FromDynamicJSON(schema, &sch) - - return &sch -} diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go b/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go deleted file mode 100644 index 18e552ead..000000000 --- a/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/keys.go +++ /dev/null @@ -1,201 +0,0 @@ -package sortref - -import ( - "net/http" - "path" - "strconv" - "strings" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/spec" -) - -const ( - paths = "paths" - responses = "responses" - parameters = "parameters" - definitions = "definitions" -) - -var ( - ignoredKeys map[string]struct{} - validMethods map[string]struct{} -) - -func init() { - ignoredKeys = map[string]struct{}{ - "schema": {}, - "properties": {}, - "not": {}, - "anyOf": {}, - "oneOf": {}, - } - - validMethods = map[string]struct{}{ - "GET": {}, - "HEAD": {}, - "OPTIONS": {}, - "PATCH": {}, - "POST": {}, - "PUT": {}, - "DELETE": {}, - } -} - -// Key represent a key item constructed from /-separated segments -type Key struct { - Segments int - Key string -} - -// Keys is a sortable collable collection of Keys -type Keys []Key - -func (k Keys) Len() int { return len(k) } -func (k Keys) Swap(i, j int) { k[i], k[j] = k[j], k[i] } -func (k Keys) Less(i, j int) bool { - return k[i].Segments > k[j].Segments || (k[i].Segments == k[j].Segments && k[i].Key < k[j].Key) -} - -// KeyParts construct a SplitKey with all its /-separated segments decomposed. It is sortable. -func KeyParts(key string) SplitKey { - var res []string - for _, part := range strings.Split(key[1:], "/") { - if part != "" { - res = append(res, jsonpointer.Unescape(part)) - } - } - - return res -} - -// SplitKey holds of the parts of a /-separated key, soi that their location may be determined. -type SplitKey []string - -// IsDefinition is true when the split key is in the #/definitions section of a spec -func (s SplitKey) IsDefinition() bool { - return len(s) > 1 && s[0] == definitions -} - -// DefinitionName yields the name of the definition -func (s SplitKey) DefinitionName() string { - if !s.IsDefinition() { - return "" - } - - return s[1] -} - -func (s SplitKey) isKeyName(i int) bool { - if i <= 0 { - return false - } - - count := 0 - for idx := i - 1; idx > 0; idx-- { - if s[idx] != "properties" { - break - } - count++ - } - - return count%2 != 0 -} - -// PartAdder know how to construct the components of a new name -type PartAdder func(string) []string - -// BuildName builds a name from segments -func (s SplitKey) BuildName(segments []string, startIndex int, adder PartAdder) string { - for i, part := range s[startIndex:] { - if _, ignored := ignoredKeys[part]; !ignored || s.isKeyName(startIndex+i) { - segments = append(segments, adder(part)...) 
- } - } - - return strings.Join(segments, " ") -} - -// IsOperation is true when the split key is in the operations section -func (s SplitKey) IsOperation() bool { - return len(s) > 1 && s[0] == paths -} - -// IsSharedOperationParam is true when the split key is in the parameters section of a path -func (s SplitKey) IsSharedOperationParam() bool { - return len(s) > 2 && s[0] == paths && s[2] == parameters -} - -// IsSharedParam is true when the split key is in the #/parameters section of a spec -func (s SplitKey) IsSharedParam() bool { - return len(s) > 1 && s[0] == parameters -} - -// IsOperationParam is true when the split key is in the parameters section of an operation -func (s SplitKey) IsOperationParam() bool { - return len(s) > 3 && s[0] == paths && s[3] == parameters -} - -// IsOperationResponse is true when the split key is in the responses section of an operation -func (s SplitKey) IsOperationResponse() bool { - return len(s) > 3 && s[0] == paths && s[3] == responses -} - -// IsSharedResponse is true when the split key is in the #/responses section of a spec -func (s SplitKey) IsSharedResponse() bool { - return len(s) > 1 && s[0] == responses -} - -// IsDefaultResponse is true when the split key is the default response for an operation -func (s SplitKey) IsDefaultResponse() bool { - return len(s) > 4 && s[0] == paths && s[3] == responses && s[4] == "default" -} - -// IsStatusCodeResponse is true when the split key is an operation response with a status code -func (s SplitKey) IsStatusCodeResponse() bool { - isInt := func() bool { - _, err := strconv.Atoi(s[4]) - - return err == nil - } - - return len(s) > 4 && s[0] == paths && s[3] == responses && isInt() -} - -// ResponseName yields either the status code or "Default" for a response -func (s SplitKey) ResponseName() string { - if s.IsStatusCodeResponse() { - code, _ := strconv.Atoi(s[4]) - - return http.StatusText(code) - } - - if s.IsDefaultResponse() { - return "Default" - } - - return "" -} - -// PathItemRef constructs a $ref object from a split key of the form /{path}/{method} -func (s SplitKey) PathItemRef() spec.Ref { - if len(s) < 3 { - return spec.Ref{} - } - - pth, method := s[1], s[2] - if _, isValidMethod := validMethods[strings.ToUpper(method)]; !isValidMethod && !strings.HasPrefix(method, "x-") { - return spec.Ref{} - } - - return spec.MustCreateRef("#" + path.Join("/", paths, jsonpointer.Escape(pth), strings.ToUpper(method))) -} - -// PathRef constructs a $ref object from a split key of the form /paths/{reference} -func (s SplitKey) PathRef() spec.Ref { - if !s.IsOperation() { - return spec.Ref{} - } - - return spec.MustCreateRef("#" + path.Join("/", paths, jsonpointer.Escape(s[1]))) -} diff --git a/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/sort_ref.go b/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/sort_ref.go deleted file mode 100644 index 73243df87..000000000 --- a/vendor/github.com/go-openapi/analysis/internal/flatten/sortref/sort_ref.go +++ /dev/null @@ -1,141 +0,0 @@ -package sortref - -import ( - "reflect" - "sort" - "strings" - - "github.com/go-openapi/analysis/internal/flatten/normalize" - "github.com/go-openapi/spec" -) - -var depthGroupOrder = []string{ - "sharedParam", "sharedResponse", "sharedOpParam", "opParam", "codeResponse", "defaultResponse", "definition", -} - -type mapIterator struct { - len int - mapIter *reflect.MapIter -} - -func (i *mapIterator) Next() bool { - return i.mapIter.Next() -} - -func (i *mapIterator) Len() int { - return i.len -} - -func (i 
*mapIterator) Key() string { - return i.mapIter.Key().String() -} - -func mustMapIterator(anyMap interface{}) *mapIterator { - val := reflect.ValueOf(anyMap) - - return &mapIterator{mapIter: val.MapRange(), len: val.Len()} -} - -// DepthFirst sorts a map of anything. It groups keys by category -// (shared params, op param, statuscode response, default response, definitions) -// sort groups internally by number of parts in the key and lexical names -// flatten groups into a single list of keys -func DepthFirst(in interface{}) []string { - iterator := mustMapIterator(in) - sorted := make([]string, 0, iterator.Len()) - grouped := make(map[string]Keys, iterator.Len()) - - for iterator.Next() { - k := iterator.Key() - split := KeyParts(k) - var pk string - - if split.IsSharedOperationParam() { - pk = "sharedOpParam" - } - if split.IsOperationParam() { - pk = "opParam" - } - if split.IsStatusCodeResponse() { - pk = "codeResponse" - } - if split.IsDefaultResponse() { - pk = "defaultResponse" - } - if split.IsDefinition() { - pk = "definition" - } - if split.IsSharedParam() { - pk = "sharedParam" - } - if split.IsSharedResponse() { - pk = "sharedResponse" - } - grouped[pk] = append(grouped[pk], Key{Segments: len(split), Key: k}) - } - - for _, pk := range depthGroupOrder { - res := grouped[pk] - sort.Sort(res) - - for _, v := range res { - sorted = append(sorted, v.Key) - } - } - - return sorted -} - -// topMostRefs is able to sort refs by hierarchical then lexicographic order, -// yielding refs ordered breadth-first. -type topmostRefs []string - -func (k topmostRefs) Len() int { return len(k) } -func (k topmostRefs) Swap(i, j int) { k[i], k[j] = k[j], k[i] } -func (k topmostRefs) Less(i, j int) bool { - li, lj := len(strings.Split(k[i], "/")), len(strings.Split(k[j], "/")) - if li == lj { - return k[i] < k[j] - } - - return li < lj -} - -// TopmostFirst sorts references by depth -func TopmostFirst(refs []string) []string { - res := topmostRefs(refs) - sort.Sort(res) - - return res -} - -// RefRevIdx is a reverse index for references -type RefRevIdx struct { - Ref spec.Ref - Keys []string -} - -// ReverseIndex builds a reverse index for references in schemas -func ReverseIndex(schemas map[string]spec.Ref, basePath string) map[string]RefRevIdx { - collected := make(map[string]RefRevIdx) - for key, schRef := range schemas { - // normalize paths before sorting, - // so we get together keys that are from the same external file - normalizedPath := normalize.Path(schRef, basePath) - - entry, ok := collected[normalizedPath] - if ok { - entry.Keys = append(entry.Keys, key) - collected[normalizedPath] = entry - - continue - } - - collected[normalizedPath] = RefRevIdx{ - Ref: schRef, - Keys: []string{key}, - } - } - - return collected -} diff --git a/vendor/github.com/go-openapi/analysis/mixin.go b/vendor/github.com/go-openapi/analysis/mixin.go deleted file mode 100644 index b25305264..000000000 --- a/vendor/github.com/go-openapi/analysis/mixin.go +++ /dev/null @@ -1,515 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package analysis - -import ( - "fmt" - "reflect" - - "github.com/go-openapi/spec" -) - -// Mixin modifies the primary swagger spec by adding the paths and -// definitions from the mixin specs. Top level parameters and -// responses from the mixins are also carried over. Operation id -// collisions are avoided by appending "Mixin" but only if -// needed. -// -// The following parts of primary are subject to merge, filling empty details -// - Info -// - BasePath -// - Host -// - ExternalDocs -// -// Consider calling FixEmptyResponseDescriptions() on the modified primary -// if you read them from storage and they are valid to start with. -// -// Entries in "paths", "definitions", "parameters" and "responses" are -// added to the primary in the order of the given mixins. If the entry -// already exists in primary it is skipped with a warning message. -// -// The count of skipped entries (from collisions) is returned so any -// deviation from the number expected can flag a warning in your build -// scripts. Carefully review the collisions before accepting them; -// consider renaming things if possible. -// -// No key normalization takes place (paths, type defs, -// etc). Ensure they are canonical if your downstream tools do -// key normalization of any form. -// -// Merging schemes (http, https), and consumers/producers do not account for -// collisions. -func Mixin(primary *spec.Swagger, mixins ...*spec.Swagger) []string { - skipped := make([]string, 0, len(mixins)) - opIds := getOpIds(primary) - initPrimary(primary) - - for i, m := range mixins { - skipped = append(skipped, mergeSwaggerProps(primary, m)...) - - skipped = append(skipped, mergeConsumes(primary, m)...) - - skipped = append(skipped, mergeProduces(primary, m)...) - - skipped = append(skipped, mergeTags(primary, m)...) - - skipped = append(skipped, mergeSchemes(primary, m)...) - - skipped = append(skipped, mergeSecurityDefinitions(primary, m)...) - - skipped = append(skipped, mergeSecurityRequirements(primary, m)...) - - skipped = append(skipped, mergeDefinitions(primary, m)...) - - // merging paths requires a map of operationIDs to work with - skipped = append(skipped, mergePaths(primary, m, opIds, i)...) - - skipped = append(skipped, mergeParameters(primary, m)...) - - skipped = append(skipped, mergeResponses(primary, m)...) - } - - return skipped -} - -// getOpIds extracts all the paths..operationIds from the given -// spec and returns them as the keys in a map with 'true' values. 
-func getOpIds(s *spec.Swagger) map[string]bool { - rv := make(map[string]bool) - if s.Paths == nil { - return rv - } - - for _, v := range s.Paths.Paths { - piops := pathItemOps(v) - - for _, op := range piops { - rv[op.ID] = true - } - } - - return rv -} - -func pathItemOps(p spec.PathItem) []*spec.Operation { - var rv []*spec.Operation - rv = appendOp(rv, p.Get) - rv = appendOp(rv, p.Put) - rv = appendOp(rv, p.Post) - rv = appendOp(rv, p.Delete) - rv = appendOp(rv, p.Head) - rv = appendOp(rv, p.Patch) - - return rv -} - -func appendOp(ops []*spec.Operation, op *spec.Operation) []*spec.Operation { - if op == nil { - return ops - } - - return append(ops, op) -} - -func mergeSecurityDefinitions(primary *spec.Swagger, m *spec.Swagger) (skipped []string) { - for k, v := range m.SecurityDefinitions { - if _, exists := primary.SecurityDefinitions[k]; exists { - warn := fmt.Sprintf( - "SecurityDefinitions entry '%v' already exists in primary or higher priority mixin, skipping\n", k) - skipped = append(skipped, warn) - - continue - } - - primary.SecurityDefinitions[k] = v - } - - return -} - -func mergeSecurityRequirements(primary *spec.Swagger, m *spec.Swagger) (skipped []string) { - for _, v := range m.Security { - found := false - for _, vv := range primary.Security { - if reflect.DeepEqual(v, vv) { - found = true - - break - } - } - - if found { - warn := fmt.Sprintf( - "Security requirement: '%v' already exists in primary or higher priority mixin, skipping\n", v) - skipped = append(skipped, warn) - - continue - } - primary.Security = append(primary.Security, v) - } - - return -} - -func mergeDefinitions(primary *spec.Swagger, m *spec.Swagger) (skipped []string) { - for k, v := range m.Definitions { - // assume name collisions represent IDENTICAL type. careful. - if _, exists := primary.Definitions[k]; exists { - warn := fmt.Sprintf( - "definitions entry '%v' already exists in primary or higher priority mixin, skipping\n", k) - skipped = append(skipped, warn) - - continue - } - primary.Definitions[k] = v - } - - return -} - -func mergePaths(primary *spec.Swagger, m *spec.Swagger, opIds map[string]bool, mixIndex int) (skipped []string) { - if m.Paths != nil { - for k, v := range m.Paths.Paths { - if _, exists := primary.Paths.Paths[k]; exists { - warn := fmt.Sprintf( - "paths entry '%v' already exists in primary or higher priority mixin, skipping\n", k) - skipped = append(skipped, warn) - - continue - } - - // Swagger requires that operationIds be - // unique within a spec. If we find a - // collision we append "Mixin0" to the - // operatoinId we are adding, where 0 is mixin - // index. We assume that operationIds with - // all the proivded specs are already unique. - piops := pathItemOps(v) - for _, piop := range piops { - if opIds[piop.ID] { - piop.ID = fmt.Sprintf("%v%v%v", piop.ID, "Mixin", mixIndex) - } - opIds[piop.ID] = true - } - primary.Paths.Paths[k] = v - } - } - - return -} - -func mergeParameters(primary *spec.Swagger, m *spec.Swagger) (skipped []string) { - for k, v := range m.Parameters { - // could try to rename on conflict but would - // have to fix $refs in the mixin. 
Complain - // for now - if _, exists := primary.Parameters[k]; exists { - warn := fmt.Sprintf( - "top level parameters entry '%v' already exists in primary or higher priority mixin, skipping\n", k) - skipped = append(skipped, warn) - - continue - } - primary.Parameters[k] = v - } - - return -} - -func mergeResponses(primary *spec.Swagger, m *spec.Swagger) (skipped []string) { - for k, v := range m.Responses { - // could try to rename on conflict but would - // have to fix $refs in the mixin. Complain - // for now - if _, exists := primary.Responses[k]; exists { - warn := fmt.Sprintf( - "top level responses entry '%v' already exists in primary or higher priority mixin, skipping\n", k) - skipped = append(skipped, warn) - - continue - } - primary.Responses[k] = v - } - - return skipped -} - -func mergeConsumes(primary *spec.Swagger, m *spec.Swagger) []string { - for _, v := range m.Consumes { - found := false - for _, vv := range primary.Consumes { - if v == vv { - found = true - - break - } - } - - if found { - // no warning here: we just skip it - continue - } - primary.Consumes = append(primary.Consumes, v) - } - - return []string{} -} - -func mergeProduces(primary *spec.Swagger, m *spec.Swagger) []string { - for _, v := range m.Produces { - found := false - for _, vv := range primary.Produces { - if v == vv { - found = true - - break - } - } - - if found { - // no warning here: we just skip it - continue - } - primary.Produces = append(primary.Produces, v) - } - - return []string{} -} - -func mergeTags(primary *spec.Swagger, m *spec.Swagger) (skipped []string) { - for _, v := range m.Tags { - found := false - for _, vv := range primary.Tags { - if v.Name == vv.Name { - found = true - - break - } - } - - if found { - warn := fmt.Sprintf( - "top level tags entry with name '%v' already exists in primary or higher priority mixin, skipping\n", - v.Name, - ) - skipped = append(skipped, warn) - - continue - } - - primary.Tags = append(primary.Tags, v) - } - - return -} - -func mergeSchemes(primary *spec.Swagger, m *spec.Swagger) []string { - for _, v := range m.Schemes { - found := false - for _, vv := range primary.Schemes { - if v == vv { - found = true - - break - } - } - - if found { - // no warning here: we just skip it - continue - } - primary.Schemes = append(primary.Schemes, v) - } - - return []string{} -} - -func mergeSwaggerProps(primary *spec.Swagger, m *spec.Swagger) []string { - var skipped, skippedInfo, skippedDocs []string - - primary.Extensions, skipped = mergeExtensions(primary.Extensions, m.Extensions) - - // merging details in swagger top properties - if primary.Host == "" { - primary.Host = m.Host - } - - if primary.BasePath == "" { - primary.BasePath = m.BasePath - } - - if primary.Info == nil { - primary.Info = m.Info - } else if m.Info != nil { - skippedInfo = mergeInfo(primary.Info, m.Info) - skipped = append(skipped, skippedInfo...) - } - - if primary.ExternalDocs == nil { - primary.ExternalDocs = m.ExternalDocs - } else if m != nil { - skippedDocs = mergeExternalDocs(primary.ExternalDocs, m.ExternalDocs) - skipped = append(skipped, skippedDocs...) 
- } - - return skipped -} - -// nolint: unparam -func mergeExternalDocs(primary *spec.ExternalDocumentation, m *spec.ExternalDocumentation) []string { - if primary.Description == "" { - primary.Description = m.Description - } - - if primary.URL == "" { - primary.URL = m.URL - } - - return nil -} - -func mergeInfo(primary *spec.Info, m *spec.Info) []string { - var sk, skipped []string - - primary.Extensions, sk = mergeExtensions(primary.Extensions, m.Extensions) - skipped = append(skipped, sk...) - - if primary.Description == "" { - primary.Description = m.Description - } - - if primary.Title == "" { - primary.Description = m.Description - } - - if primary.TermsOfService == "" { - primary.TermsOfService = m.TermsOfService - } - - if primary.Version == "" { - primary.Version = m.Version - } - - if primary.Contact == nil { - primary.Contact = m.Contact - } else if m.Contact != nil { - var csk []string - primary.Contact.Extensions, csk = mergeExtensions(primary.Contact.Extensions, m.Contact.Extensions) - skipped = append(skipped, csk...) - - if primary.Contact.Name == "" { - primary.Contact.Name = m.Contact.Name - } - - if primary.Contact.URL == "" { - primary.Contact.URL = m.Contact.URL - } - - if primary.Contact.Email == "" { - primary.Contact.Email = m.Contact.Email - } - } - - if primary.License == nil { - primary.License = m.License - } else if m.License != nil { - var lsk []string - primary.License.Extensions, lsk = mergeExtensions(primary.License.Extensions, m.License.Extensions) - skipped = append(skipped, lsk...) - - if primary.License.Name == "" { - primary.License.Name = m.License.Name - } - - if primary.License.URL == "" { - primary.License.URL = m.License.URL - } - } - - return skipped -} - -func mergeExtensions(primary spec.Extensions, m spec.Extensions) (result spec.Extensions, skipped []string) { - if primary == nil { - result = m - - return - } - - if m == nil { - result = primary - - return - } - - result = primary - for k, v := range m { - if _, found := primary[k]; found { - skipped = append(skipped, k) - - continue - } - - primary[k] = v - } - - return -} - -func initPrimary(primary *spec.Swagger) { - if primary.SecurityDefinitions == nil { - primary.SecurityDefinitions = make(map[string]*spec.SecurityScheme) - } - - if primary.Security == nil { - primary.Security = make([]map[string][]string, 0, 10) - } - - if primary.Produces == nil { - primary.Produces = make([]string, 0, 10) - } - - if primary.Consumes == nil { - primary.Consumes = make([]string, 0, 10) - } - - if primary.Tags == nil { - primary.Tags = make([]spec.Tag, 0, 10) - } - - if primary.Schemes == nil { - primary.Schemes = make([]string, 0, 10) - } - - if primary.Paths == nil { - primary.Paths = &spec.Paths{Paths: make(map[string]spec.PathItem)} - } - - if primary.Paths.Paths == nil { - primary.Paths.Paths = make(map[string]spec.PathItem) - } - - if primary.Definitions == nil { - primary.Definitions = make(spec.Definitions) - } - - if primary.Parameters == nil { - primary.Parameters = make(map[string]spec.Parameter) - } - - if primary.Responses == nil { - primary.Responses = make(map[string]spec.Response) - } -} diff --git a/vendor/github.com/go-openapi/analysis/schema.go b/vendor/github.com/go-openapi/analysis/schema.go deleted file mode 100644 index fc055095c..000000000 --- a/vendor/github.com/go-openapi/analysis/schema.go +++ /dev/null @@ -1,256 +0,0 @@ -package analysis - -import ( - "fmt" - - "github.com/go-openapi/spec" - "github.com/go-openapi/strfmt" -) - -// SchemaOpts configures the schema analyzer 
-type SchemaOpts struct { - Schema *spec.Schema - Root interface{} - BasePath string - _ struct{} -} - -// Schema analysis, will classify the schema according to known -// patterns. -func Schema(opts SchemaOpts) (*AnalyzedSchema, error) { - if opts.Schema == nil { - return nil, fmt.Errorf("no schema to analyze") - } - - a := &AnalyzedSchema{ - schema: opts.Schema, - root: opts.Root, - basePath: opts.BasePath, - } - - a.initializeFlags() - a.inferKnownType() - a.inferEnum() - a.inferBaseType() - - if err := a.inferMap(); err != nil { - return nil, err - } - if err := a.inferArray(); err != nil { - return nil, err - } - - a.inferTuple() - - if err := a.inferFromRef(); err != nil { - return nil, err - } - - a.inferSimpleSchema() - - return a, nil -} - -// AnalyzedSchema indicates what the schema represents -type AnalyzedSchema struct { - schema *spec.Schema - root interface{} - basePath string - - hasProps bool - hasAllOf bool - hasItems bool - hasAdditionalProps bool - hasAdditionalItems bool - hasRef bool - - IsKnownType bool - IsSimpleSchema bool - IsArray bool - IsSimpleArray bool - IsMap bool - IsSimpleMap bool - IsExtendedObject bool - IsTuple bool - IsTupleWithExtra bool - IsBaseType bool - IsEnum bool -} - -// Inherits copies value fields from other onto this schema -func (a *AnalyzedSchema) inherits(other *AnalyzedSchema) { - if other == nil { - return - } - a.hasProps = other.hasProps - a.hasAllOf = other.hasAllOf - a.hasItems = other.hasItems - a.hasAdditionalItems = other.hasAdditionalItems - a.hasAdditionalProps = other.hasAdditionalProps - a.hasRef = other.hasRef - - a.IsKnownType = other.IsKnownType - a.IsSimpleSchema = other.IsSimpleSchema - a.IsArray = other.IsArray - a.IsSimpleArray = other.IsSimpleArray - a.IsMap = other.IsMap - a.IsSimpleMap = other.IsSimpleMap - a.IsExtendedObject = other.IsExtendedObject - a.IsTuple = other.IsTuple - a.IsTupleWithExtra = other.IsTupleWithExtra - a.IsBaseType = other.IsBaseType - a.IsEnum = other.IsEnum -} - -func (a *AnalyzedSchema) inferFromRef() error { - if a.hasRef { - sch := new(spec.Schema) - sch.Ref = a.schema.Ref - err := spec.ExpandSchema(sch, a.root, nil) - if err != nil { - return err - } - rsch, err := Schema(SchemaOpts{ - Schema: sch, - Root: a.root, - BasePath: a.basePath, - }) - if err != nil { - // NOTE(fredbi): currently the only cause for errors is - // unresolved ref. Since spec.ExpandSchema() expands the - // schema recursively, there is no chance to get there, - // until we add more causes for error in this schema analysis. 
- return err - } - a.inherits(rsch) - } - - return nil -} - -func (a *AnalyzedSchema) inferSimpleSchema() { - a.IsSimpleSchema = a.IsKnownType || a.IsSimpleArray || a.IsSimpleMap -} - -func (a *AnalyzedSchema) inferKnownType() { - tpe := a.schema.Type - format := a.schema.Format - a.IsKnownType = tpe.Contains("boolean") || - tpe.Contains("integer") || - tpe.Contains("number") || - tpe.Contains("string") || - (format != "" && strfmt.Default.ContainsName(format)) || - (a.isObjectType() && !a.hasProps && !a.hasAllOf && !a.hasAdditionalProps && !a.hasAdditionalItems) -} - -func (a *AnalyzedSchema) inferMap() error { - if !a.isObjectType() { - return nil - } - - hasExtra := a.hasProps || a.hasAllOf - a.IsMap = a.hasAdditionalProps && !hasExtra - a.IsExtendedObject = a.hasAdditionalProps && hasExtra - - if !a.IsMap { - return nil - } - - // maps - if a.schema.AdditionalProperties.Schema != nil { - msch, err := Schema(SchemaOpts{ - Schema: a.schema.AdditionalProperties.Schema, - Root: a.root, - BasePath: a.basePath, - }) - if err != nil { - return err - } - a.IsSimpleMap = msch.IsSimpleSchema - } else if a.schema.AdditionalProperties.Allows { - a.IsSimpleMap = true - } - - return nil -} - -func (a *AnalyzedSchema) inferArray() error { - // an array has Items defined as an object schema, otherwise we qualify this JSON array as a tuple - // (yes, even if the Items array contains only one element). - // arrays in JSON schema may be unrestricted (i.e no Items specified). - // Note that arrays in Swagger MUST have Items. Nonetheless, we analyze unrestricted arrays. - // - // NOTE: the spec package misses the distinction between: - // items: [] and items: {}, so we consider both arrays here. - a.IsArray = a.isArrayType() && (a.schema.Items == nil || a.schema.Items.Schemas == nil) - if a.IsArray && a.hasItems { - if a.schema.Items.Schema != nil { - itsch, err := Schema(SchemaOpts{ - Schema: a.schema.Items.Schema, - Root: a.root, - BasePath: a.basePath, - }) - if err != nil { - return err - } - - a.IsSimpleArray = itsch.IsSimpleSchema - } - } - - if a.IsArray && !a.hasItems { - a.IsSimpleArray = true - } - - return nil -} - -func (a *AnalyzedSchema) inferTuple() { - tuple := a.hasItems && a.schema.Items.Schemas != nil - a.IsTuple = tuple && !a.hasAdditionalItems - a.IsTupleWithExtra = tuple && a.hasAdditionalItems -} - -func (a *AnalyzedSchema) inferBaseType() { - if a.isObjectType() { - a.IsBaseType = a.schema.Discriminator != "" - } -} - -func (a *AnalyzedSchema) inferEnum() { - a.IsEnum = len(a.schema.Enum) > 0 -} - -func (a *AnalyzedSchema) initializeFlags() { - a.hasProps = len(a.schema.Properties) > 0 - a.hasAllOf = len(a.schema.AllOf) > 0 - a.hasRef = a.schema.Ref.String() != "" - - a.hasItems = a.schema.Items != nil && - (a.schema.Items.Schema != nil || len(a.schema.Items.Schemas) > 0) - - a.hasAdditionalProps = a.schema.AdditionalProperties != nil && - (a.schema.AdditionalProperties.Schema != nil || a.schema.AdditionalProperties.Allows) - - a.hasAdditionalItems = a.schema.AdditionalItems != nil && - (a.schema.AdditionalItems.Schema != nil || a.schema.AdditionalItems.Allows) -} - -func (a *AnalyzedSchema) isObjectType() bool { - return !a.hasRef && (a.schema.Type == nil || a.schema.Type.Contains("") || a.schema.Type.Contains("object")) -} - -func (a *AnalyzedSchema) isArrayType() bool { - return !a.hasRef && (a.schema.Type != nil && a.schema.Type.Contains("array")) -} - -// isAnalyzedAsComplex determines if an analyzed schema is eligible to flattening (i.e. it is "complex"). 
-// -// Complex means the schema is any of: -// - a simple type (primitive) -// - an array of something (items are possibly complex ; if this is the case, items will generate a definition) -// - a map of something (additionalProperties are possibly complex ; if this is the case, additionalProperties will -// generate a definition) -func (a *AnalyzedSchema) isAnalyzedAsComplex() bool { - return !a.IsSimpleSchema && !a.IsArray && !a.IsMap -} diff --git a/vendor/github.com/go-openapi/errors/.gitignore b/vendor/github.com/go-openapi/errors/.gitignore deleted file mode 100644 index dd91ed6a0..000000000 --- a/vendor/github.com/go-openapi/errors/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -secrets.yml -coverage.out diff --git a/vendor/github.com/go-openapi/errors/.golangci.yml b/vendor/github.com/go-openapi/errors/.golangci.yml deleted file mode 100644 index f9381aee5..000000000 --- a/vendor/github.com/go-openapi/errors/.golangci.yml +++ /dev/null @@ -1,41 +0,0 @@ -linters-settings: - govet: - check-shadowing: true - golint: - min-confidence: 0 - gocyclo: - min-complexity: 30 - maligned: - suggest-new: true - dupl: - threshold: 100 - goconst: - min-len: 2 - min-occurrences: 4 -linters: - enable-all: true - disable: - - maligned - - lll - - gochecknoglobals - - godox - - gocognit - - whitespace - - wsl - - funlen - - gochecknoglobals - - gochecknoinits - - scopelint - - wrapcheck - - exhaustivestruct - - exhaustive - - nlreturn - - testpackage - - gci - - gofumpt - - goerr113 - - gomnd - - tparallel - - nestif - - godot - - errorlint diff --git a/vendor/github.com/go-openapi/errors/.travis.yml b/vendor/github.com/go-openapi/errors/.travis.yml deleted file mode 100644 index e4a01bfd4..000000000 --- a/vendor/github.com/go-openapi/errors/.travis.yml +++ /dev/null @@ -1,30 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- 1.14.x -- 1.x -arch: - - amd64 -jobs: - include: - # only run fast tests on ppc64le - - go: 1.x - arch: ppc64le - script: - - gotestsum -f short-verbose -- ./... - - # include linting job, but only for latest go version and amd64 arch - - go: 1.x - arch: amd64 - install: - go get github.com/golangci/golangci-lint/cmd/golangci-lint - script: - - golangci-lint run --new-from-rev master -install: -- GO111MODULE=off go get -u gotest.tools/gotestsum -language: go -notifications: - slack: - secure: gZGp9NaHxi7zawlXJXKY92BGeDR1x0tbIcTyU5nMKLq0fhIaiEBJEeALwZ4VgqsSv3DytSSF5mLH8fevAM3ixE6hxjKQ+lQuf7V/w3btCN1CSWgoua5LOh1kTnqZQtJuRvO4pzoJcT3bJWBsVZ07VGNVzzJEy/zAKCHFqBUCXShw7QemlLBcYWFNqveTlvDIfCzvouoLnPoXwxEpkjxe9uz/ZKZgAnup/fXjC8RFctmgCnkCyvJTk0Y/fZCsufixJrJhshBWTnlrFCzRmgNkz2d+i1Ls3+MJ5EJJ2Tx/A5S63dL49J1f9Kr0AKHADmulSy8JNzIckKwbyFMYUecrsW+Lsu9DhnVMy1jj5pKsJDLRi2iIU3fXTMWbcyQbXjbbnBO2mPdP3Tzme75y4D9fc8hUPeyqVv2BU26NEbQ7EF2pKJ93OXvci7HlwRBgdJa8j6mP2LEDClcPQW00g7N/OZe0cTOMa8L5AwiBlbArwqt9wv6YLJoTG0wpDhzWsFvbCg5bJxe28Yn3fIDD0Lk1I7iSnBbp/5gzF19jmxqvcT8tHRkDL4xfjbENFTZjA5uB4Z4pj4WSyWQILLV/Jwhe3fi9uQwdviFHfj5pnVrmNUiGSOQL672K5wl2c3E9mGwejvsu2dfEz28n7Y/FUnOpY3/cBS0n27JJaerS0zMKNLE= -script: -- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./... 
diff --git a/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065e..000000000 --- a/vendor/github.com/go-openapi/errors/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. 
- -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/errors/LICENSE b/vendor/github.com/go-openapi/errors/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/go-openapi/errors/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/go-openapi/errors/README.md b/vendor/github.com/go-openapi/errors/README.md deleted file mode 100644 index 0ce50b23b..000000000 --- a/vendor/github.com/go-openapi/errors/README.md +++ /dev/null @@ -1,8 +0,0 @@ -# OpenAPI errors [![Build Status](https://travis-ci.org/go-openapi/errors.svg?branch=master)](https://travis-ci.org/go-openapi/errors) [![codecov](https://codecov.io/gh/go-openapi/errors/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/errors) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) - -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/errors/master/LICENSE) -[![GoDoc](https://godoc.org/github.com/go-openapi/errors?status.svg)](http://godoc.org/github.com/go-openapi/errors) -[![GolangCI](https://golangci.com/badges/github.com/go-openapi/errors.svg)](https://golangci.com) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/errors)](https://goreportcard.com/report/github.com/go-openapi/errors) - -Shared errors and error interface used throughout the various libraries found in the go-openapi toolkit. diff --git a/vendor/github.com/go-openapi/errors/api.go b/vendor/github.com/go-openapi/errors/api.go deleted file mode 100644 index 7667cee76..000000000 --- a/vendor/github.com/go-openapi/errors/api.go +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package errors - -import ( - "encoding/json" - "fmt" - "net/http" - "reflect" - "strings" -) - -// DefaultHTTPCode is used when the error Code cannot be used as an HTTP code. 
-var DefaultHTTPCode = http.StatusUnprocessableEntity - -// Error represents a error interface all swagger framework errors implement -type Error interface { - error - Code() int32 -} - -type apiError struct { - code int32 - message string -} - -func (a *apiError) Error() string { - return a.message -} - -func (a *apiError) Code() int32 { - return a.code -} - -// New creates a new API error with a code and a message -func New(code int32, message string, args ...interface{}) Error { - if len(args) > 0 { - return &apiError{code, fmt.Sprintf(message, args...)} - } - return &apiError{code, message} -} - -// NotFound creates a new not found error -func NotFound(message string, args ...interface{}) Error { - if message == "" { - message = "Not found" - } - return New(http.StatusNotFound, fmt.Sprintf(message, args...)) -} - -// NotImplemented creates a new not implemented error -func NotImplemented(message string) Error { - return New(http.StatusNotImplemented, message) -} - -// MethodNotAllowedError represents an error for when the path matches but the method doesn't -type MethodNotAllowedError struct { - code int32 - Allowed []string - message string -} - -func (m *MethodNotAllowedError) Error() string { - return m.message -} - -// Code the error code -func (m *MethodNotAllowedError) Code() int32 { - return m.code -} - -func errorAsJSON(err Error) []byte { - b, _ := json.Marshal(struct { - Code int32 `json:"code"` - Message string `json:"message"` - }{err.Code(), err.Error()}) - return b -} - -func flattenComposite(errs *CompositeError) *CompositeError { - var res []error - for _, er := range errs.Errors { - switch e := er.(type) { - case *CompositeError: - if len(e.Errors) > 0 { - flat := flattenComposite(e) - if len(flat.Errors) > 0 { - res = append(res, flat.Errors...) - } - } - default: - if e != nil { - res = append(res, e) - } - } - } - return CompositeValidationError(res...) 
-} - -// MethodNotAllowed creates a new method not allowed error -func MethodNotAllowed(requested string, allow []string) Error { - msg := fmt.Sprintf("method %s is not allowed, but [%s] are", requested, strings.Join(allow, ",")) - return &MethodNotAllowedError{code: http.StatusMethodNotAllowed, Allowed: allow, message: msg} -} - -// ServeError the error handler interface implementation -func ServeError(rw http.ResponseWriter, r *http.Request, err error) { - rw.Header().Set("Content-Type", "application/json") - switch e := err.(type) { - case *CompositeError: - er := flattenComposite(e) - // strips composite errors to first element only - if len(er.Errors) > 0 { - ServeError(rw, r, er.Errors[0]) - } else { - // guard against empty CompositeError (invalid construct) - ServeError(rw, r, nil) - } - case *MethodNotAllowedError: - rw.Header().Add("Allow", strings.Join(err.(*MethodNotAllowedError).Allowed, ",")) - rw.WriteHeader(asHTTPCode(int(e.Code()))) - if r == nil || r.Method != http.MethodHead { - _, _ = rw.Write(errorAsJSON(e)) - } - case Error: - value := reflect.ValueOf(e) - if value.Kind() == reflect.Ptr && value.IsNil() { - rw.WriteHeader(http.StatusInternalServerError) - _, _ = rw.Write(errorAsJSON(New(http.StatusInternalServerError, "Unknown error"))) - return - } - rw.WriteHeader(asHTTPCode(int(e.Code()))) - if r == nil || r.Method != http.MethodHead { - _, _ = rw.Write(errorAsJSON(e)) - } - case nil: - rw.WriteHeader(http.StatusInternalServerError) - _, _ = rw.Write(errorAsJSON(New(http.StatusInternalServerError, "Unknown error"))) - default: - rw.WriteHeader(http.StatusInternalServerError) - if r == nil || r.Method != http.MethodHead { - _, _ = rw.Write(errorAsJSON(New(http.StatusInternalServerError, err.Error()))) - } - } -} - -func asHTTPCode(input int) int { - if input >= 600 { - return DefaultHTTPCode - } - return input -} diff --git a/vendor/github.com/go-openapi/errors/auth.go b/vendor/github.com/go-openapi/errors/auth.go deleted file mode 100644 index 0545b501b..000000000 --- a/vendor/github.com/go-openapi/errors/auth.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package errors - -import "net/http" - -// Unauthenticated returns an unauthenticated error -func Unauthenticated(scheme string) Error { - return New(http.StatusUnauthorized, "unauthenticated for %s", scheme) -} diff --git a/vendor/github.com/go-openapi/errors/doc.go b/vendor/github.com/go-openapi/errors/doc.go deleted file mode 100644 index 963d42740..000000000 --- a/vendor/github.com/go-openapi/errors/doc.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* - -Package errors provides an Error interface and several concrete types -implementing this interface to manage API errors and JSON-schema validation -errors. - -A middleware handler ServeError() is provided to serve the errors types -it defines. - -It is used throughout the various go-openapi toolkit libraries -(https://github.com/go-openapi). - -*/ -package errors diff --git a/vendor/github.com/go-openapi/errors/headers.go b/vendor/github.com/go-openapi/errors/headers.go deleted file mode 100644 index 0360c094e..000000000 --- a/vendor/github.com/go-openapi/errors/headers.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package errors - -import ( - "fmt" - "net/http" -) - -// Validation represents a failure of a precondition -type Validation struct { - code int32 - Name string - In string - Value interface{} - message string - Values []interface{} -} - -func (e *Validation) Error() string { - return e.message -} - -// Code the error code -func (e *Validation) Code() int32 { - return e.code -} - -// ValidateName produces an error message name for an aliased property -func (e *Validation) ValidateName(name string) *Validation { - if e.Name == "" && name != "" { - e.Name = name - e.message = name + e.message - } - return e -} - -const ( - contentTypeFail = `unsupported media type %q, only %v are allowed` - responseFormatFail = `unsupported media type requested, only %v are available` -) - -// InvalidContentType error for an invalid content type -func InvalidContentType(value string, allowed []string) *Validation { - values := make([]interface{}, 0, len(allowed)) - for _, v := range allowed { - values = append(values, v) - } - return &Validation{ - code: http.StatusUnsupportedMediaType, - Name: "Content-Type", - In: "header", - Value: value, - Values: values, - message: fmt.Sprintf(contentTypeFail, value, allowed), - } -} - -// InvalidResponseFormat error for an unacceptable response format request -func InvalidResponseFormat(value string, allowed []string) *Validation { - values := make([]interface{}, 0, len(allowed)) - for _, v := range allowed { - values = append(values, v) - } - return &Validation{ - code: http.StatusNotAcceptable, - Name: "Accept", - In: "header", - Value: value, - Values: values, - message: fmt.Sprintf(responseFormatFail, allowed), - } -} diff --git a/vendor/github.com/go-openapi/errors/middleware.go b/vendor/github.com/go-openapi/errors/middleware.go deleted file mode 100644 index 6390d4636..000000000 --- a/vendor/github.com/go-openapi/errors/middleware.go +++ 
/dev/null @@ -1,51 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package errors - -import ( - "bytes" - "fmt" - "strings" -) - -// APIVerificationFailed is an error that contains all the missing info for a mismatched section -// between the api registrations and the api spec -type APIVerificationFailed struct { - Section string - MissingSpecification []string - MissingRegistration []string -} - -// -func (v *APIVerificationFailed) Error() string { - buf := bytes.NewBuffer(nil) - - hasRegMissing := len(v.MissingRegistration) > 0 - hasSpecMissing := len(v.MissingSpecification) > 0 - - if hasRegMissing { - buf.WriteString(fmt.Sprintf("missing [%s] %s registrations", strings.Join(v.MissingRegistration, ", "), v.Section)) - } - - if hasRegMissing && hasSpecMissing { - buf.WriteString("\n") - } - - if hasSpecMissing { - buf.WriteString(fmt.Sprintf("missing from spec file [%s] %s", strings.Join(v.MissingSpecification, ", "), v.Section)) - } - - return buf.String() -} diff --git a/vendor/github.com/go-openapi/errors/parsing.go b/vendor/github.com/go-openapi/errors/parsing.go deleted file mode 100644 index 0f96ce209..000000000 --- a/vendor/github.com/go-openapi/errors/parsing.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package errors - -import "fmt" - -// ParseError represents a parsing error -type ParseError struct { - code int32 - Name string - In string - Value string - Reason error - message string -} - -func (e *ParseError) Error() string { - return e.message -} - -// Code returns the http status code for this error -func (e *ParseError) Code() int32 { - return e.code -} - -const ( - parseErrorTemplContent = `parsing %s %s from %q failed, because %s` - parseErrorTemplContentNoIn = `parsing %s from %q failed, because %s` -) - -// NewParseError creates a new parse error -func NewParseError(name, in, value string, reason error) *ParseError { - var msg string - if in == "" { - msg = fmt.Sprintf(parseErrorTemplContentNoIn, name, value, reason) - } else { - msg = fmt.Sprintf(parseErrorTemplContent, name, in, value, reason) - } - return &ParseError{ - code: 400, - Name: name, - In: in, - Value: value, - Reason: reason, - message: msg, - } -} diff --git a/vendor/github.com/go-openapi/errors/schema.go b/vendor/github.com/go-openapi/errors/schema.go deleted file mode 100644 index f4a7d4ac2..000000000 --- a/vendor/github.com/go-openapi/errors/schema.go +++ /dev/null @@ -1,588 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package errors - -import ( - "fmt" - "strings" -) - -const ( - invalidType = "%s is an invalid type name" - typeFail = "%s in %s must be of type %s" - typeFailWithData = "%s in %s must be of type %s: %q" - typeFailWithError = "%s in %s must be of type %s, because: %s" - requiredFail = "%s in %s is required" - readOnlyFail = "%s in %s is readOnly" - tooLongMessage = "%s in %s should be at most %d chars long" - tooShortMessage = "%s in %s should be at least %d chars long" - patternFail = "%s in %s should match '%s'" - enumFail = "%s in %s should be one of %v" - multipleOfFail = "%s in %s should be a multiple of %v" - maxIncFail = "%s in %s should be less than or equal to %v" - maxExcFail = "%s in %s should be less than %v" - minIncFail = "%s in %s should be greater than or equal to %v" - minExcFail = "%s in %s should be greater than %v" - uniqueFail = "%s in %s shouldn't contain duplicates" - maxItemsFail = "%s in %s should have at most %d items" - minItemsFail = "%s in %s should have at least %d items" - typeFailNoIn = "%s must be of type %s" - typeFailWithDataNoIn = "%s must be of type %s: %q" - typeFailWithErrorNoIn = "%s must be of type %s, because: %s" - requiredFailNoIn = "%s is required" - readOnlyFailNoIn = "%s is readOnly" - tooLongMessageNoIn = "%s should be at most %d chars long" - tooShortMessageNoIn = "%s should be at least %d chars long" - patternFailNoIn = "%s should match '%s'" - enumFailNoIn = "%s should be one of %v" - multipleOfFailNoIn = "%s should be a multiple of %v" - maxIncFailNoIn = "%s should be less than or equal to %v" - maxExcFailNoIn = "%s should be less than %v" - minIncFailNoIn = "%s should be greater than or equal to %v" - minExcFailNoIn = "%s should be greater than %v" - uniqueFailNoIn = "%s shouldn't contain duplicates" - maxItemsFailNoIn = "%s should have at most %d items" - minItemsFailNoIn = "%s should have at least %d items" - noAdditionalItems = "%s in %s can't have additional items" - noAdditionalItemsNoIn = "%s can't have additional items" - tooFewProperties = "%s in %s should have at least %d properties" - tooFewPropertiesNoIn = "%s should have at least %d properties" - tooManyProperties = "%s in %s should have at most %d properties" - tooManyPropertiesNoIn = "%s should have at most %d properties" - unallowedProperty = "%s.%s in %s is a forbidden property" - unallowedPropertyNoIn = "%s.%s is a forbidden property" - failedAllPatternProps = "%s.%s in %s failed all pattern properties" - failedAllPatternPropsNoIn = "%s.%s failed all pattern properties" - multipleOfMustBePositive = "factor MultipleOf declared for %s must be positive: %v" -) - -// All code responses can be used to differentiate errors for different handling -// by the consuming program -const ( - // CompositeErrorCode remains 422 for backwards-compatibility - // and to separate it from validation errors with cause - CompositeErrorCode = 422 - // InvalidTypeCode is used for any subclass of invalid types - InvalidTypeCode = 600 + iota - RequiredFailCode - TooLongFailCode - TooShortFailCode - PatternFailCode - EnumFailCode - MultipleOfFailCode - MaxFailCode - MinFailCode - UniqueFailCode - MaxItemsFailCode - MinItemsFailCode - NoAdditionalItemsCode - TooFewPropertiesCode - TooManyPropertiesCode - UnallowedPropertyCode - FailedAllPatternPropsCode - MultipleOfMustBePositiveCode - ReadOnlyFailCode -) - -// CompositeError is an error that groups several errors together -type CompositeError struct { - Errors []error - code int32 - message string -} - -// Code for this error -func (c 
*CompositeError) Code() int32 { - return c.code -} - -func (c *CompositeError) Error() string { - if len(c.Errors) > 0 { - msgs := []string{c.message + ":"} - for _, e := range c.Errors { - msgs = append(msgs, e.Error()) - } - return strings.Join(msgs, "\n") - } - return c.message -} - -// CompositeValidationError an error to wrap a bunch of other errors -func CompositeValidationError(errors ...error) *CompositeError { - return &CompositeError{ - code: CompositeErrorCode, - Errors: append([]error{}, errors...), - message: "validation failure list", - } -} - -// FailedAllPatternProperties an error for when the property doesn't match a pattern -func FailedAllPatternProperties(name, in, key string) *Validation { - msg := fmt.Sprintf(failedAllPatternProps, name, key, in) - if in == "" { - msg = fmt.Sprintf(failedAllPatternPropsNoIn, name, key) - } - return &Validation{ - code: FailedAllPatternPropsCode, - Name: name, - In: in, - Value: key, - message: msg, - } -} - -// PropertyNotAllowed an error for when the property doesn't match a pattern -func PropertyNotAllowed(name, in, key string) *Validation { - msg := fmt.Sprintf(unallowedProperty, name, key, in) - if in == "" { - msg = fmt.Sprintf(unallowedPropertyNoIn, name, key) - } - return &Validation{ - code: UnallowedPropertyCode, - Name: name, - In: in, - Value: key, - message: msg, - } -} - -// TooFewProperties an error for an object with too few properties -func TooFewProperties(name, in string, n int64) *Validation { - msg := fmt.Sprintf(tooFewProperties, name, in, n) - if in == "" { - msg = fmt.Sprintf(tooFewPropertiesNoIn, name, n) - } - return &Validation{ - code: TooFewPropertiesCode, - Name: name, - In: in, - Value: n, - message: msg, - } -} - -// TooManyProperties an error for an object with too many properties -func TooManyProperties(name, in string, n int64) *Validation { - msg := fmt.Sprintf(tooManyProperties, name, in, n) - if in == "" { - msg = fmt.Sprintf(tooManyPropertiesNoIn, name, n) - } - return &Validation{ - code: TooManyPropertiesCode, - Name: name, - In: in, - Value: n, - message: msg, - } -} - -// AdditionalItemsNotAllowed an error for invalid additional items -func AdditionalItemsNotAllowed(name, in string) *Validation { - msg := fmt.Sprintf(noAdditionalItems, name, in) - if in == "" { - msg = fmt.Sprintf(noAdditionalItemsNoIn, name) - } - return &Validation{ - code: NoAdditionalItemsCode, - Name: name, - In: in, - message: msg, - } -} - -// InvalidCollectionFormat another flavor of invalid type error -func InvalidCollectionFormat(name, in, format string) *Validation { - return &Validation{ - code: InvalidTypeCode, - Name: name, - In: in, - Value: format, - message: fmt.Sprintf("the collection format %q is not supported for the %s param %q", format, in, name), - } -} - -// InvalidTypeName an error for when the type is invalid -func InvalidTypeName(typeName string) *Validation { - return &Validation{ - code: InvalidTypeCode, - Value: typeName, - message: fmt.Sprintf(invalidType, typeName), - } -} - -// InvalidType creates an error for when the type is invalid -func InvalidType(name, in, typeName string, value interface{}) *Validation { - var message string - - if in != "" { - switch value.(type) { - case string: - message = fmt.Sprintf(typeFailWithData, name, in, typeName, value) - case error: - message = fmt.Sprintf(typeFailWithError, name, in, typeName, value) - default: - message = fmt.Sprintf(typeFail, name, in, typeName) - } - } else { - switch value.(type) { - case string: - message = 
fmt.Sprintf(typeFailWithDataNoIn, name, typeName, value) - case error: - message = fmt.Sprintf(typeFailWithErrorNoIn, name, typeName, value) - default: - message = fmt.Sprintf(typeFailNoIn, name, typeName) - } - } - - return &Validation{ - code: InvalidTypeCode, - Name: name, - In: in, - Value: value, - message: message, - } - -} - -// DuplicateItems error for when an array contains duplicates -func DuplicateItems(name, in string) *Validation { - msg := fmt.Sprintf(uniqueFail, name, in) - if in == "" { - msg = fmt.Sprintf(uniqueFailNoIn, name) - } - return &Validation{ - code: UniqueFailCode, - Name: name, - In: in, - message: msg, - } -} - -// TooManyItems error for when an array contains too many items -func TooManyItems(name, in string, max int64, value interface{}) *Validation { - msg := fmt.Sprintf(maxItemsFail, name, in, max) - if in == "" { - msg = fmt.Sprintf(maxItemsFailNoIn, name, max) - } - - return &Validation{ - code: MaxItemsFailCode, - Name: name, - In: in, - Value: value, - message: msg, - } -} - -// TooFewItems error for when an array contains too few items -func TooFewItems(name, in string, min int64, value interface{}) *Validation { - msg := fmt.Sprintf(minItemsFail, name, in, min) - if in == "" { - msg = fmt.Sprintf(minItemsFailNoIn, name, min) - } - return &Validation{ - code: MinItemsFailCode, - Name: name, - In: in, - Value: value, - message: msg, - } -} - -// ExceedsMaximumInt error for when maximum validation fails -func ExceedsMaximumInt(name, in string, max int64, exclusive bool, value interface{}) *Validation { - var message string - if in == "" { - m := maxIncFailNoIn - if exclusive { - m = maxExcFailNoIn - } - message = fmt.Sprintf(m, name, max) - } else { - m := maxIncFail - if exclusive { - m = maxExcFail - } - message = fmt.Sprintf(m, name, in, max) - } - return &Validation{ - code: MaxFailCode, - Name: name, - In: in, - Value: value, - message: message, - } -} - -// ExceedsMaximumUint error for when maximum validation fails -func ExceedsMaximumUint(name, in string, max uint64, exclusive bool, value interface{}) *Validation { - var message string - if in == "" { - m := maxIncFailNoIn - if exclusive { - m = maxExcFailNoIn - } - message = fmt.Sprintf(m, name, max) - } else { - m := maxIncFail - if exclusive { - m = maxExcFail - } - message = fmt.Sprintf(m, name, in, max) - } - return &Validation{ - code: MaxFailCode, - Name: name, - In: in, - Value: value, - message: message, - } -} - -// ExceedsMaximum error for when maximum validation fails -func ExceedsMaximum(name, in string, max float64, exclusive bool, value interface{}) *Validation { - var message string - if in == "" { - m := maxIncFailNoIn - if exclusive { - m = maxExcFailNoIn - } - message = fmt.Sprintf(m, name, max) - } else { - m := maxIncFail - if exclusive { - m = maxExcFail - } - message = fmt.Sprintf(m, name, in, max) - } - return &Validation{ - code: MaxFailCode, - Name: name, - In: in, - Value: value, - message: message, - } -} - -// ExceedsMinimumInt error for when minimum validation fails -func ExceedsMinimumInt(name, in string, min int64, exclusive bool, value interface{}) *Validation { - var message string - if in == "" { - m := minIncFailNoIn - if exclusive { - m = minExcFailNoIn - } - message = fmt.Sprintf(m, name, min) - } else { - m := minIncFail - if exclusive { - m = minExcFail - } - message = fmt.Sprintf(m, name, in, min) - } - return &Validation{ - code: MinFailCode, - Name: name, - In: in, - Value: value, - message: message, - } -} - -// ExceedsMinimumUint error for when minimum 
validation fails -func ExceedsMinimumUint(name, in string, min uint64, exclusive bool, value interface{}) *Validation { - var message string - if in == "" { - m := minIncFailNoIn - if exclusive { - m = minExcFailNoIn - } - message = fmt.Sprintf(m, name, min) - } else { - m := minIncFail - if exclusive { - m = minExcFail - } - message = fmt.Sprintf(m, name, in, min) - } - return &Validation{ - code: MinFailCode, - Name: name, - In: in, - Value: value, - message: message, - } -} - -// ExceedsMinimum error for when minimum validation fails -func ExceedsMinimum(name, in string, min float64, exclusive bool, value interface{}) *Validation { - var message string - if in == "" { - m := minIncFailNoIn - if exclusive { - m = minExcFailNoIn - } - message = fmt.Sprintf(m, name, min) - } else { - m := minIncFail - if exclusive { - m = minExcFail - } - message = fmt.Sprintf(m, name, in, min) - } - return &Validation{ - code: MinFailCode, - Name: name, - In: in, - Value: value, - message: message, - } -} - -// NotMultipleOf error for when multiple of validation fails -func NotMultipleOf(name, in string, multiple, value interface{}) *Validation { - var msg string - if in == "" { - msg = fmt.Sprintf(multipleOfFailNoIn, name, multiple) - } else { - msg = fmt.Sprintf(multipleOfFail, name, in, multiple) - } - return &Validation{ - code: MultipleOfFailCode, - Name: name, - In: in, - Value: value, - message: msg, - } -} - -// EnumFail error for when an enum validation fails -func EnumFail(name, in string, value interface{}, values []interface{}) *Validation { - var msg string - if in == "" { - msg = fmt.Sprintf(enumFailNoIn, name, values) - } else { - msg = fmt.Sprintf(enumFail, name, in, values) - } - - return &Validation{ - code: EnumFailCode, - Name: name, - In: in, - Value: value, - Values: values, - message: msg, - } -} - -// Required error for when a value is missing -func Required(name, in string, value interface{}) *Validation { - var msg string - if in == "" { - msg = fmt.Sprintf(requiredFailNoIn, name) - } else { - msg = fmt.Sprintf(requiredFail, name, in) - } - return &Validation{ - code: RequiredFailCode, - Name: name, - In: in, - Value: value, - message: msg, - } -} - -// ReadOnly error for when a value is present in request -func ReadOnly(name, in string, value interface{}) *Validation { - var msg string - if in == "" { - msg = fmt.Sprintf(readOnlyFailNoIn, name) - } else { - msg = fmt.Sprintf(readOnlyFail, name, in) - } - return &Validation{ - code: ReadOnlyFailCode, - Name: name, - In: in, - Value: value, - message: msg, - } -} - -// TooLong error for when a string is too long -func TooLong(name, in string, max int64, value interface{}) *Validation { - var msg string - if in == "" { - msg = fmt.Sprintf(tooLongMessageNoIn, name, max) - } else { - msg = fmt.Sprintf(tooLongMessage, name, in, max) - } - return &Validation{ - code: TooLongFailCode, - Name: name, - In: in, - Value: value, - message: msg, - } -} - -// TooShort error for when a string is too short -func TooShort(name, in string, min int64, value interface{}) *Validation { - var msg string - if in == "" { - msg = fmt.Sprintf(tooShortMessageNoIn, name, min) - } else { - msg = fmt.Sprintf(tooShortMessage, name, in, min) - } - - return &Validation{ - code: TooShortFailCode, - Name: name, - In: in, - Value: value, - message: msg, - } -} - -// FailedPattern error for when a string fails a regex pattern match -// the pattern that is returned is the ECMA syntax version of the pattern not the golang version. 
-func FailedPattern(name, in, pattern string, value interface{}) *Validation { - var msg string - if in == "" { - msg = fmt.Sprintf(patternFailNoIn, name, pattern) - } else { - msg = fmt.Sprintf(patternFail, name, in, pattern) - } - - return &Validation{ - code: PatternFailCode, - Name: name, - In: in, - Value: value, - message: msg, - } -} - -// MultipleOfMustBePositive error for when a -// multipleOf factor is negative -func MultipleOfMustBePositive(name, in string, factor interface{}) *Validation { - return &Validation{ - code: MultipleOfMustBePositiveCode, - Name: name, - In: in, - Value: factor, - message: fmt.Sprintf(multipleOfMustBePositive, name, factor), - } -} diff --git a/vendor/github.com/go-openapi/loads/.editorconfig b/vendor/github.com/go-openapi/loads/.editorconfig deleted file mode 100644 index 3152da69a..000000000 --- a/vendor/github.com/go-openapi/loads/.editorconfig +++ /dev/null @@ -1,26 +0,0 @@ -# top-most EditorConfig file -root = true - -# Unix-style newlines with a newline ending every file -[*] -end_of_line = lf -insert_final_newline = true -indent_style = space -indent_size = 2 -trim_trailing_whitespace = true - -# Set default charset -[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] -charset = utf-8 - -# Tab indentation (no size specified) -[*.go] -indent_style = tab - -[*.md] -trim_trailing_whitespace = false - -# Matches the exact files either package.json or .travis.yml -[{package.json,.travis.yml}] -indent_style = space -indent_size = 2 diff --git a/vendor/github.com/go-openapi/loads/.gitignore b/vendor/github.com/go-openapi/loads/.gitignore deleted file mode 100644 index e4f15f17b..000000000 --- a/vendor/github.com/go-openapi/loads/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -secrets.yml -coverage.out -profile.cov -profile.out diff --git a/vendor/github.com/go-openapi/loads/.golangci.yml b/vendor/github.com/go-openapi/loads/.golangci.yml deleted file mode 100644 index d48b4a515..000000000 --- a/vendor/github.com/go-openapi/loads/.golangci.yml +++ /dev/null @@ -1,44 +0,0 @@ -linters-settings: - govet: - check-shadowing: true - golint: - min-confidence: 0 - gocyclo: - min-complexity: 30 - maligned: - suggest-new: true - dupl: - threshold: 100 - goconst: - min-len: 2 - min-occurrences: 4 - -linters: - enable-all: true - disable: - - maligned - - lll - - gochecknoglobals - - gochecknoinits - - godox - - gocognit - - whitespace - - wsl - - funlen - - gochecknoglobals - - gochecknoinits - - scopelint - - wrapcheck - - exhaustivestruct - - exhaustive - - nlreturn - - testpackage - - gci - - gofumpt - - goerr113 - - gomnd - - tparallel - - nestif - - godot - - errorlint - - paralleltest diff --git a/vendor/github.com/go-openapi/loads/.travis.yml b/vendor/github.com/go-openapi/loads/.travis.yml deleted file mode 100644 index cd4a7c331..000000000 --- a/vendor/github.com/go-openapi/loads/.travis.yml +++ /dev/null @@ -1,25 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- 1.16.x -- 1.x -install: -- go get gotest.tools/gotestsum -language: go -arch: -- amd64 -- ppc64le -jobs: - include: - # include linting job, but only for latest go version and amd64 arch - - go: 1.x - arch: amd64 - install: - go get github.com/golangci/golangci-lint/cmd/golangci-lint - script: - - golangci-lint run --new-from-rev master -notifications: - slack: - secure: 
OxkPwVp35qBTUilgWC8xykSj+sGMcj0h8IIOKD+Rflx2schZVlFfdYdyVBM+s9OqeOfvtuvnR9v1Ye2rPKAvcjWdC4LpRGUsgmItZaI6Um8Aj6+K9udCw5qrtZVfOVmRu8LieH//XznWWKdOultUuniW0MLqw5+II87Gd00RWbCGi0hk0PykHe7uK+PDA2BEbqyZ2WKKYCvfB3j+0nrFOHScXqnh0V05l2E83J4+Sgy1fsPy+1WdX58ZlNBG333ibaC1FS79XvKSmTgKRkx3+YBo97u6ZtUmJa5WZjf2OdLG3KIckGWAv6R5xgxeU31N0Ng8L332w/Edpp2O/M2bZwdnKJ8hJQikXIAQbICbr+lTDzsoNzMdEIYcHpJ5hjPbiUl3Bmd+Jnsjf5McgAZDiWIfpCKZ29tPCEkVwRsOCqkyPRMNMzHHmoja495P5jR+ODS7+J8RFg5xgcnOgpP9D4Wlhztlf5WyZMpkLxTUD+bZq2SRf50HfHFXTkfq22zPl3d1eq0yrLwh/Z/fWKkfb6SyysROL8y6s8u3dpFX1YHSg0BR6i913h4aoZw9B2BG27cafLLTwKYsp2dFo1PWl4O6u9giFJIeqwloZHLKKrwh0cBFhB7RH0I58asxkZpCH6uWjJierahmHe7iS+E6i+9oCHkOZ59hmCYNimIs3hM= -script: -- gotestsum -f short-verbose -- -race -timeout=20m -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065e..000000000 --- a/vendor/github.com/go-openapi/loads/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. 
- -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/loads/LICENSE b/vendor/github.com/go-openapi/loads/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/go-openapi/loads/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
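For reference, the go-openapi/errors API deleted earlier in this patch (New, Required, CompositeValidationError, ServeError) is typically wired into an HTTP handler along the following lines. This is a hedged sketch that relies only on the signatures visible in the removed api.go and schema.go; the handler name and route are hypothetical.

package main

import (
	"net/http"

	"github.com/go-openapi/errors"
)

// listWidgets is a hypothetical handler showing how the removed helpers
// compose: build *Validation errors, group them with CompositeValidationError,
// and let ServeError choose the HTTP status and write the JSON body.
func listWidgets(rw http.ResponseWriter, r *http.Request) {
	var errs []error

	if r.URL.Query().Get("owner") == "" {
		// Required yields "owner in query is required" with a 6xx validation
		// code; asHTTPCode maps codes >= 600 back to DefaultHTTPCode (422).
		errs = append(errs, errors.Required("owner", "query", nil))
	}

	if len(errs) > 0 {
		// ServeError flattens the composite and serves its first element
		// as application/json.
		errors.ServeError(rw, r, errors.CompositeValidationError(errs...))
		return
	}

	// A plain API error with an explicit status code.
	errors.ServeError(rw, r, errors.New(http.StatusNotImplemented, "listing widgets is not implemented"))
}

func main() {
	http.HandleFunc("/widgets", listWidgets)
	_ = http.ListenAndServe(":8080", nil)
}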
diff --git a/vendor/github.com/go-openapi/loads/README.md b/vendor/github.com/go-openapi/loads/README.md deleted file mode 100644 index df1f62646..000000000 --- a/vendor/github.com/go-openapi/loads/README.md +++ /dev/null @@ -1,6 +0,0 @@ -# Loads OAI specs [![Build Status](https://travis-ci.org/go-openapi/loads.svg?branch=master)](https://travis-ci.org/go-openapi/loads) [![codecov](https://codecov.io/gh/go-openapi/loads/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/loads) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) [![Actions/Go Test Status](https://github.com/go-openapi/loads/workflows/Go%20Test/badge.svg)](https://github.com/go-openapi/loads/actions?query=workflow%3A"Go+Test") - -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/loads/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/loads?status.svg)](http://godoc.org/github.com/go-openapi/loads) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/loads)](https://goreportcard.com/report/github.com/go-openapi/loads) - -Loading of OAI specification documents from local or remote locations. Supports JSON and YAML documents. diff --git a/vendor/github.com/go-openapi/loads/doc.go b/vendor/github.com/go-openapi/loads/doc.go deleted file mode 100644 index 3046da4ce..000000000 --- a/vendor/github.com/go-openapi/loads/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package loads provides document loading methods for swagger (OAI) specifications. - -It is used by other go-openapi packages to load and run analysis on local or remote spec documents. - -*/ -package loads diff --git a/vendor/github.com/go-openapi/loads/loaders.go b/vendor/github.com/go-openapi/loads/loaders.go deleted file mode 100644 index 44bd32b5b..000000000 --- a/vendor/github.com/go-openapi/loads/loaders.go +++ /dev/null @@ -1,134 +0,0 @@ -package loads - -import ( - "encoding/json" - "errors" - "net/url" - - "github.com/go-openapi/spec" - "github.com/go-openapi/swag" -) - -var ( - // Default chain of loaders, defined at the package level. - // - // By default this matches json and yaml documents. - // - // May be altered with AddLoader(). 
- loaders *loader -) - -func init() { - jsonLoader := &loader{ - DocLoaderWithMatch: DocLoaderWithMatch{ - Match: func(pth string) bool { - return true - }, - Fn: JSONDoc, - }, - } - - loaders = jsonLoader.WithHead(&loader{ - DocLoaderWithMatch: DocLoaderWithMatch{ - Match: swag.YAMLMatcher, - Fn: swag.YAMLDoc, - }, - }) - - // sets the global default loader for go-openapi/spec - spec.PathLoader = loaders.Load -} - -// DocLoader represents a doc loader type -type DocLoader func(string) (json.RawMessage, error) - -// DocMatcher represents a predicate to check if a loader matches -type DocMatcher func(string) bool - -// DocLoaderWithMatch describes a loading function for a given extension match. -type DocLoaderWithMatch struct { - Fn DocLoader - Match DocMatcher -} - -// NewDocLoaderWithMatch builds a DocLoaderWithMatch to be used in load options -func NewDocLoaderWithMatch(fn DocLoader, matcher DocMatcher) DocLoaderWithMatch { - return DocLoaderWithMatch{ - Fn: fn, - Match: matcher, - } -} - -type loader struct { - DocLoaderWithMatch - Next *loader -} - -// WithHead adds a loader at the head of the current stack -func (l *loader) WithHead(head *loader) *loader { - if head == nil { - return l - } - head.Next = l - return head -} - -// WithNext adds a loader at the trail of the current stack -func (l *loader) WithNext(next *loader) *loader { - l.Next = next - return next -} - -// Load the raw document from path -func (l *loader) Load(path string) (json.RawMessage, error) { - _, erp := url.Parse(path) - if erp != nil { - return nil, erp - } - - var lastErr error = errors.New("no loader matched") // default error if no match was found - for ldr := l; ldr != nil; ldr = ldr.Next { - if ldr.Match != nil && !ldr.Match(path) { - continue - } - - // try then move to next one if there is an error - b, err := ldr.Fn(path) - if err == nil { - return b, nil - } - - lastErr = err - } - - return nil, lastErr -} - -// JSONDoc loads a json document from either a file or a remote url -func JSONDoc(path string) (json.RawMessage, error) { - data, err := swag.LoadFromFileOrHTTP(path) - if err != nil { - return nil, err - } - return json.RawMessage(data), nil -} - -// AddLoader for a document, executed before other previously set loaders. -// -// This sets the configuration at the package level. 
-// -// NOTE: -// * this updates the default loader used by github.com/go-openapi/spec -// * since this sets package level globals, you shouln't call this concurrently -// -func AddLoader(predicate DocMatcher, load DocLoader) { - loaders = loaders.WithHead(&loader{ - DocLoaderWithMatch: DocLoaderWithMatch{ - Match: predicate, - Fn: load, - }, - }) - - // sets the global default loader for go-openapi/spec - spec.PathLoader = loaders.Load -} diff --git a/vendor/github.com/go-openapi/loads/options.go b/vendor/github.com/go-openapi/loads/options.go deleted file mode 100644 index f8305d560..000000000 --- a/vendor/github.com/go-openapi/loads/options.go +++ /dev/null @@ -1,61 +0,0 @@ -package loads - -type options struct { - loader *loader -} - -func defaultOptions() *options { - return &options{ - loader: loaders, - } -} - -func loaderFromOptions(options []LoaderOption) *loader { - opts := defaultOptions() - for _, apply := range options { - apply(opts) - } - - return opts.loader -} - -// LoaderOption allows to fine-tune the spec loader behavior -type LoaderOption func(*options) - -// WithDocLoader sets a custom loader for loading specs -func WithDocLoader(l DocLoader) LoaderOption { - return func(opt *options) { - if l == nil { - return - } - opt.loader = &loader{ - DocLoaderWithMatch: DocLoaderWithMatch{ - Fn: l, - }, - } - } -} - -// WithDocLoaderMatches sets a chain of custom loaders for loading specs -// for different extension matches. -// -// Loaders are executed in the order of provided DocLoaderWithMatch'es. -func WithDocLoaderMatches(l ...DocLoaderWithMatch) LoaderOption { - return func(opt *options) { - var final, prev *loader - for _, ldr := range l { - if ldr.Fn == nil { - continue - } - - if prev == nil { - final = &loader{DocLoaderWithMatch: ldr} - prev = final - continue - } - - prev = prev.WithNext(&loader{DocLoaderWithMatch: ldr}) - } - opt.loader = final - } -} diff --git a/vendor/github.com/go-openapi/loads/spec.go b/vendor/github.com/go-openapi/loads/spec.go deleted file mode 100644 index 93c8d4b89..000000000 --- a/vendor/github.com/go-openapi/loads/spec.go +++ /dev/null @@ -1,266 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package loads - -import ( - "bytes" - "encoding/gob" - "encoding/json" - "fmt" - - "github.com/go-openapi/analysis" - "github.com/go-openapi/spec" - "github.com/go-openapi/swag" -) - -func init() { - gob.Register(map[string]interface{}{}) - gob.Register([]interface{}{}) -} - -// Document represents a swagger spec document -type Document struct { - // specAnalyzer - Analyzer *analysis.Spec - spec *spec.Swagger - specFilePath string - origSpec *spec.Swagger - schema *spec.Schema - raw json.RawMessage - pathLoader *loader -} - -// JSONSpec loads a spec from a json document -func JSONSpec(path string, options ...LoaderOption) (*Document, error) { - data, err := JSONDoc(path) - if err != nil { - return nil, err - } - // convert to json - return Analyzed(data, "", options...) 
-} - -// Embedded returns a Document based on embedded specs. No analysis is required -func Embedded(orig, flat json.RawMessage, options ...LoaderOption) (*Document, error) { - var origSpec, flatSpec spec.Swagger - if err := json.Unmarshal(orig, &origSpec); err != nil { - return nil, err - } - if err := json.Unmarshal(flat, &flatSpec); err != nil { - return nil, err - } - return &Document{ - raw: orig, - origSpec: &origSpec, - spec: &flatSpec, - pathLoader: loaderFromOptions(options), - }, nil -} - -// Spec loads a new spec document from a local or remote path -func Spec(path string, options ...LoaderOption) (*Document, error) { - - ldr := loaderFromOptions(options) - - b, err := ldr.Load(path) - if err != nil { - return nil, err - } - - document, err := Analyzed(b, "", options...) - if err != nil { - return nil, err - } - - if document != nil { - document.specFilePath = path - document.pathLoader = ldr - } - - return document, err -} - -// Analyzed creates a new analyzed spec document for a root json.RawMessage. -func Analyzed(data json.RawMessage, version string, options ...LoaderOption) (*Document, error) { - if version == "" { - version = "2.0" - } - if version != "2.0" { - return nil, fmt.Errorf("spec version %q is not supported", version) - } - - raw, err := trimData(data) // trim blanks, then convert yaml docs into json - if err != nil { - return nil, err - } - - swspec := new(spec.Swagger) - if err = json.Unmarshal(raw, swspec); err != nil { - return nil, err - } - - origsqspec, err := cloneSpec(swspec) - if err != nil { - return nil, err - } - - d := &Document{ - Analyzer: analysis.New(swspec), - schema: spec.MustLoadSwagger20Schema(), - spec: swspec, - raw: raw, - origSpec: origsqspec, - pathLoader: loaderFromOptions(options), - } - - return d, nil -} - -func trimData(in json.RawMessage) (json.RawMessage, error) { - trimmed := bytes.TrimSpace(in) - if len(trimmed) == 0 { - return in, nil - } - - if trimmed[0] == '{' || trimmed[0] == '[' { - return trimmed, nil - } - - // assume yaml doc: convert it to json - yml, err := swag.BytesToYAMLDoc(trimmed) - if err != nil { - return nil, fmt.Errorf("analyzed: %v", err) - } - - d, err := swag.YAMLToJSON(yml) - if err != nil { - return nil, fmt.Errorf("analyzed: %v", err) - } - - return d, nil -} - -// Expanded expands the ref fields in the spec document and returns a new spec document -func (d *Document) Expanded(options ...*spec.ExpandOptions) (*Document, error) { - - swspec := new(spec.Swagger) - if err := json.Unmarshal(d.raw, swspec); err != nil { - return nil, err - } - - var expandOptions *spec.ExpandOptions - if len(options) > 0 { - expandOptions = options[0] - } else { - expandOptions = &spec.ExpandOptions{ - RelativeBase: d.specFilePath, - } - } - - if expandOptions.PathLoader == nil { - if d.pathLoader != nil { - // use loader from Document options - expandOptions.PathLoader = d.pathLoader.Load - } else { - // use package level loader - expandOptions.PathLoader = loaders.Load - } - } - - if err := spec.ExpandSpec(swspec, expandOptions); err != nil { - return nil, err - } - - dd := &Document{ - Analyzer: analysis.New(swspec), - spec: swspec, - specFilePath: d.specFilePath, - schema: spec.MustLoadSwagger20Schema(), - raw: d.raw, - origSpec: d.origSpec, - } - return dd, nil -} - -// BasePath the base path for this spec -func (d *Document) BasePath() string { - return d.spec.BasePath -} - -// Version returns the version of this spec -func (d *Document) Version() string { - return d.spec.Swagger -} - -// Schema returns the swagger 
2.0 schema -func (d *Document) Schema() *spec.Schema { - return d.schema -} - -// Spec returns the swagger spec object model -func (d *Document) Spec() *spec.Swagger { - return d.spec -} - -// Host returns the host for the API -func (d *Document) Host() string { - return d.spec.Host -} - -// Raw returns the raw swagger spec as json bytes -func (d *Document) Raw() json.RawMessage { - return d.raw -} - -// OrigSpec yields the original spec -func (d *Document) OrigSpec() *spec.Swagger { - return d.origSpec -} - -// ResetDefinitions gives a shallow copy with the models reset to the original spec -func (d *Document) ResetDefinitions() *Document { - defs := make(map[string]spec.Schema, len(d.origSpec.Definitions)) - for k, v := range d.origSpec.Definitions { - defs[k] = v - } - - d.spec.Definitions = defs - return d -} - -// Pristine creates a new pristine document instance based on the input data -func (d *Document) Pristine() *Document { - dd, _ := Analyzed(d.Raw(), d.Version()) - dd.pathLoader = d.pathLoader - return dd -} - -// SpecFilePath returns the file path of the spec if one is defined -func (d *Document) SpecFilePath() string { - return d.specFilePath -} - -func cloneSpec(src *spec.Swagger) (*spec.Swagger, error) { - var b bytes.Buffer - if err := gob.NewEncoder(&b).Encode(src); err != nil { - return nil, err - } - - var dst spec.Swagger - if err := gob.NewDecoder(&b).Decode(&dst); err != nil { - return nil, err - } - return &dst, nil -} diff --git a/vendor/github.com/go-openapi/spec/.editorconfig b/vendor/github.com/go-openapi/spec/.editorconfig deleted file mode 100644 index 3152da69a..000000000 --- a/vendor/github.com/go-openapi/spec/.editorconfig +++ /dev/null @@ -1,26 +0,0 @@ -# top-most EditorConfig file -root = true - -# Unix-style newlines with a newline ending every file -[*] -end_of_line = lf -insert_final_newline = true -indent_style = space -indent_size = 2 -trim_trailing_whitespace = true - -# Set default charset -[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] -charset = utf-8 - -# Tab indentation (no size specified) -[*.go] -indent_style = tab - -[*.md] -trim_trailing_whitespace = false - -# Matches the exact files either package.json or .travis.yml -[{package.json,.travis.yml}] -indent_style = space -indent_size = 2 diff --git a/vendor/github.com/go-openapi/spec/.gitignore b/vendor/github.com/go-openapi/spec/.gitignore deleted file mode 100644 index dd91ed6a0..000000000 --- a/vendor/github.com/go-openapi/spec/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -secrets.yml -coverage.out diff --git a/vendor/github.com/go-openapi/spec/.golangci.yml b/vendor/github.com/go-openapi/spec/.golangci.yml deleted file mode 100644 index 835d55e74..000000000 --- a/vendor/github.com/go-openapi/spec/.golangci.yml +++ /dev/null @@ -1,42 +0,0 @@ -linters-settings: - govet: - check-shadowing: true - golint: - min-confidence: 0 - gocyclo: - min-complexity: 45 - maligned: - suggest-new: true - dupl: - threshold: 200 - goconst: - min-len: 2 - min-occurrences: 2 - -linters: - enable-all: true - disable: - - maligned - - unparam - - lll - - gochecknoinits - - gochecknoglobals - - funlen - - godox - - gocognit - - whitespace - - wsl - - wrapcheck - - testpackage - - nlreturn - - gomnd - - exhaustivestruct - - goerr113 - - errorlint - - nestif - - godot - - gofumpt - - paralleltest - - tparallel - - thelper - - ifshort diff --git a/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065e..000000000 --- 
a/vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. 
- -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/spec/LICENSE b/vendor/github.com/go-openapi/spec/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/go-openapi/spec/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/go-openapi/spec/README.md b/vendor/github.com/go-openapi/spec/README.md deleted file mode 100644 index 18782c6da..000000000 --- a/vendor/github.com/go-openapi/spec/README.md +++ /dev/null @@ -1,34 +0,0 @@ -# OAI object model - -[![Build Status](https://travis-ci.org/go-openapi/spec.svg?branch=master)](https://travis-ci.org/go-openapi/spec) - -[![codecov](https://codecov.io/gh/go-openapi/spec/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/spec) -[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/spec/master/LICENSE) -[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/spec.svg)](https://pkg.go.dev/github.com/go-openapi/spec) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/spec)](https://goreportcard.com/report/github.com/go-openapi/spec) - -The object model for OpenAPI specification documents. - -### FAQ - -* What does this do? - -> 1. This package knows how to marshal and unmarshal Swagger API specifications into a golang object model -> 2. It knows how to resolve $ref and expand them to make a single root document - -* How does it play with the rest of the go-openapi packages ? - -> 1. This package is at the core of the go-openapi suite of packages and [code generator](https://github.com/go-swagger/go-swagger) -> 2. There is a [spec loading package](https://github.com/go-openapi/loads) to fetch specs as JSON or YAML from local or remote locations -> 3. There is a [spec validation package](https://github.com/go-openapi/validate) built on top of it -> 4. There is a [spec analysis package](https://github.com/go-openapi/analysis) built on top of it, to analyze, flatten, fix and merge spec documents - -* Does this library support OpenAPI 3? - -> No. -> This package currently only supports OpenAPI 2.0 (aka Swagger 2.0). -> There is no plan to make it evolve toward supporting OpenAPI 3.x. -> This [discussion thread](https://github.com/go-openapi/spec/issues/21) relates the full story. 
-> -> An early attempt to support Swagger 3 may be found at: https://github.com/go-openapi/spec3 diff --git a/vendor/github.com/go-openapi/spec/appveyor.yml b/vendor/github.com/go-openapi/spec/appveyor.yml deleted file mode 100644 index 090359391..000000000 --- a/vendor/github.com/go-openapi/spec/appveyor.yml +++ /dev/null @@ -1,32 +0,0 @@ -version: "0.1.{build}" - -clone_folder: C:\go-openapi\spec -shallow_clone: true # for startup speed -pull_requests: - do_not_increment_build_number: true - -#skip_tags: true -#skip_branch_with_pr: true - -# appveyor.yml -build: off - -environment: - GOPATH: c:\gopath - -stack: go 1.15 - -test_script: - - go test -v -timeout 20m ./... - -deploy: off - -notifications: - - provider: Slack - incoming_webhook: https://hooks.slack.com/services/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 - auth_token: - secure: Sf7kZf7ZGbnwWUMpffHwMu5A0cHkLK2MYY32LNTPj4+/3qC3Ghl7+9v4TSLOqOlCwdRNjOGblAq7s+GDJed6/xgRQl1JtCi1klzZNrYX4q01pgTPvvGcwbBkIYgeMaPeIRcK9OZnud7sRXdttozgTOpytps2U6Js32ip7uj5mHSg2ub0FwoSJwlS6dbezZ8+eDhoha0F/guY99BEwx8Bd+zROrT2TFGsSGOFGN6wFc7moCqTHO/YkWib13a2QNXqOxCCVBy/lt76Wp+JkeFppjHlzs/2lP3EAk13RIUAaesdEUHvIHrzCyNJEd3/+KO2DzsWOYfpktd+KBCvgaYOsoo7ubdT3IROeAegZdCgo/6xgCEsmFc9ZcqCfN5yNx2A+BZ2Vwmpws+bQ1E1+B5HDzzaiLcYfG4X2O210QVGVDLWsv1jqD+uPYeHY2WRfh5ZsIUFvaqgUEnwHwrK44/8REAhQavt1QAj5uJpsRd7CkRVPWRNK+yIky+wgbVUFEchRNmS55E7QWf+W4+4QZkQi7vUTMc9nbTUu2Es9NfvfudOpM2wZbn98fjpb/qq/nRv6Bk+ca+7XD5/IgNLMbWp2ouDdzbiHLCOfDUiHiDJhLfFZx9Bwo7ZwfzeOlbrQX66bx7xRKYmOe4DLrXhNcpbsMa8qbfxlZRCmYbubB/Y8h4= - channel: bots - on_build_success: false - on_build_failure: true - on_build_status_changed: true diff --git a/vendor/github.com/go-openapi/spec/bindata.go b/vendor/github.com/go-openapi/spec/bindata.go deleted file mode 100644 index afc83850c..000000000 --- a/vendor/github.com/go-openapi/spec/bindata.go +++ /dev/null @@ -1,297 +0,0 @@ -// Code generated by go-bindata. DO NOT EDIT. 
-// sources: -// schemas/jsonschema-draft-04.json (4.357kB) -// schemas/v2/schema.json (40.248kB) - -package spec - -import ( - "bytes" - "compress/gzip" - "crypto/sha256" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "time" -) - -func bindataRead(data []byte, name string) ([]byte, error) { - gz, err := gzip.NewReader(bytes.NewBuffer(data)) - if err != nil { - return nil, fmt.Errorf("read %q: %v", name, err) - } - - var buf bytes.Buffer - _, err = io.Copy(&buf, gz) - clErr := gz.Close() - - if err != nil { - return nil, fmt.Errorf("read %q: %v", name, err) - } - if clErr != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -type asset struct { - bytes []byte - info os.FileInfo - digest [sha256.Size]byte -} - -type bindataFileInfo struct { - name string - size int64 - mode os.FileMode - modTime time.Time -} - -func (fi bindataFileInfo) Name() string { - return fi.name -} -func (fi bindataFileInfo) Size() int64 { - return fi.size -} -func (fi bindataFileInfo) Mode() os.FileMode { - return fi.mode -} -func (fi bindataFileInfo) ModTime() time.Time { - return fi.modTime -} -func (fi bindataFileInfo) IsDir() bool { - return false -} -func (fi bindataFileInfo) Sys() interface{} { - return nil -} - -var _jsonschemaDraft04Json = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xc4\x57\x3d\x6f\xdb\x3c\x10\xde\xf3\x2b\x08\x26\x63\xf2\x2a\x2f\xd0\xc9\x5b\xd1\x2e\x01\x5a\x34\x43\x37\x23\x03\x6d\x9d\x6c\x06\x14\xa9\x50\x54\x60\xc3\xd0\x7f\x2f\x28\x4a\x14\x29\x91\x92\x2d\xa7\x8d\x97\x28\xbc\xaf\xe7\x8e\xf7\xc5\xd3\x0d\x42\x08\x61\x9a\xe2\x15\xc2\x7b\xa5\x8a\x55\x92\xbc\x96\x82\x3f\x94\xdb\x3d\xe4\xe4\x3f\x21\x77\x49\x2a\x49\xa6\x1e\x1e\xbf\x24\xe6\xec\x16\xdf\x1b\xa1\x3b\xf3\xff\x02\xc9\x14\xca\xad\xa4\x85\xa2\x82\x6b\xe9\x6f\x42\x02\x32\x2c\x28\x07\x45\x5a\x15\x3d\x77\x46\x39\xd5\xcc\x25\x5e\x21\x83\xb8\x21\x18\xb6\xaf\x52\x92\xa3\x47\x68\x88\xea\x58\x80\x56\x4e\x1a\xf2\xbd\x4f\xcc\x29\x7f\x52\x90\x6b\x7d\xff\x0f\x48\xb4\x3d\x3f\x21\x7c\x27\x21\xd3\x2a\x6e\x31\xaa\x2d\x53\xdd\xf3\xe3\x42\x94\x54\xd1\x77\x78\xe2\x0a\x76\x20\xe3\x20\x68\xcb\x30\x86\x41\xf3\x2a\xc7\x2b\xf4\x78\x8e\xfe\xef\x90\x91\x8a\xa9\xc7\xb1\x1d\xc2\xd8\x2f\x0d\x75\xed\xc1\x4e\x9c\xc8\x25\x43\xac\xa8\xbe\xd7\xcc\xa9\xd1\xa9\x21\xa0\x1a\xbd\x04\x61\x94\x34\x2f\x18\xfc\x3e\x16\x50\x8e\x4d\x03\x6f\x1c\x58\xdb\x48\x23\xbc\x11\x82\x01\xe1\xfa\xd3\x3a\x8e\x30\xaf\x18\x33\x7f\xf3\x8d\x39\x11\x9b\x57\xd8\x2a\xfd\x55\x2a\x49\xf9\x0e\xc7\xec\x37\xd4\x25\xf7\xec\x5c\x66\xc7\xd7\x99\xaa\xcf\x4f\x89\x8a\xd3\xb7\x0a\x3a\xaa\x92\x15\xf4\x30\x6f\x1c\xb0\xd6\x46\xe7\x98\x39\x2d\xa4\x28\x40\x2a\x3a\x88\x9e\x29\xba\x88\x37\x2d\xca\x60\x38\xfa\xba\x5b\x20\xac\xa8\x62\xb0\x4c\xd4\xaf\xda\x45\x0a\xba\x5c\x3b\xb9\xc7\x79\xc5\x14\x2d\x18\x34\x19\x1c\x51\xdb\x25\x4d\xb4\x7e\x06\x14\x38\x6c\x59\x55\xd2\x77\xf8\x69\x59\xfc\x7b\x73\xed\x93\x43\xcb\x32\x6d\x3c\x28\xdc\x1b\x9a\xd3\x62\xab\xc2\x27\xf7\x41\xc9\x08\x2b\x23\x08\xad\x13\x57\x21\x9c\xd3\x72\x0d\x42\x72\xf8\x01\x7c\xa7\xf6\x83\xce\x39\xd7\x82\x3c\x1f\x2f\xd6\x60\x1b\xa2\xdf\x35\x89\x52\x20\xe7\x73\x74\xe0\x66\x26\x64\x4e\xb4\x97\x58\xc2\x0e\x0e\xe1\x60\x92\x34\x6d\xa0\x10\xd6\xb5\x83\x61\x27\xe6\x47\xd3\x89\xbd\x63\xfd\x3b\x8d\x03\x3d\x6c\x42\x2d\x5b\x70\xee\xe8\xdf\x4b\xf4\x66\x4e\xe1\x01\x45\x17\x80\x74\xad\x4f\xc3\xf3\xae\xc6\x1d\xc6\xd7\xc2\xce\xc9\xe1\x29\x30\x86\x2f\x4a\xa6\x4b\x15\x84\x73\xc9\x6f\xfd\x7f\xa5\x6e\x9e\xbd\xf1\xb0\xd4\xdd\x45\x5a\xc2\x3e\x4b\x78\xab\xa8\x84\x74\x4a\x91\x3b\x92\x23\x05\xf2\x1c\x1e\x7b\xf3\x09\xf8\xcf\xab\x24\xb6\x60\xa2\xe8\x4c\x9f\x75\x
77\xaa\x8c\xe6\x01\x45\x36\x86\xcf\xc3\x63\x3a\xea\xd4\x8d\x7e\x06\xac\x14\x0a\xe0\x29\xf0\xed\x07\x22\x1a\x65\xda\x44\xae\xa2\x73\x1a\xe6\x90\x69\xa2\x8c\x46\xb2\x2f\xde\x49\x38\x08\xed\xfe\xfd\x41\xaf\x9f\xa9\x55\xd7\xdd\x22\x8d\xfa\x45\x63\xc5\x0f\x80\xf3\xb4\x08\xd6\x79\x30\x9e\x93\xee\x59\xa6\xd0\x4b\xee\x22\xe3\x33\xc1\x3a\x27\x68\x36\x78\x7e\x87\x0a\x06\xd5\x2e\x20\xd3\xaf\x15\xfb\xd8\x3b\x73\x14\xbb\x92\xed\x05\x5d\x2e\x29\x38\x2c\x94\xe4\x42\x45\x5e\xd3\xb5\x7d\xdf\x47\xca\x38\xb4\x5c\xaf\xfb\x7d\xdd\x6d\xf4\xa1\x2d\x77\xdd\x2f\xce\x6d\xc4\x7b\x8b\x4e\x67\xa9\x6f\xfe\x04\x00\x00\xff\xff\xb1\xd1\x27\x78\x05\x11\x00\x00") - -func jsonschemaDraft04JsonBytes() ([]byte, error) { - return bindataRead( - _jsonschemaDraft04Json, - "jsonschema-draft-04.json", - ) -} - -func jsonschemaDraft04Json() (*asset, error) { - bytes, err := jsonschemaDraft04JsonBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "jsonschema-draft-04.json", size: 4357, mode: os.FileMode(0640), modTime: time.Unix(1568963823, 0)} - a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe1, 0x48, 0x9d, 0xb, 0x47, 0x55, 0xf0, 0x27, 0x93, 0x30, 0x25, 0x91, 0xd3, 0xfc, 0xb8, 0xf0, 0x7b, 0x68, 0x93, 0xa8, 0x2a, 0x94, 0xf2, 0x48, 0x95, 0xf8, 0xe4, 0xed, 0xf1, 0x1b, 0x82, 0xe2}} - return a, nil -} - -var _v2SchemaJson = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x5d\x4f\x93\xdb\x36\xb2\xbf\xfb\x53\xa0\x14\x57\xd9\xae\xd8\x92\xe3\xf7\x2e\xcf\x97\xd4\xbc\xd8\x49\x66\x37\x5e\x4f\x79\x26\xbb\x87\x78\x5c\x05\x91\x2d\x09\x09\x09\x30\x00\x38\x33\x5a\xef\x7c\xf7\x2d\xf0\x9f\x08\x02\x20\x41\x8a\xd2\xc8\x0e\x0f\xa9\x78\x28\xa0\xd1\xdd\x68\x34\x7e\xdd\xf8\xf7\xf9\x11\x42\x33\x49\x64\x04\xb3\xd7\x68\x76\x86\xfe\x76\xf9\xfe\x1f\xe8\x32\xd8\x40\x8c\xd1\x8a\x71\x74\x79\x8b\xd7\x6b\xe0\xe8\xd5\xfc\x25\x3a\xbb\x38\x9f\xcf\x9e\xab\x0a\x24\x54\xa5\x37\x52\x26\xaf\x17\x0b\x91\x17\x99\x13\xb6\xb8\x79\xb5\x10\x59\xdd\xf9\xef\x82\xd1\x6f\xf2\xc2\x8f\xf3\x4f\xb5\x1a\xea\xc7\x17\x45\x41\xc6\xd7\x8b\x90\xe3\x95\x7c\xf1\xf2\x7f\x8b\xca\x45\x3d\xb9\x4d\x32\xa6\xd8\xf2\x77\x08\x64\xfe\x8d\xc3\x9f\x29\xe1\xa0\x9a\xff\xed\x11\x42\x08\xcd\x8a\xd6\xb3\x9f\x15\x67\x74\xc5\xca\x7f\x27\x58\x6e\xc4\xec\x11\x42\xd7\x59\x5d\x1c\x86\x44\x12\x46\x71\x74\xc1\x59\x02\x5c\x12\x10\xb3\xd7\x68\x85\x23\x01\x59\x81\x04\x4b\x09\x9c\x6a\xbf\x7e\xce\x49\x7d\xba\x7b\x51\xfd\xa1\x44\xe2\xb0\x52\xac\x7d\xb3\x08\x61\x45\x68\x46\x56\x2c\x6e\x80\x86\x8c\xbf\xbd\x93\x40\x05\x61\x74\x96\x95\xbe\x7f\x84\xd0\x7d\x4e\xde\x42\xb7\xe4\xbe\x46\xbb\x14\x5b\x48\x4e\xe8\xba\x90\x05\xa1\x19\xd0\x34\xae\xc4\xce\xbe\xbc\x9a\xbf\x9c\x15\x7f\x5d\x57\xc5\x42\x10\x01\x27\x89\xe2\x48\x51\xb9\xda\x40\xd5\x87\x37\xc0\x15\x5f\x88\xad\x90\xdc\x10\x81\x42\x16\xa4\x31\x50\x39\x2f\x38\xad\xab\xb0\x53\xd8\xac\x94\x56\x6f\xc3\x84\xf4\x11\xa4\x50\xb3\xfa\xe9\xd3\x6f\x9f\x3e\xdf\x2f\xd0\xeb\x8f\x1f\x3f\x7e\xbc\xfe\xf6\xe9\xf7\xaf\x5f\x7f\xfc\x18\x7e\xfb\xec\xfb\xc7\xb3\x36\x79\x54\x43\xe8\x29\xc5\x31\x20\xc6\x11\x49\x9e\xe5\x12\x41\x66\xa0\xe8\xed\x1d\x8e\x93\x08\x5e\xa3\x27\x3b\xc3\x7c\xa2\x73\xba\xc4\x02\x2e\xb0\xdc\xf4\xe5\x76\xd1\xca\x96\xa2\x8a\x94\xcd\x21\xc9\x6c\xec\x2c\x70\x42\x9e\x34\x74\x9d\x19\x7c\xcd\x20\x9c\xea\x2e\x0a\xfe\x42\x84\xd4\x29\x04\x8c\x8a\xb4\x41\xa2\xc1\xdc\x19\x8a\x88\x90\x4a\x49\xef\xce\xdf\xbd\x45\x4a\x52\x81\x70\x10\x40\x22\x21\x44\xcb\x6d\xc5\xec\x4e\x3c\x1c\x45\xef\x57\x9a\xb5\x7d\xae\xfe\xe5\xe4\x31\x86\x90\xe0\xab\x6d\x02\x3b\x2e\xcb\x11\x90\xd9\xa8\xc6\x77\xc2\x59\x98\x06\xfd\xf9\x2e\x78\x45\x01\xa6\xa8\xa0\x71\x5c\xbe\x33\xa7\xd2\xd9\
x5f\x95\xef\xd9\xd5\xac\xfd\xdc\x5d\xbf\x5e\xb8\xd1\x3e\xc7\x31\x48\xe0\x5e\x4c\x14\x65\xdf\xb8\xa8\x71\x10\x09\xa3\xc2\xc7\x02\xcb\xa2\x4e\x5a\x02\x82\x94\x13\xb9\xf5\x30\xe6\xb2\xa4\xb5\xfe\x9b\x3e\x7a\xb2\x55\xd2\xa8\x4a\xbc\x16\xb6\x71\x8e\x39\xc7\xdb\x9d\xe1\x10\x09\x71\xbd\x9c\xb3\x41\x89\xd7\xa5\x89\xdc\x57\xb5\x53\x4a\xfe\x4c\xe1\xbc\xa0\x21\x79\x0a\x1a\x0f\x70\xa7\x5c\x08\x8e\xde\xb0\xc0\x43\x24\xad\x74\x63\x0e\xb1\xd9\x90\xe1\xb0\x2d\x13\xa7\x6d\x78\xfd\x04\x14\x38\x8e\x90\xaa\xce\x63\xac\x3e\x23\xbc\x64\xa9\xb4\xf8\x03\x63\xde\xcd\xbe\x16\x13\x4a\x55\xac\x82\x12\xc6\xac\xd4\x35\xf7\x22\xd4\x3a\xff\x22\x73\x0e\x6e\x51\xa0\x75\x1e\xae\x8f\xe8\x5d\xc7\x59\xe6\xe4\x9a\x18\x8d\xd6\x1c\x53\x84\x4d\xb7\x67\x28\x37\x09\x84\x69\x88\x12\x0e\x01\x11\x80\x32\xa2\xf5\xb9\xaa\xc6\xd9\x73\x53\xab\xfb\xb4\x2e\x20\xc6\x54\x92\xa0\x9a\xf3\x69\x1a\x2f\x81\x77\x37\xae\x53\x1a\xce\x40\xc4\xa8\x82\x1c\xb5\xef\xda\x24\x7d\xb9\x61\x69\x14\xa2\x25\xa0\x90\xac\x56\xc0\x81\x4a\xb4\xe2\x2c\xce\x4a\x64\x7a\x9a\x23\xf4\x13\x91\x3f\xa7\x4b\xf4\x63\x84\x6f\x18\x87\x10\xbd\xc3\xfc\x8f\x90\xdd\x52\x44\x04\xc2\x51\xc4\x6e\x21\x74\x48\x21\x81\xc7\xe2\xfd\xea\x12\xf8\x0d\x09\xf6\xe9\x47\x35\xaf\x67\xc4\x14\xf7\x22\x27\x97\xe1\xe2\x76\x2d\x06\x8c\x4a\x1c\x48\x3f\x73\x2d\x0b\x5b\x29\x45\x24\x00\x2a\x0c\x11\xec\x94\xca\xc2\xa6\xc1\x37\x21\x43\x83\x3b\x5f\x97\xf1\x43\x5e\x53\x73\x19\xa5\x36\xd8\x2d\x05\x2e\x34\x0b\xeb\x39\xfc\x1d\x63\x51\x01\xbd\x3d\xbb\x90\x84\x40\x25\x59\x6d\x09\x5d\xa3\x1c\x37\xe6\x5c\x16\x9a\x40\x09\x70\xc1\xe8\x82\xf1\x35\xa6\xe4\xdf\x99\x5c\x8e\x9e\x4d\x79\xb4\x27\x2f\xbf\x7e\xf8\x05\x25\x8c\x50\xa9\x98\x29\x90\x62\x60\xea\x75\xae\x13\xca\xbf\x2b\x1a\x29\x27\x76\xd6\x20\xc6\x64\x5f\xe6\x32\x1a\x08\x87\x21\x07\x21\xbc\xb4\xe4\xe0\x32\x67\xa6\xcd\xf3\x1e\xcd\xd9\x6b\xb6\x6f\x8e\x27\xa7\xed\xdb\xe7\xbc\xcc\x1a\x07\xce\x6f\x87\x33\xf0\xba\x51\x17\x22\x66\x78\x79\x8e\xce\xe5\x13\x81\x80\x06\x2c\xe5\x78\x0d\xa1\xb2\xb8\x54\xa8\x79\x09\xbd\xbf\x3c\x47\x01\x8b\x13\x2c\xc9\x32\xaa\xaa\x1d\xd5\xee\xab\x36\xbd\x6c\xfd\x54\x6c\xc8\x08\x01\x3c\xbd\xe7\x07\x88\xb0\x24\x37\x79\x90\x28\x4a\x1d\x10\x1a\x92\x1b\x12\xa6\x38\x42\x40\xc3\x4c\x43\x62\x8e\xae\x36\xb0\x45\x71\x2a\xa4\x9a\x23\x79\x59\xb1\xa8\xf2\xa4\x0c\x60\x9f\xcc\x8d\x40\xf5\x80\xca\xa8\x99\xc3\xa7\x85\x1f\x31\x25\xa9\x82\xc5\x6d\xbd\xd8\x36\x76\x7c\x02\x28\x97\xf6\x1d\x74\x3b\x11\x7e\x91\xae\x32\xf8\x6c\xf4\xe6\x7b\x9a\xa5\x1f\x62\xc6\x21\xcf\x9a\xe5\xed\x8b\x02\xf3\x2c\x33\x33\xdf\x00\xca\xc9\x09\xb4\x04\xf5\xa5\x08\xd7\xc3\x02\x18\x66\xf1\xab\x1e\x83\x37\x4c\xcd\x12\xc1\x1d\x50\xf6\xaa\xbd\xfe\xe2\x73\x48\x38\x08\xa0\x32\x9b\x18\x44\x86\x0b\x6a\xc1\xaa\x26\x96\x2d\x96\x3c\xa0\x54\x65\x73\xe3\x08\xb5\x8b\x99\xbd\x82\xbc\x9e\xc2\xe8\x53\x46\x83\x3f\x33\x54\x2b\x5b\xad\x92\x79\xd9\x8f\x5d\x93\x98\xf2\xe6\xc6\x1c\xe6\x9a\x9e\xfc\x43\x82\x31\x66\x8e\x53\x77\xfe\x90\xe7\xf3\xf6\xe9\x62\x23\x3f\x10\x93\x18\xae\x72\x1a\x9d\xf9\x48\xcb\xcc\x5a\x65\xc7\x4a\x04\xf0\xf3\xd5\xd5\x05\x8a\x41\x08\xbc\x86\x86\x43\x51\x6c\xe0\x46\x57\xf6\x44\x40\x0d\xfb\xff\xa2\xc3\x7c\x3d\x39\x84\xdc\x09\x22\x64\x4f\x12\xd9\xba\xaa\xf6\xe3\xbd\x56\xdd\x91\x25\x6a\x14\x9c\x89\x34\x8e\x31\xdf\xee\x15\x7e\x2f\x39\x81\x15\x2a\x28\x95\x66\x51\xf5\xfd\x83\xc5\xfe\x15\x07\xcf\xf7\x08\xee\x1d\x8e\xb6\xc5\x52\xcc\x8c\x5a\x93\x66\xc5\xd8\x79\x38\x46\xd6\xa7\x88\x37\xc9\x2e\xe3\xd2\xa5\x7b\x4b\x3a\xdc\xa1\xdc\x9e\x29\xf1\x8c\x8a\x99\x16\x47\x8d\xd4\x78\x8b\xf6\x1c\xe9\x71\x54\x1b\x69\xa8\x4a\x93\x37\xe5\xb2\x2c\x4f\x0c\x92\xab\xa0\x73\x32\x72\x59\xd3\xf0\x2d\x8d\xed\xca\x37\x16\x19\x9e\xdb\x1c
\xab\x17\x49\xc3\x0f\x37\xdc\x88\xb1\xb4\xd4\x42\xcb\x58\x5e\x6a\x52\x0b\x15\x10\x0a\xb0\x04\xe7\xf8\x58\x32\x16\x01\xa6\xcd\x01\xb2\xc2\x69\x24\x35\x38\x6f\x30\x6a\xae\x1b\xb4\x71\xaa\xad\x1d\xa0\xd6\x20\x2d\x8b\x3c\xc6\x82\x62\x27\x34\x6d\x15\x84\x7b\x43\xb1\x35\x78\xa6\x24\x77\x28\xc1\x6e\xfc\xe9\x48\x74\xf4\x15\xe3\xe1\x84\x42\x88\x40\x7a\x26\x49\x3b\x48\xb1\xa4\x19\x8e\x0c\xa7\xb5\x01\x6c\x0c\x97\x61\x8a\xc2\x32\xd8\x8c\x44\x69\x24\xbf\x65\x1d\x74\xd6\xe5\x44\xef\xec\x48\x5e\xb7\x8a\xa3\x29\x8e\x41\x64\xce\x1f\x88\xdc\x00\x47\x4b\x40\x98\x6e\xd1\x0d\x8e\x48\x98\x63\x5c\x21\xb1\x4c\x05\x0a\x58\x98\xc5\x6d\x4f\x0a\x77\x53\x4f\x8b\xc4\x44\x1f\xb2\xdf\x8d\x3b\xea\x9f\xfe\xf6\xf2\xc5\xff\x5d\x7f\xfe\x9f\xfb\x67\x8f\xff\xf3\xe9\x69\xd1\xfe\xb3\xc7\xfd\x3c\xf8\x3f\x71\x94\x82\x23\xd1\x72\x00\xb7\x42\x99\x6c\xc0\x60\x7b\x0f\x79\xea\xa8\x53\x4b\x56\x31\xfa\x0b\x52\x9f\x96\xdb\xcd\x2f\xd7\x67\xcd\x04\x19\x85\xfe\xdb\x02\x9a\x59\x03\xad\x63\x3c\xea\xff\x2e\x18\xfd\x00\xd9\xe2\x56\x60\x59\x93\xb9\xb6\xb2\x3e\x3c\x2c\xab\x0f\xa7\xb2\x89\x43\xc7\xf6\xd5\xce\x2e\xad\xa6\xa9\xed\xa6\xc6\x5a\xb4\xa6\x67\xdf\x8c\x26\x7b\x50\x5a\x91\x08\x2e\x6d\xd4\x3a\xc1\x9d\xf2\xdb\xde\x1e\xb2\x2c\x6c\xa5\x64\xc9\x16\xb4\x90\xaa\x4a\xb7\x0c\xde\x13\xc3\x2a\x9a\x11\x9b\x7a\x1b\x3d\x95\x97\x37\x31\x6b\x69\x7e\x34\xc0\x67\x1f\x66\x19\x49\xef\xf1\x25\xf5\xac\x0e\xea\x0a\x28\x8d\x4d\x7e\xd9\x57\x4b\x49\xe5\xc6\xb3\x25\xfd\xe6\x57\x42\x25\xac\xcd\xcf\x36\x74\x8e\xca\x24\x47\xe7\x80\xa8\x92\x72\xbd\x3d\x84\x2d\x65\xe2\x82\x1a\x9c\xc4\x44\x92\x1b\x10\x79\x8a\xc4\x4a\x2f\x60\x51\x04\x81\xaa\xf0\xa3\x95\x27\xd7\x12\x7b\xa3\x96\x03\x45\x96\xc1\x8a\x07\xc9\xb2\xb0\x95\x52\x8c\xef\x48\x9c\xc6\x7e\x94\xca\xc2\x0e\x07\x12\x44\xa9\x20\x37\xf0\xae\x0f\x49\xa3\x96\x9d\x4b\x42\x7b\x70\x59\x14\xee\xe0\xb2\x0f\x49\xa3\x96\x4b\x97\xbf\x00\x5d\x4b\x4f\xfc\xbb\x2b\xee\x92\xb9\x17\xb5\xaa\xb8\x0b\x97\x17\x9b\x43\xfd\xd6\xc2\xb2\xc2\x2e\x29\xcf\xfd\x87\x4a\x55\xda\x25\x63\x1f\x5a\x65\x69\x2b\x2d\x3d\x67\xe9\x41\xae\x5e\xc1\x6e\x2b\xd4\xdb\x3e\xa8\xd3\x26\xd2\x48\x92\x24\xca\x61\x86\x8f\x8c\xbb\xf2\x8e\x91\xdf\x1f\x06\x19\x33\xf3\x03\x4d\xba\xcd\xe2\x2d\xfb\x69\xe9\x16\x15\x13\xd5\x56\x85\x4e\x3c\x5b\x8a\xbf\x25\x72\x83\xee\x5e\x20\x22\xf2\xc8\xaa\x7b\xdb\x8e\xe4\x29\x58\xca\x38\xb7\x3f\x2e\x59\xb8\xbd\xa8\x16\x16\xf7\xdb\x79\x51\x9f\x5a\xb4\x8d\x87\x3a\x6e\xbc\x3e\xc5\xb4\xcd\x58\xf9\xf5\x3c\xb9\x6f\x49\xaf\x57\xc1\xfa\x1c\x5d\x6d\x88\x8a\x8b\xd3\x28\xcc\xb7\xef\x10\x8a\x4a\x74\xa9\x4a\xa7\x62\xbf\x0d\x76\x23\x6f\x59\xd9\x31\xee\x40\x11\xfb\x28\xec\x8d\x22\x1c\x13\x5a\x64\x94\x23\x16\x60\xbb\xd2\x7c\xa0\x98\xb2\xe5\x6e\xbc\x54\x33\xe0\x3e\xb9\x52\x17\xdb\xb7\x1b\xc8\x12\x20\x8c\x23\xca\x64\x7e\x78\xa3\x62\x5b\x75\x56\xd9\x9e\x2a\x91\x27\xb0\x70\x34\x1f\x90\x89\xb5\x86\x73\x7e\x71\xda\x1e\xfb\x3a\x72\xdc\x5e\x79\x88\xcb\x74\x79\xd9\x64\xe4\xd4\xc2\x9e\xce\xb1\xfe\x85\x5a\xc0\xe9\x0c\x34\x3d\xd0\x43\xce\xa1\x36\x39\xd5\xa1\x4e\xf5\xf8\xb1\xa9\x23\x08\x75\x84\xac\x53\x6c\x3a\xc5\xa6\x53\x6c\x3a\xc5\xa6\x7f\xc5\xd8\xf4\x51\xfd\xff\x25\x4e\xfa\x33\x05\xbe\x9d\x60\xd2\x04\x93\x6a\x5f\x33\x9b\x98\x50\xd2\xe1\x50\x52\xc6\xcc\xdb\x38\x91\xdb\xe6\xaa\xa2\x8f\xa1\x6a\xa6\xd4\xc6\x56\xd6\x8c\x40\x02\x68\x48\xe8\x1a\xe1\x9a\xd9\x2e\xb7\x05\xc3\x34\xda\x2a\xbb\xcd\x12\x36\x98\x22\x50\x4c\xa1\x1b\xc5\xd5\x84\xf0\xbe\x24\x84\xf7\x2f\x22\x37\xef\x94\xd7\x9f\xa0\xde\x04\xf5\x26\xa8\x37\x41\x3d\x64\x40\x3d\xe5\xf2\xde\x60\x89\x27\xb4\x37\xa1\xbd\xda\xd7\xd2\x2c\x26\xc0\x37\x01\x3e\x1b\xef\x5f\x06\xe0\x6b\x7c\x5c\x91\x08\x26\x10\x38\x81\xc0\x09\x04\x76\x4a\x3d\x8
1\xc0\xbf\x12\x08\x4c\xb0\xdc\x7c\x99\x00\xd0\x75\x70\xb4\xf8\x5a\x7c\xea\xde\x3e\x39\x08\x30\x5a\x27\x35\xed\xb4\x65\xad\x69\x74\x10\x88\x79\xe2\x30\x52\x19\xd6\x04\x21\xa7\x95\xd5\x0e\x03\xf8\xda\x20\xd7\x84\xb4\x26\xa4\x35\x21\xad\x09\x69\x21\x03\x69\x51\x46\xff\xff\x18\x9b\x54\xed\x87\x47\x06\x9d\x4e\x73\x6e\x9a\xb3\xa9\xce\x83\x5e\x4b\xc6\x71\x20\x45\xd7\x72\xf5\x40\x72\x0e\x34\x6c\xf4\x6c\xf3\xba\x5e\x4b\x97\x0e\x52\xb8\xbe\x8b\x79\xa0\x10\x86\xa1\x75\xb0\x6f\xec\xc8\xf4\x3d\x4d\x7b\x86\xc2\x02\x31\x12\x51\xbf\x07\x94\xad\x10\xd6\x2e\x79\xcf\xe9\x1c\xf5\x1e\x31\x23\x5c\x18\xfb\x9c\xfb\x70\xe0\x62\xbd\xf7\xb5\x94\xcf\xf3\xf6\xfa\xc5\x4e\x9c\x85\x76\x1d\xae\x37\xbc\xde\xa3\x41\xcb\x29\xd0\x5e\x70\x67\x50\x93\x6d\x98\xa8\xd3\x67\x0f\x68\xb1\xeb\x38\x47\x07\x10\x1b\xd2\xe2\x18\x68\x6d\x40\xbb\xa3\x40\xba\x21\xf2\x8e\x81\xfb\xf6\x92\x77\x2f\x70\xe8\xdb\xb2\x36\xbf\x30\x91\xc5\x21\xe7\x45\xcc\x34\x0c\x48\x8e\xd0\xf2\x9b\x7c\x3c\xbd\x1c\x04\x3e\x07\xe8\x7c\x2f\x84\x7a\x48\x4d\x1f\xba\xe1\x76\x45\x7b\x60\xe0\x01\xca\xee\x04\xca\x31\xbe\x73\x5f\xa3\x70\x0c\xad\x1f\xa5\xf5\x76\xd5\xbb\xd2\x7e\xfb\x30\x90\xcf\xfa\x67\x7a\xe6\xc3\x37\x42\x19\xe2\xc9\x9c\x61\x4c\xe7\xd1\x77\x55\x86\x6e\x8f\x7b\x85\x42\x33\xa3\xaa\x57\xae\xfd\xd5\xcc\x9c\x56\x68\xe2\xde\x0e\xa8\x2c\xa9\xb0\x7d\xf0\x54\x2d\x80\xf2\x48\x39\x3d\x98\x1a\x6d\x0b\x9d\xba\x53\xfb\xce\xf8\xd1\x7e\xbb\x60\x4f\x06\xf5\xce\xda\xab\xeb\xca\xcb\xd5\xac\x20\xda\x72\x3b\xa2\x4b\x38\xd7\xb5\x89\xbe\x42\xd9\xb9\x73\xc4\x0c\x6d\xb7\xd9\xf8\x8d\xbd\x3e\x9c\xf5\x53\x68\x48\x14\x36\x8f\x09\xc5\x92\xf1\x21\xd1\x09\x07\x1c\xbe\xa7\x91\xf3\x6a\xc8\xc1\x57\xb0\xdd\xc5\xc6\x1d\xad\x76\x1d\xa8\x82\x0e\x4c\x38\xfe\xa5\x8c\xc5\x0a\x40\x5d\xa1\xbb\x98\xd1\xfb\x74\x61\xed\x1a\x98\xaf\x3c\x8c\x1e\xe3\xc2\x92\x29\x74\x3e\x99\xd0\xf9\x41\x50\xd0\x38\x4b\x57\x7e\x5b\x7a\x0e\xe6\xce\x4e\xd7\x19\x35\x57\xbb\x3c\x3c\xd2\x5e\x4f\x4b\x4c\xf7\x0f\x4d\x2b\x91\x5d\x94\xa6\x95\xc8\x69\x25\x72\x5a\x89\x7c\xb8\x95\xc8\x07\x80\x8c\xda\x9c\x64\x7b\xb7\x71\xdf\x57\x12\x4b\x9a\x1f\x72\x0c\x13\x03\xad\x3c\xd5\x4e\xde\x8e\x57\x13\x6d\x34\x86\xcf\x97\xe6\xa4\x68\xc4\xb0\xf6\xc9\xc2\xeb\x8d\x0b\xd7\xcd\xfe\xba\xa6\xf5\x30\xeb\x30\x33\xbe\xc7\x56\x27\xab\x08\xd9\x6d\xbb\x09\xee\x7c\x2d\xcf\xee\x87\x38\xac\xc8\xdd\x90\x9a\x58\x4a\x4e\x96\xa9\x79\x79\xf3\xde\x20\xf0\x96\xe3\x24\x19\xeb\xba\xf2\x53\x19\xab\x12\xaf\x47\xb3\xa0\x3e\xef\x9b\x8d\x6d\x6d\x7b\xde\x3b\x3b\x1a\xc0\x3f\x95\x7e\xed\x78\xfb\x76\xb8\xaf\xb3\xdd\xc5\xeb\x95\xed\x5a\x62\x41\x82\xb3\x54\x6e\x80\x4a\x92\x6f\x36\xbd\x34\xae\xde\x6f\xa4\xc0\xbc\x08\xe3\x84\xfc\x1d\xb6\xe3\xd0\x62\x38\x95\x9b\x57\xe7\x71\x12\x91\x80\xc8\x31\x69\x5e\x60\x21\x6e\x19\x0f\xc7\xa4\x79\x96\x28\x3e\x47\x54\x65\x41\x36\x08\x40\x88\x1f\x58\x08\x56\xaa\xd5\xbf\xaf\xad\x96\xd7\xd6\xcf\x87\xf5\x34\x0f\x71\x93\x6e\x26\xed\x98\x5b\x9f\x4f\xcf\x95\x34\xc6\xd7\x11\xfa\xb0\x81\x22\x1a\xdb\xdf\x8e\xdc\xc3\xb9\xf8\xdd\x5d\x3c\x74\xe6\xea\xb7\x8b\xbf\xf5\x6e\xb3\x46\x2e\x64\xf4\xab\x3c\x4e\xcf\x36\x1d\xfe\xfa\xb8\x36\xba\x8a\xd8\xad\xf6\xc6\x41\x2a\x37\x8c\x17\x0f\xda\xfe\xda\xe7\x65\xbc\x71\x2c\x36\x57\x8a\x47\x12\x4c\xf1\xbd\x77\x6b\xa4\x50\x7e\x77\x7b\x22\x60\x89\xef\xcd\xf5\xb9\x0c\x97\x79\x0d\x2b\x35\x43\xcb\x3d\x24\xf1\x78\xfc\xf8\xcb\x1f\x15\x06\xe2\x78\xd8\x51\x21\xd9\x1f\xf0\xf5\x8f\x86\xa4\x50\xfa\xb1\x47\x43\xa5\xdd\x69\x14\xe8\xa3\xc0\x86\x91\xa7\x81\x50\xb4\x7c\xc0\x81\x80\x77\x7a\x9f\xc6\xc2\xa9\x8c\x05\x33\xb0\x3b\x31\xa4\xf4\xd7\x1b\x26\x55\x97\x7c\x65\xf8\x69\x1a\x84\x8e\x41\x78\xd9\xec\xc5\x11\x16\x1e\x74\x91\xf5\x56\xf5\x57\x49\x47\x5c\x92\x
a9\x1e\x99\x36\xf4\xdb\xb1\x0e\xd3\x78\x02\xb0\x9b\x25\xcb\xe9\xe9\x1d\x0d\x44\x01\x42\x08\x91\x64\xd9\xdd\x37\x08\x17\xef\xf9\xe5\x0f\xbd\x46\x91\xf5\xf9\x89\x92\x37\xdd\x89\x59\x44\x1f\x9c\xee\x34\x1e\xbe\x47\x83\x32\x72\x8e\x37\xdf\xac\x69\x38\xef\x75\xb0\xda\xdb\xac\x83\x94\x2f\x39\xa6\x62\x05\x1c\x25\x9c\x49\x16\xb0\xa8\x3c\xc7\x7e\x76\x71\x3e\x6f\xb5\x24\xe7\xe8\xb7\xb9\xc7\x6c\x43\x92\xee\x21\xd4\x17\xa1\x7f\xba\x35\xfe\xae\x39\xbc\xde\xba\x69\xd9\x8e\xe1\x62\xde\x64\x7d\x16\x88\x1b\xed\x29\x11\xfd\x4f\xa9\xff\x99\x90\xc4\xf6\xf4\xf9\x6e\xe9\x28\x23\xd7\xca\xe5\xee\xee\x9f\x63\xb1\x5b\xfb\x10\xd7\x2f\x1d\xf2\xe3\xbf\xb9\xb5\x6f\xa4\x6d\x7d\x25\x79\xfb\x24\x31\xea\x56\xbe\x5d\x53\xcd\x2d\x36\xa3\x6d\xdf\xab\x1c\xb8\x6d\x6f\xc0\x98\xa7\xdd\xaa\x86\x8c\x1d\x39\xa3\x9d\x70\x2b\x9b\x68\xd9\xfd\x33\xfe\xa9\xb6\x4a\x2e\x63\x0f\xcf\x68\x27\xd9\x4c\xb9\x46\x6d\xcb\xbe\xa1\xa8\xd6\x5f\xc6\xd6\x9f\xf1\x4f\xf4\xd4\xb4\x78\xd0\xd6\xf4\x13\x3c\x3b\xac\xd0\xdc\x90\x34\xda\xc9\xb4\x9a\x1a\x8d\xbd\x93\x87\xd4\xe2\x21\x1b\xb3\x2b\xd1\xbe\xe7\x69\xd4\x53\x67\xd5\x40\xa0\xe3\x19\x3f\x6d\x1a\xbc\x0e\x86\x3c\x10\xb4\x3d\x2a\xcd\x78\x32\xe6\xab\xbd\x36\xc9\xf4\x3a\x58\xae\xc3\xf4\x47\xea\xbf\xfb\x47\xff\x0d\x00\x00\xff\xff\xd2\x32\x5a\x28\x38\x9d\x00\x00") - -func v2SchemaJsonBytes() ([]byte, error) { - return bindataRead( - _v2SchemaJson, - "v2/schema.json", - ) -} - -func v2SchemaJson() (*asset, error) { - bytes, err := v2SchemaJsonBytes() - if err != nil { - return nil, err - } - - info := bindataFileInfo{name: "v2/schema.json", size: 40248, mode: os.FileMode(0640), modTime: time.Unix(1568964748, 0)} - a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xab, 0x88, 0x5e, 0xf, 0xbf, 0x17, 0x74, 0x0, 0xb2, 0x5a, 0x7f, 0xbc, 0x58, 0xcd, 0xc, 0x25, 0x73, 0xd5, 0x29, 0x1c, 0x7a, 0xd0, 0xce, 0x79, 0xd4, 0x89, 0x31, 0x27, 0x90, 0xf2, 0xff, 0xe6}} - return a, nil -} - -// Asset loads and returns the asset for the given name. -// It returns an error if the asset could not be found or -// could not be loaded. -func Asset(name string) ([]byte, error) { - canonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[canonicalName]; ok { - a, err := f() - if err != nil { - return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) - } - return a.bytes, nil - } - return nil, fmt.Errorf("Asset %s not found", name) -} - -// AssetString returns the asset contents as a string (instead of a []byte). -func AssetString(name string) (string, error) { - data, err := Asset(name) - return string(data), err -} - -// MustAsset is like Asset but panics when Asset would return an error. -// It simplifies safe initialization of global variables. -func MustAsset(name string) []byte { - a, err := Asset(name) - if err != nil { - panic("asset: Asset(" + name + "): " + err.Error()) - } - - return a -} - -// MustAssetString is like AssetString but panics when Asset would return an -// error. It simplifies safe initialization of global variables. -func MustAssetString(name string) string { - return string(MustAsset(name)) -} - -// AssetInfo loads and returns the asset info for the given name. -// It returns an error if the asset could not be found or -// could not be loaded. 
-func AssetInfo(name string) (os.FileInfo, error) { - canonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[canonicalName]; ok { - a, err := f() - if err != nil { - return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) - } - return a.info, nil - } - return nil, fmt.Errorf("AssetInfo %s not found", name) -} - -// AssetDigest returns the digest of the file with the given name. It returns an -// error if the asset could not be found or the digest could not be loaded. -func AssetDigest(name string) ([sha256.Size]byte, error) { - canonicalName := strings.Replace(name, "\\", "/", -1) - if f, ok := _bindata[canonicalName]; ok { - a, err := f() - if err != nil { - return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't read by error: %v", name, err) - } - return a.digest, nil - } - return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name) -} - -// Digests returns a map of all known files and their checksums. -func Digests() (map[string][sha256.Size]byte, error) { - mp := make(map[string][sha256.Size]byte, len(_bindata)) - for name := range _bindata { - a, err := _bindata[name]() - if err != nil { - return nil, err - } - mp[name] = a.digest - } - return mp, nil -} - -// AssetNames returns the names of the assets. -func AssetNames() []string { - names := make([]string, 0, len(_bindata)) - for name := range _bindata { - names = append(names, name) - } - return names -} - -// _bindata is a table, holding each asset generator, mapped to its name. -var _bindata = map[string]func() (*asset, error){ - "jsonschema-draft-04.json": jsonschemaDraft04Json, - - "v2/schema.json": v2SchemaJson, -} - -// AssetDir returns the file names below a certain -// directory embedded in the file by go-bindata. -// For example if you run go-bindata on data/... and data contains the -// following hierarchy: -// data/ -// foo.txt -// img/ -// a.png -// b.png -// then AssetDir("data") would return []string{"foo.txt", "img"}, -// AssetDir("data/img") would return []string{"a.png", "b.png"}, -// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and -// AssetDir("") will return []string{"data"}. -func AssetDir(name string) ([]string, error) { - node := _bintree - if len(name) != 0 { - canonicalName := strings.Replace(name, "\\", "/", -1) - pathList := strings.Split(canonicalName, "/") - for _, p := range pathList { - node = node.Children[p] - if node == nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - } - } - if node.Func != nil { - return nil, fmt.Errorf("Asset %s not found", name) - } - rv := make([]string, 0, len(node.Children)) - for childName := range node.Children { - rv = append(rv, childName) - } - return rv, nil -} - -type bintree struct { - Func func() (*asset, error) - Children map[string]*bintree -} - -var _bintree = &bintree{nil, map[string]*bintree{ - "jsonschema-draft-04.json": {jsonschemaDraft04Json, map[string]*bintree{}}, - "v2": {nil, map[string]*bintree{ - "schema.json": {v2SchemaJson, map[string]*bintree{}}, - }}, -}} - -// RestoreAsset restores an asset under the given directory. 
-func RestoreAsset(dir, name string) error { - data, err := Asset(name) - if err != nil { - return err - } - info, err := AssetInfo(name) - if err != nil { - return err - } - err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) - if err != nil { - return err - } - err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) - if err != nil { - return err - } - return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) -} - -// RestoreAssets restores an asset under the given directory recursively. -func RestoreAssets(dir, name string) error { - children, err := AssetDir(name) - // File - if err != nil { - return RestoreAsset(dir, name) - } - // Dir - for _, child := range children { - err = RestoreAssets(dir, filepath.Join(name, child)) - if err != nil { - return err - } - } - return nil -} - -func _filePath(dir, name string) string { - canonicalName := strings.Replace(name, "\\", "/", -1) - return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...) -} diff --git a/vendor/github.com/go-openapi/spec/cache.go b/vendor/github.com/go-openapi/spec/cache.go deleted file mode 100644 index 122993b44..000000000 --- a/vendor/github.com/go-openapi/spec/cache.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "sync" -) - -// ResolutionCache a cache for resolving urls -type ResolutionCache interface { - Get(string) (interface{}, bool) - Set(string, interface{}) -} - -type simpleCache struct { - lock sync.RWMutex - store map[string]interface{} -} - -func (s *simpleCache) ShallowClone() ResolutionCache { - store := make(map[string]interface{}, len(s.store)) - s.lock.RLock() - for k, v := range s.store { - store[k] = v - } - s.lock.RUnlock() - - return &simpleCache{ - store: store, - } -} - -// Get retrieves a cached URI -func (s *simpleCache) Get(uri string) (interface{}, bool) { - s.lock.RLock() - v, ok := s.store[uri] - - s.lock.RUnlock() - return v, ok -} - -// Set caches a URI -func (s *simpleCache) Set(uri string, data interface{}) { - s.lock.Lock() - s.store[uri] = data - s.lock.Unlock() -} - -var ( - // resCache is a package level cache for $ref resolution and expansion. - // It is initialized lazily by methods that have the need for it: no - // memory is allocated unless some expander methods are called. - // - // It is initialized with JSON schema and swagger schema, - // which do not mutate during normal operations. - // - // All subsequent utilizations of this cache are produced from a shallow - // clone of this initial version. - resCache *simpleCache - onceCache sync.Once - - _ ResolutionCache = &simpleCache{} -) - -// initResolutionCache initializes the URI resolution cache. To be wrapped in a sync.Once.Do call. 
-func initResolutionCache() { - resCache = defaultResolutionCache() -} - -func defaultResolutionCache() *simpleCache { - return &simpleCache{store: map[string]interface{}{ - "http://swagger.io/v2/schema.json": MustLoadSwagger20Schema(), - "http://json-schema.org/draft-04/schema": MustLoadJSONSchemaDraft04(), - }} -} - -func cacheOrDefault(cache ResolutionCache) ResolutionCache { - onceCache.Do(initResolutionCache) - - if cache != nil { - return cache - } - - // get a shallow clone of the base cache with swagger and json schema - return resCache.ShallowClone() -} diff --git a/vendor/github.com/go-openapi/spec/contact_info.go b/vendor/github.com/go-openapi/spec/contact_info.go deleted file mode 100644 index 2f7bb219b..000000000 --- a/vendor/github.com/go-openapi/spec/contact_info.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/swag" -) - -// ContactInfo contact information for the exposed API. -// -// For more information: http://goo.gl/8us55a#contactObject -type ContactInfo struct { - ContactInfoProps - VendorExtensible -} - -// ContactInfoProps hold the properties of a ContactInfo object -type ContactInfoProps struct { - Name string `json:"name,omitempty"` - URL string `json:"url,omitempty"` - Email string `json:"email,omitempty"` -} - -// UnmarshalJSON hydrates ContactInfo from json -func (c *ContactInfo) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &c.ContactInfoProps); err != nil { - return err - } - return json.Unmarshal(data, &c.VendorExtensible) -} - -// MarshalJSON produces ContactInfo as json -func (c ContactInfo) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(c.ContactInfoProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(c.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} diff --git a/vendor/github.com/go-openapi/spec/debug.go b/vendor/github.com/go-openapi/spec/debug.go deleted file mode 100644 index fc889f6d0..000000000 --- a/vendor/github.com/go-openapi/spec/debug.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "fmt" - "log" - "os" - "path" - "runtime" -) - -// Debug is true when the SWAGGER_DEBUG env var is not empty. -// -// It enables a more verbose logging of this package. 
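The contact_info.go hunk above is the first of several types in this package that pair a Props struct with VendorExtensible and merge the two with swag.ConcatJSON; the same pattern recurs in header.go, info.go and operation.go further down. A minimal round-trip sketch of that pattern (the field values are invented); the debug.go hunk continues below:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	src := []byte(`{"name":"API Support","url":"https://example.com","x-team":"platform"}`)

	var c spec.ContactInfo
	if err := json.Unmarshal(src, &c); err != nil {
		panic(err)
	}
	// Known fields land in ContactInfoProps; "x-" keys are kept in Extensions.
	fmt.Println(c.Name, c.Extensions["x-team"])

	// MarshalJSON re-merges both halves into a single JSON object.
	out, err := json.Marshal(c)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}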
-var Debug = os.Getenv("SWAGGER_DEBUG") != "" - -var ( - // specLogger is a debug logger for this package - specLogger *log.Logger -) - -func init() { - debugOptions() -} - -func debugOptions() { - specLogger = log.New(os.Stdout, "spec:", log.LstdFlags) -} - -func debugLog(msg string, args ...interface{}) { - // A private, trivial trace logger, based on go-openapi/spec/expander.go:debugLog() - if Debug { - _, file1, pos1, _ := runtime.Caller(1) - specLogger.Printf("%s:%d: %s", path.Base(file1), pos1, fmt.Sprintf(msg, args...)) - } -} diff --git a/vendor/github.com/go-openapi/spec/errors.go b/vendor/github.com/go-openapi/spec/errors.go deleted file mode 100644 index 6992c7ba7..000000000 --- a/vendor/github.com/go-openapi/spec/errors.go +++ /dev/null @@ -1,19 +0,0 @@ -package spec - -import "errors" - -// Error codes -var ( - // ErrUnknownTypeForReference indicates that a resolved reference was found in an unsupported container type - ErrUnknownTypeForReference = errors.New("unknown type for the resolved reference") - - // ErrResolveRefNeedsAPointer indicates that a $ref target must be a valid JSON pointer - ErrResolveRefNeedsAPointer = errors.New("resolve ref: target needs to be a pointer") - - // ErrDerefUnsupportedType indicates that a resolved reference was found in an unsupported container type. - // At the moment, $ref are supported only inside: schemas, parameters, responses, path items - ErrDerefUnsupportedType = errors.New("deref: unsupported type") - - // ErrExpandUnsupportedType indicates that $ref expansion is attempted on some invalid type - ErrExpandUnsupportedType = errors.New("expand: unsupported type. Input should be of type *Parameter or *Response") -) diff --git a/vendor/github.com/go-openapi/spec/expander.go b/vendor/github.com/go-openapi/spec/expander.go deleted file mode 100644 index d4ea889d4..000000000 --- a/vendor/github.com/go-openapi/spec/expander.go +++ /dev/null @@ -1,594 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "fmt" -) - -// ExpandOptions provides options for the spec expander. -// -// RelativeBase is the path to the root document. This can be a remote URL or a path to a local file. -// -// If left empty, the root document is assumed to be located in the current working directory: -// all relative $ref's will be resolved from there. -// -// PathLoader injects a document loading method. By default, this resolves to the function provided by the SpecLoader package variable. -// -type ExpandOptions struct { - RelativeBase string // the path to the root document to expand. 
This is a file, not a directory - SkipSchemas bool // do not expand schemas, just paths, parameters and responses - ContinueOnError bool // continue expanding even after and error is found - PathLoader func(string) (json.RawMessage, error) `json:"-"` // the document loading method that takes a path as input and yields a json document - AbsoluteCircularRef bool // circular $ref remaining after expansion remain absolute URLs -} - -func optionsOrDefault(opts *ExpandOptions) *ExpandOptions { - if opts != nil { - clone := *opts // shallow clone to avoid internal changes to be propagated to the caller - if clone.RelativeBase != "" { - clone.RelativeBase = normalizeBase(clone.RelativeBase) - } - // if the relative base is empty, let the schema loader choose a pseudo root document - return &clone - } - return &ExpandOptions{} -} - -// ExpandSpec expands the references in a swagger spec -func ExpandSpec(spec *Swagger, options *ExpandOptions) error { - options = optionsOrDefault(options) - resolver := defaultSchemaLoader(spec, options, nil, nil) - - specBasePath := options.RelativeBase - - if !options.SkipSchemas { - for key, definition := range spec.Definitions { - parentRefs := make([]string, 0, 10) - parentRefs = append(parentRefs, fmt.Sprintf("#/definitions/%s", key)) - - def, err := expandSchema(definition, parentRefs, resolver, specBasePath) - if resolver.shouldStopOnError(err) { - return err - } - if def != nil { - spec.Definitions[key] = *def - } - } - } - - for key := range spec.Parameters { - parameter := spec.Parameters[key] - if err := expandParameterOrResponse(¶meter, resolver, specBasePath); resolver.shouldStopOnError(err) { - return err - } - spec.Parameters[key] = parameter - } - - for key := range spec.Responses { - response := spec.Responses[key] - if err := expandParameterOrResponse(&response, resolver, specBasePath); resolver.shouldStopOnError(err) { - return err - } - spec.Responses[key] = response - } - - if spec.Paths != nil { - for key := range spec.Paths.Paths { - pth := spec.Paths.Paths[key] - if err := expandPathItem(&pth, resolver, specBasePath); resolver.shouldStopOnError(err) { - return err - } - spec.Paths.Paths[key] = pth - } - } - - return nil -} - -const rootBase = ".root" - -// baseForRoot loads in the cache the root document and produces a fake ".root" base path entry -// for further $ref resolution -// -// Setting the cache is optional and this parameter may safely be left to nil. -func baseForRoot(root interface{}, cache ResolutionCache) string { - if root == nil { - return "" - } - - // cache the root document to resolve $ref's - normalizedBase := normalizeBase(rootBase) - cache.Set(normalizedBase, root) - - return normalizedBase -} - -// ExpandSchema expands the refs in the schema object with reference to the root object. -// -// go-openapi/validate uses this function. -// -// Notice that it is impossible to reference a json schema in a different document other than root -// (use ExpandSchemaWithBasePath to resolve external references). -// -// Setting the cache is optional and this parameter may safely be left to nil. 
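ExpandSpec and ExpandOptions, deleted above, are the package's main entry points for $ref expansion; the expander.go hunk continues below with ExpandSchema. A minimal sketch of the common in-memory case (the Pet/Owner definitions are invented for illustration); relative or remote $ref's would additionally need RelativeBase to point at the root document:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/go-openapi/spec"
)

func main() {
	raw := []byte(`{
	  "swagger": "2.0",
	  "info": {"title": "demo", "version": "1.0.0"},
	  "paths": {},
	  "definitions": {
	    "Pet":   {"type": "object", "properties": {"owner": {"$ref": "#/definitions/Owner"}}},
	    "Owner": {"type": "object", "properties": {"name": {"type": "string"}}}
	  }
	}`)

	sw := new(spec.Swagger)
	if err := json.Unmarshal(raw, sw); err != nil {
		log.Fatal(err)
	}

	// SkipSchemas:false expands definitions as well as paths, parameters and responses.
	if err := spec.ExpandSpec(sw, &spec.ExpandOptions{SkipSchemas: false}); err != nil {
		log.Fatal(err)
	}

	// After expansion the local $ref should be inlined into the Pet schema.
	owner := sw.Definitions["Pet"].Properties["owner"]
	fmt.Println("owner $ref after expansion:", owner.Ref.String())
}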
-func ExpandSchema(schema *Schema, root interface{}, cache ResolutionCache) error { - cache = cacheOrDefault(cache) - if root == nil { - root = schema - } - - opts := &ExpandOptions{ - // when a root is specified, cache the root as an in-memory document for $ref retrieval - RelativeBase: baseForRoot(root, cache), - SkipSchemas: false, - ContinueOnError: false, - } - - return ExpandSchemaWithBasePath(schema, cache, opts) -} - -// ExpandSchemaWithBasePath expands the refs in the schema object, base path configured through expand options. -// -// Setting the cache is optional and this parameter may safely be left to nil. -func ExpandSchemaWithBasePath(schema *Schema, cache ResolutionCache, opts *ExpandOptions) error { - if schema == nil { - return nil - } - - cache = cacheOrDefault(cache) - - opts = optionsOrDefault(opts) - - resolver := defaultSchemaLoader(nil, opts, cache, nil) - - parentRefs := make([]string, 0, 10) - s, err := expandSchema(*schema, parentRefs, resolver, opts.RelativeBase) - if err != nil { - return err - } - if s != nil { - // guard for when continuing on error - *schema = *s - } - - return nil -} - -func expandItems(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { - if target.Items == nil { - return &target, nil - } - - // array - if target.Items.Schema != nil { - t, err := expandSchema(*target.Items.Schema, parentRefs, resolver, basePath) - if err != nil { - return nil, err - } - *target.Items.Schema = *t - } - - // tuple - for i := range target.Items.Schemas { - t, err := expandSchema(target.Items.Schemas[i], parentRefs, resolver, basePath) - if err != nil { - return nil, err - } - target.Items.Schemas[i] = *t - } - - return &target, nil -} - -func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { - if target.Ref.String() == "" && target.Ref.IsRoot() { - newRef := normalizeRef(&target.Ref, basePath) - target.Ref = *newRef - return &target, nil - } - - // change the base path of resolution when an ID is encountered - // otherwise the basePath should inherit the parent's - if target.ID != "" { - basePath, _ = resolver.setSchemaID(target, target.ID, basePath) - } - - if target.Ref.String() != "" { - return expandSchemaRef(target, parentRefs, resolver, basePath) - } - - for k := range target.Definitions { - tt, err := expandSchema(target.Definitions[k], parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if tt != nil { - target.Definitions[k] = *tt - } - } - - t, err := expandItems(target, parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - target = *t - } - - for i := range target.AllOf { - t, err := expandSchema(target.AllOf[i], parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - target.AllOf[i] = *t - } - } - - for i := range target.AnyOf { - t, err := expandSchema(target.AnyOf[i], parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - target.AnyOf[i] = *t - } - } - - for i := range target.OneOf { - t, err := expandSchema(target.OneOf[i], parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - target.OneOf[i] = *t - } - } - - if target.Not != nil { - t, err := expandSchema(*target.Not, parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - 
} - if t != nil { - *target.Not = *t - } - } - - for k := range target.Properties { - t, err := expandSchema(target.Properties[k], parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - target.Properties[k] = *t - } - } - - if target.AdditionalProperties != nil && target.AdditionalProperties.Schema != nil { - t, err := expandSchema(*target.AdditionalProperties.Schema, parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - *target.AdditionalProperties.Schema = *t - } - } - - for k := range target.PatternProperties { - t, err := expandSchema(target.PatternProperties[k], parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - target.PatternProperties[k] = *t - } - } - - for k := range target.Dependencies { - if target.Dependencies[k].Schema != nil { - t, err := expandSchema(*target.Dependencies[k].Schema, parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - *target.Dependencies[k].Schema = *t - } - } - } - - if target.AdditionalItems != nil && target.AdditionalItems.Schema != nil { - t, err := expandSchema(*target.AdditionalItems.Schema, parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return &target, err - } - if t != nil { - *target.AdditionalItems.Schema = *t - } - } - return &target, nil -} - -func expandSchemaRef(target Schema, parentRefs []string, resolver *schemaLoader, basePath string) (*Schema, error) { - // if a Ref is found, all sibling fields are skipped - // Ref also changes the resolution scope of children expandSchema - - // here the resolution scope is changed because a $ref was encountered - normalizedRef := normalizeRef(&target.Ref, basePath) - normalizedBasePath := normalizedRef.RemoteURI() - - if resolver.isCircular(normalizedRef, basePath, parentRefs...) { - // this means there is a cycle in the recursion tree: return the Ref - // - circular refs cannot be expanded. We leave them as ref. 
- // - denormalization means that a new local file ref is set relative to the original basePath - debugLog("short circuit circular ref: basePath: %s, normalizedPath: %s, normalized ref: %s", - basePath, normalizedBasePath, normalizedRef.String()) - if !resolver.options.AbsoluteCircularRef { - target.Ref = denormalizeRef(normalizedRef, resolver.context.basePath, resolver.context.rootID) - } else { - target.Ref = *normalizedRef - } - return &target, nil - } - - var t *Schema - err := resolver.Resolve(&target.Ref, &t, basePath) - if resolver.shouldStopOnError(err) { - return nil, err - } - - if t == nil { - // guard for when continuing on error - return &target, nil - } - - parentRefs = append(parentRefs, normalizedRef.String()) - transitiveResolver := resolver.transitiveResolver(basePath, target.Ref) - - basePath = resolver.updateBasePath(transitiveResolver, normalizedBasePath) - - return expandSchema(*t, parentRefs, transitiveResolver, basePath) -} - -func expandPathItem(pathItem *PathItem, resolver *schemaLoader, basePath string) error { - if pathItem == nil { - return nil - } - - parentRefs := make([]string, 0, 10) - if err := resolver.deref(pathItem, parentRefs, basePath); resolver.shouldStopOnError(err) { - return err - } - - if pathItem.Ref.String() != "" { - transitiveResolver := resolver.transitiveResolver(basePath, pathItem.Ref) - basePath = transitiveResolver.updateBasePath(resolver, basePath) - resolver = transitiveResolver - } - - pathItem.Ref = Ref{} - for i := range pathItem.Parameters { - if err := expandParameterOrResponse(&(pathItem.Parameters[i]), resolver, basePath); resolver.shouldStopOnError(err) { - return err - } - } - - ops := []*Operation{ - pathItem.Get, - pathItem.Head, - pathItem.Options, - pathItem.Put, - pathItem.Post, - pathItem.Patch, - pathItem.Delete, - } - for _, op := range ops { - if err := expandOperation(op, resolver, basePath); resolver.shouldStopOnError(err) { - return err - } - } - - return nil -} - -func expandOperation(op *Operation, resolver *schemaLoader, basePath string) error { - if op == nil { - return nil - } - - for i := range op.Parameters { - param := op.Parameters[i] - if err := expandParameterOrResponse(¶m, resolver, basePath); resolver.shouldStopOnError(err) { - return err - } - op.Parameters[i] = param - } - - if op.Responses == nil { - return nil - } - - responses := op.Responses - if err := expandParameterOrResponse(responses.Default, resolver, basePath); resolver.shouldStopOnError(err) { - return err - } - - for code := range responses.StatusCodeResponses { - response := responses.StatusCodeResponses[code] - if err := expandParameterOrResponse(&response, resolver, basePath); resolver.shouldStopOnError(err) { - return err - } - responses.StatusCodeResponses[code] = response - } - - return nil -} - -// ExpandResponseWithRoot expands a response based on a root document, not a fetchable document -// -// Notice that it is impossible to reference a json schema in a different document other than root -// (use ExpandResponse to resolve external references). -// -// Setting the cache is optional and this parameter may safely be left to nil. 
-func ExpandResponseWithRoot(response *Response, root interface{}, cache ResolutionCache) error { - cache = cacheOrDefault(cache) - opts := &ExpandOptions{ - RelativeBase: baseForRoot(root, cache), - } - resolver := defaultSchemaLoader(root, opts, cache, nil) - - return expandParameterOrResponse(response, resolver, opts.RelativeBase) -} - -// ExpandResponse expands a response based on a basepath -// -// All refs inside response will be resolved relative to basePath -func ExpandResponse(response *Response, basePath string) error { - opts := optionsOrDefault(&ExpandOptions{ - RelativeBase: basePath, - }) - resolver := defaultSchemaLoader(nil, opts, nil, nil) - - return expandParameterOrResponse(response, resolver, opts.RelativeBase) -} - -// ExpandParameterWithRoot expands a parameter based on a root document, not a fetchable document. -// -// Notice that it is impossible to reference a json schema in a different document other than root -// (use ExpandParameter to resolve external references). -func ExpandParameterWithRoot(parameter *Parameter, root interface{}, cache ResolutionCache) error { - cache = cacheOrDefault(cache) - - opts := &ExpandOptions{ - RelativeBase: baseForRoot(root, cache), - } - resolver := defaultSchemaLoader(root, opts, cache, nil) - - return expandParameterOrResponse(parameter, resolver, opts.RelativeBase) -} - -// ExpandParameter expands a parameter based on a basepath. -// This is the exported version of expandParameter -// all refs inside parameter will be resolved relative to basePath -func ExpandParameter(parameter *Parameter, basePath string) error { - opts := optionsOrDefault(&ExpandOptions{ - RelativeBase: basePath, - }) - resolver := defaultSchemaLoader(nil, opts, nil, nil) - - return expandParameterOrResponse(parameter, resolver, opts.RelativeBase) -} - -func getRefAndSchema(input interface{}) (*Ref, *Schema, error) { - var ( - ref *Ref - sch *Schema - ) - - switch refable := input.(type) { - case *Parameter: - if refable == nil { - return nil, nil, nil - } - ref = &refable.Ref - sch = refable.Schema - case *Response: - if refable == nil { - return nil, nil, nil - } - ref = &refable.Ref - sch = refable.Schema - default: - return nil, nil, fmt.Errorf("unsupported type: %T: %w", input, ErrExpandUnsupportedType) - } - - return ref, sch, nil -} - -func expandParameterOrResponse(input interface{}, resolver *schemaLoader, basePath string) error { - ref, _, err := getRefAndSchema(input) - if err != nil { - return err - } - - if ref == nil { - return nil - } - - parentRefs := make([]string, 0, 10) - if err = resolver.deref(input, parentRefs, basePath); resolver.shouldStopOnError(err) { - return err - } - - ref, sch, _ := getRefAndSchema(input) - if ref.String() != "" { - transitiveResolver := resolver.transitiveResolver(basePath, *ref) - basePath = resolver.updateBasePath(transitiveResolver, basePath) - resolver = transitiveResolver - } - - if sch == nil { - // nothing to be expanded - if ref != nil { - *ref = Ref{} - } - return nil - } - - if sch.Ref.String() != "" { - rebasedRef, ern := NewRef(normalizeURI(sch.Ref.String(), basePath)) - if ern != nil { - return ern - } - - switch { - case resolver.isCircular(&rebasedRef, basePath, parentRefs...): - // this is a circular $ref: stop expansion - if !resolver.options.AbsoluteCircularRef { - sch.Ref = denormalizeRef(&rebasedRef, resolver.context.basePath, resolver.context.rootID) - } else { - sch.Ref = rebasedRef - } - case !resolver.options.SkipSchemas: - // schema expanded to a $ref in another root - sch.Ref = 
rebasedRef - debugLog("rebased to: %s", sch.Ref.String()) - default: - // skip schema expansion but rebase $ref to schema - sch.Ref = denormalizeRef(&rebasedRef, resolver.context.basePath, resolver.context.rootID) - } - } - - if ref != nil { - *ref = Ref{} - } - - // expand schema - if !resolver.options.SkipSchemas { - s, err := expandSchema(*sch, parentRefs, resolver, basePath) - if resolver.shouldStopOnError(err) { - return err - } - if s == nil { - // guard for when continuing on error - return nil - } - *sch = *s - } - - return nil -} diff --git a/vendor/github.com/go-openapi/spec/external_docs.go b/vendor/github.com/go-openapi/spec/external_docs.go deleted file mode 100644 index 88add91b2..000000000 --- a/vendor/github.com/go-openapi/spec/external_docs.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -// ExternalDocumentation allows referencing an external resource for -// extended documentation. -// -// For more information: http://goo.gl/8us55a#externalDocumentationObject -type ExternalDocumentation struct { - Description string `json:"description,omitempty"` - URL string `json:"url,omitempty"` -} diff --git a/vendor/github.com/go-openapi/spec/header.go b/vendor/github.com/go-openapi/spec/header.go deleted file mode 100644 index 9dfd17b18..000000000 --- a/vendor/github.com/go-openapi/spec/header.go +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package spec - -import ( - "encoding/json" - "strings" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -const ( - jsonArray = "array" -) - -// HeaderProps describes a response header -type HeaderProps struct { - Description string `json:"description,omitempty"` -} - -// Header describes a header for a response of the API -// -// For more information: http://goo.gl/8us55a#headerObject -type Header struct { - CommonValidations - SimpleSchema - VendorExtensible - HeaderProps -} - -// ResponseHeader creates a new header instance for use in a response -func ResponseHeader() *Header { - return new(Header) -} - -// WithDescription sets the description on this response, allows for chaining -func (h *Header) WithDescription(description string) *Header { - h.Description = description - return h -} - -// Typed a fluent builder method for the type of parameter -func (h *Header) Typed(tpe, format string) *Header { - h.Type = tpe - h.Format = format - return h -} - -// CollectionOf a fluent builder method for an array item -func (h *Header) CollectionOf(items *Items, format string) *Header { - h.Type = jsonArray - h.Items = items - h.CollectionFormat = format - return h -} - -// WithDefault sets the default value on this item -func (h *Header) WithDefault(defaultValue interface{}) *Header { - h.Default = defaultValue - return h -} - -// WithMaxLength sets a max length value -func (h *Header) WithMaxLength(max int64) *Header { - h.MaxLength = &max - return h -} - -// WithMinLength sets a min length value -func (h *Header) WithMinLength(min int64) *Header { - h.MinLength = &min - return h -} - -// WithPattern sets a pattern value -func (h *Header) WithPattern(pattern string) *Header { - h.Pattern = pattern - return h -} - -// WithMultipleOf sets a multiple of value -func (h *Header) WithMultipleOf(number float64) *Header { - h.MultipleOf = &number - return h -} - -// WithMaximum sets a maximum number value -func (h *Header) WithMaximum(max float64, exclusive bool) *Header { - h.Maximum = &max - h.ExclusiveMaximum = exclusive - return h -} - -// WithMinimum sets a minimum number value -func (h *Header) WithMinimum(min float64, exclusive bool) *Header { - h.Minimum = &min - h.ExclusiveMinimum = exclusive - return h -} - -// WithEnum sets a the enum values (replace) -func (h *Header) WithEnum(values ...interface{}) *Header { - h.Enum = append([]interface{}{}, values...) 
- return h -} - -// WithMaxItems sets the max items -func (h *Header) WithMaxItems(size int64) *Header { - h.MaxItems = &size - return h -} - -// WithMinItems sets the min items -func (h *Header) WithMinItems(size int64) *Header { - h.MinItems = &size - return h -} - -// UniqueValues dictates that this array can only have unique items -func (h *Header) UniqueValues() *Header { - h.UniqueItems = true - return h -} - -// AllowDuplicates this array can have duplicates -func (h *Header) AllowDuplicates() *Header { - h.UniqueItems = false - return h -} - -// WithValidations is a fluent method to set header validations -func (h *Header) WithValidations(val CommonValidations) *Header { - h.SetValidations(SchemaValidations{CommonValidations: val}) - return h -} - -// MarshalJSON marshal this to JSON -func (h Header) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(h.CommonValidations) - if err != nil { - return nil, err - } - b2, err := json.Marshal(h.SimpleSchema) - if err != nil { - return nil, err - } - b3, err := json.Marshal(h.HeaderProps) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2, b3), nil -} - -// UnmarshalJSON unmarshals this header from JSON -func (h *Header) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &h.CommonValidations); err != nil { - return err - } - if err := json.Unmarshal(data, &h.SimpleSchema); err != nil { - return err - } - if err := json.Unmarshal(data, &h.VendorExtensible); err != nil { - return err - } - return json.Unmarshal(data, &h.HeaderProps) -} - -// JSONLookup look up a value by the json property name -func (h Header) JSONLookup(token string) (interface{}, error) { - if ex, ok := h.Extensions[token]; ok { - return &ex, nil - } - - r, _, err := jsonpointer.GetForToken(h.CommonValidations, token) - if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { - return nil, err - } - if r != nil { - return r, nil - } - r, _, err = jsonpointer.GetForToken(h.SimpleSchema, token) - if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { - return nil, err - } - if r != nil { - return r, nil - } - r, _, err = jsonpointer.GetForToken(h.HeaderProps, token) - return r, err -} diff --git a/vendor/github.com/go-openapi/spec/info.go b/vendor/github.com/go-openapi/spec/info.go deleted file mode 100644 index c458b49b2..000000000 --- a/vendor/github.com/go-openapi/spec/info.go +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
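The fluent builders on Header shown above are meant for programmatic construction of response headers. A short sketch (the header semantics here are invented); the info.go hunk continues below:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	h := spec.ResponseHeader().
		WithDescription("requests remaining in the current window").
		Typed("integer", "int32").
		WithMinimum(0, false)

	out, err := json.MarshalIndent(h, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}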
- -package spec - -import ( - "encoding/json" - "strings" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -// Extensions vendor specific extensions -type Extensions map[string]interface{} - -// Add adds a value to these extensions -func (e Extensions) Add(key string, value interface{}) { - realKey := strings.ToLower(key) - e[realKey] = value -} - -// GetString gets a string value from the extensions -func (e Extensions) GetString(key string) (string, bool) { - if v, ok := e[strings.ToLower(key)]; ok { - str, ok := v.(string) - return str, ok - } - return "", false -} - -// GetBool gets a string value from the extensions -func (e Extensions) GetBool(key string) (bool, bool) { - if v, ok := e[strings.ToLower(key)]; ok { - str, ok := v.(bool) - return str, ok - } - return false, false -} - -// GetStringSlice gets a string value from the extensions -func (e Extensions) GetStringSlice(key string) ([]string, bool) { - if v, ok := e[strings.ToLower(key)]; ok { - arr, isSlice := v.([]interface{}) - if !isSlice { - return nil, false - } - var strs []string - for _, iface := range arr { - str, isString := iface.(string) - if !isString { - return nil, false - } - strs = append(strs, str) - } - return strs, ok - } - return nil, false -} - -// VendorExtensible composition block. -type VendorExtensible struct { - Extensions Extensions -} - -// AddExtension adds an extension to this extensible object -func (v *VendorExtensible) AddExtension(key string, value interface{}) { - if value == nil { - return - } - if v.Extensions == nil { - v.Extensions = make(map[string]interface{}) - } - v.Extensions.Add(key, value) -} - -// MarshalJSON marshals the extensions to json -func (v VendorExtensible) MarshalJSON() ([]byte, error) { - toser := make(map[string]interface{}) - for k, v := range v.Extensions { - lk := strings.ToLower(k) - if strings.HasPrefix(lk, "x-") { - toser[k] = v - } - } - return json.Marshal(toser) -} - -// UnmarshalJSON for this extensible object -func (v *VendorExtensible) UnmarshalJSON(data []byte) error { - var d map[string]interface{} - if err := json.Unmarshal(data, &d); err != nil { - return err - } - for k, vv := range d { - lk := strings.ToLower(k) - if strings.HasPrefix(lk, "x-") { - if v.Extensions == nil { - v.Extensions = map[string]interface{}{} - } - v.Extensions[k] = vv - } - } - return nil -} - -// InfoProps the properties for an info definition -type InfoProps struct { - Description string `json:"description,omitempty"` - Title string `json:"title,omitempty"` - TermsOfService string `json:"termsOfService,omitempty"` - Contact *ContactInfo `json:"contact,omitempty"` - License *License `json:"license,omitempty"` - Version string `json:"version,omitempty"` -} - -// Info object provides metadata about the API. -// The metadata can be used by the clients if needed, and can be presented in the Swagger-UI for convenience. 
-// -// For more information: http://goo.gl/8us55a#infoObject -type Info struct { - VendorExtensible - InfoProps -} - -// JSONLookup look up a value by the json property name -func (i Info) JSONLookup(token string) (interface{}, error) { - if ex, ok := i.Extensions[token]; ok { - return &ex, nil - } - r, _, err := jsonpointer.GetForToken(i.InfoProps, token) - return r, err -} - -// MarshalJSON marshal this to JSON -func (i Info) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(i.InfoProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(i.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -// UnmarshalJSON marshal this from JSON -func (i *Info) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &i.InfoProps); err != nil { - return err - } - return json.Unmarshal(data, &i.VendorExtensible) -} diff --git a/vendor/github.com/go-openapi/spec/items.go b/vendor/github.com/go-openapi/spec/items.go deleted file mode 100644 index e2afb2133..000000000 --- a/vendor/github.com/go-openapi/spec/items.go +++ /dev/null @@ -1,234 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "strings" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -const ( - jsonRef = "$ref" -) - -// SimpleSchema describe swagger simple schemas for parameters and headers -type SimpleSchema struct { - Type string `json:"type,omitempty"` - Nullable bool `json:"nullable,omitempty"` - Format string `json:"format,omitempty"` - Items *Items `json:"items,omitempty"` - CollectionFormat string `json:"collectionFormat,omitempty"` - Default interface{} `json:"default,omitempty"` - Example interface{} `json:"example,omitempty"` -} - -// TypeName return the type (or format) of a simple schema -func (s *SimpleSchema) TypeName() string { - if s.Format != "" { - return s.Format - } - return s.Type -} - -// ItemsTypeName yields the type of items in a simple schema array -func (s *SimpleSchema) ItemsTypeName() string { - if s.Items == nil { - return "" - } - return s.Items.TypeName() -} - -// Items a limited subset of JSON-Schema's items object. -// It is used by parameter definitions that are not located in "body". -// -// For more information: http://goo.gl/8us55a#items-object -type Items struct { - Refable - CommonValidations - SimpleSchema - VendorExtensible -} - -// NewItems creates a new instance of items -func NewItems() *Items { - return &Items{} -} - -// Typed a fluent builder method for the type of item -func (i *Items) Typed(tpe, format string) *Items { - i.Type = tpe - i.Format = format - return i -} - -// AsNullable flags this schema as nullable. 
-func (i *Items) AsNullable() *Items { - i.Nullable = true - return i -} - -// CollectionOf a fluent builder method for an array item -func (i *Items) CollectionOf(items *Items, format string) *Items { - i.Type = jsonArray - i.Items = items - i.CollectionFormat = format - return i -} - -// WithDefault sets the default value on this item -func (i *Items) WithDefault(defaultValue interface{}) *Items { - i.Default = defaultValue - return i -} - -// WithMaxLength sets a max length value -func (i *Items) WithMaxLength(max int64) *Items { - i.MaxLength = &max - return i -} - -// WithMinLength sets a min length value -func (i *Items) WithMinLength(min int64) *Items { - i.MinLength = &min - return i -} - -// WithPattern sets a pattern value -func (i *Items) WithPattern(pattern string) *Items { - i.Pattern = pattern - return i -} - -// WithMultipleOf sets a multiple of value -func (i *Items) WithMultipleOf(number float64) *Items { - i.MultipleOf = &number - return i -} - -// WithMaximum sets a maximum number value -func (i *Items) WithMaximum(max float64, exclusive bool) *Items { - i.Maximum = &max - i.ExclusiveMaximum = exclusive - return i -} - -// WithMinimum sets a minimum number value -func (i *Items) WithMinimum(min float64, exclusive bool) *Items { - i.Minimum = &min - i.ExclusiveMinimum = exclusive - return i -} - -// WithEnum sets a the enum values (replace) -func (i *Items) WithEnum(values ...interface{}) *Items { - i.Enum = append([]interface{}{}, values...) - return i -} - -// WithMaxItems sets the max items -func (i *Items) WithMaxItems(size int64) *Items { - i.MaxItems = &size - return i -} - -// WithMinItems sets the min items -func (i *Items) WithMinItems(size int64) *Items { - i.MinItems = &size - return i -} - -// UniqueValues dictates that this array can only have unique items -func (i *Items) UniqueValues() *Items { - i.UniqueItems = true - return i -} - -// AllowDuplicates this array can have duplicates -func (i *Items) AllowDuplicates() *Items { - i.UniqueItems = false - return i -} - -// WithValidations is a fluent method to set Items validations -func (i *Items) WithValidations(val CommonValidations) *Items { - i.SetValidations(SchemaValidations{CommonValidations: val}) - return i -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (i *Items) UnmarshalJSON(data []byte) error { - var validations CommonValidations - if err := json.Unmarshal(data, &validations); err != nil { - return err - } - var ref Refable - if err := json.Unmarshal(data, &ref); err != nil { - return err - } - var simpleSchema SimpleSchema - if err := json.Unmarshal(data, &simpleSchema); err != nil { - return err - } - var vendorExtensible VendorExtensible - if err := json.Unmarshal(data, &vendorExtensible); err != nil { - return err - } - i.Refable = ref - i.CommonValidations = validations - i.SimpleSchema = simpleSchema - i.VendorExtensible = vendorExtensible - return nil -} - -// MarshalJSON converts this items object to JSON -func (i Items) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(i.CommonValidations) - if err != nil { - return nil, err - } - b2, err := json.Marshal(i.SimpleSchema) - if err != nil { - return nil, err - } - b3, err := json.Marshal(i.Refable) - if err != nil { - return nil, err - } - b4, err := json.Marshal(i.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b4, b3, b1, b2), nil -} - -// JSONLookup look up a value by the json property name -func (i Items) JSONLookup(token string) (interface{}, error) { - if 
token == jsonRef { - return &i.Ref, nil - } - - r, _, err := jsonpointer.GetForToken(i.CommonValidations, token) - if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { - return nil, err - } - if r != nil { - return r, nil - } - r, _, err = jsonpointer.GetForToken(i.SimpleSchema, token) - return r, err -} diff --git a/vendor/github.com/go-openapi/spec/license.go b/vendor/github.com/go-openapi/spec/license.go deleted file mode 100644 index b42f80368..000000000 --- a/vendor/github.com/go-openapi/spec/license.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/swag" -) - -// License information for the exposed API. -// -// For more information: http://goo.gl/8us55a#licenseObject -type License struct { - LicenseProps - VendorExtensible -} - -// LicenseProps holds the properties of a License object -type LicenseProps struct { - Name string `json:"name,omitempty"` - URL string `json:"url,omitempty"` -} - -// UnmarshalJSON hydrates License from json -func (l *License) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &l.LicenseProps); err != nil { - return err - } - return json.Unmarshal(data, &l.VendorExtensible) -} - -// MarshalJSON produces License as json -func (l License) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(l.LicenseProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(l.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} diff --git a/vendor/github.com/go-openapi/spec/normalizer.go b/vendor/github.com/go-openapi/spec/normalizer.go deleted file mode 100644 index d6c483971..000000000 --- a/vendor/github.com/go-openapi/spec/normalizer.go +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "net/url" - "path" - "strings" -) - -const fileScheme = "file" - -// normalizeURI ensures that all $ref paths used internally by the expander are canonicalized. -// -// NOTE(windows): there is a tolerance over the strict URI format on windows. -// -// The normalizer accepts relative file URLs like 'Path\File.JSON' as well as absolute file URLs like -// 'C:\Path\file.Yaml'. 
-// -// Both are canonicalized with a "file://" scheme, slashes and a lower-cased path: -// 'file:///c:/path/file.yaml' -// -// URLs can be specified with a file scheme, like in 'file:///folder/file.json' or -// 'file:///c:\folder\File.json'. -// -// URLs like file://C:\folder are considered invalid (i.e. there is no host 'c:\folder') and a "repair" -// is attempted. -// -// The base path argument is assumed to be canonicalized (e.g. using normalizeBase()). -func normalizeURI(refPath, base string) string { - refURL, err := url.Parse(refPath) - if err != nil { - specLogger.Printf("warning: invalid URI in $ref %q: %v", refPath, err) - refURL, refPath = repairURI(refPath) - } - - fixWindowsURI(refURL, refPath) // noop on non-windows OS - - refURL.Path = path.Clean(refURL.Path) - if refURL.Path == "." { - refURL.Path = "" - } - - r := MustCreateRef(refURL.String()) - if r.IsCanonical() { - return refURL.String() - } - - baseURL, _ := url.Parse(base) - if path.IsAbs(refURL.Path) { - baseURL.Path = refURL.Path - } else if refURL.Path != "" { - baseURL.Path = path.Join(path.Dir(baseURL.Path), refURL.Path) - } - // copying fragment from ref to base - baseURL.Fragment = refURL.Fragment - - return baseURL.String() -} - -// denormalizeRef returns the simplest notation for a normalized $ref, given the path of the original root document. -// -// When calling this, we assume that: -// * $ref is a canonical URI -// * originalRelativeBase is a canonical URI -// -// denormalizeRef is currently used when we rewrite a $ref after a circular $ref has been detected. -// In this case, expansion stops and normally renders the internal canonical $ref. -// -// This internal $ref is eventually rebased to the original RelativeBase used for the expansion. -// -// There is a special case for schemas that are anchored with an "id": -// in that case, the rebasing is performed // against the id only if this is an anchor for the initial root document. -// All other intermediate "id"'s found along the way are ignored for the purpose of rebasing. -// -func denormalizeRef(ref *Ref, originalRelativeBase, id string) Ref { - debugLog("denormalizeRef called:\n$ref: %q\noriginal: %s\nroot ID:%s", ref.String(), originalRelativeBase, id) - - if ref.String() == "" || ref.IsRoot() || ref.HasFragmentOnly { - // short circuit: $ref to current doc - return *ref - } - - if id != "" { - idBaseURL, err := url.Parse(id) - if err == nil { // if the schema id is not usable as a URI, ignore it - if ref, ok := rebase(ref, idBaseURL, true); ok { // rebase, but keep references to root unchaged (do not want $ref: "") - // $ref relative to the ID of the schema in the root document - return ref - } - } - } - - originalRelativeBaseURL, _ := url.Parse(originalRelativeBase) - - r, _ := rebase(ref, originalRelativeBaseURL, false) - - return r -} - -func rebase(ref *Ref, v *url.URL, notEqual bool) (Ref, bool) { - var newBase url.URL - - u := ref.GetURL() - - if u.Scheme != v.Scheme || u.Host != v.Host { - return *ref, false - } - - docPath := v.Path - v.Path = path.Dir(v.Path) - - if v.Path == "." 
{ - v.Path = "" - } else if !strings.HasSuffix(v.Path, "/") { - v.Path += "/" - } - - newBase.Fragment = u.Fragment - - if strings.HasPrefix(u.Path, docPath) { - newBase.Path = strings.TrimPrefix(u.Path, docPath) - } else { - newBase.Path = strings.TrimPrefix(u.Path, v.Path) - } - - if notEqual && newBase.Path == "" && newBase.Fragment == "" { - // do not want rebasing to end up in an empty $ref - return *ref, false - } - - if path.IsAbs(newBase.Path) { - // whenever we end up with an absolute path, specify the scheme and host - newBase.Scheme = v.Scheme - newBase.Host = v.Host - } - - return MustCreateRef(newBase.String()), true -} - -// normalizeRef canonicalize a Ref, using a canonical relativeBase as its absolute anchor -func normalizeRef(ref *Ref, relativeBase string) *Ref { - r := MustCreateRef(normalizeURI(ref.String(), relativeBase)) - return &r -} - -// normalizeBase performs a normalization of the input base path. -// -// This always yields a canonical URI (absolute), usable for the document cache. -// -// It ensures that all further internal work on basePath may safely assume -// a non-empty, cross-platform, canonical URI (i.e. absolute). -// -// This normalization tolerates windows paths (e.g. C:\x\y\File.dat) and transform this -// in a file:// URL with lower cased drive letter and path. -// -// See also: https://en.wikipedia.org/wiki/File_URI_scheme -func normalizeBase(in string) string { - u, err := url.Parse(in) - if err != nil { - specLogger.Printf("warning: invalid URI in RelativeBase %q: %v", in, err) - u, in = repairURI(in) - } - - u.Fragment = "" // any fragment in the base is irrelevant - - fixWindowsURI(u, in) // noop on non-windows OS - - u.Path = path.Clean(u.Path) - if u.Path == "." { // empty after Clean() - u.Path = "" - } - - if u.Scheme != "" { - if path.IsAbs(u.Path) || u.Scheme != fileScheme { - // this is absolute or explicitly not a local file: we're good - return u.String() - } - } - - // no scheme or file scheme with relative path: assume file and make it absolute - // enforce scheme file://... with absolute path. - // - // If the input path is relative, we anchor the path to the current working directory. - // NOTE: we may end up with a host component. Leave it unchanged: e.g. file://host/folder/file.json - - u.Scheme = fileScheme - u.Path = absPath(u.Path) // platform-dependent - u.RawQuery = "" // any query component is irrelevant for a base - return u.String() -} diff --git a/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go b/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go deleted file mode 100644 index c8a064534..000000000 --- a/vendor/github.com/go-openapi/spec/normalizer_nonwindows.go +++ /dev/null @@ -1,43 +0,0 @@ -// +build !windows - -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "net/url" - "path/filepath" -) - -// absPath makes a file path absolute and compatible with a URI path component. -// -// The parameter must be a path, not an URI. 
-func absPath(in string) string { - anchored, err := filepath.Abs(in) - if err != nil { - specLogger.Printf("warning: could not resolve current working directory: %v", err) - return in - } - return anchored -} - -func repairURI(in string) (*url.URL, string) { - u, _ := url.Parse("") - debugLog("repaired URI: original: %q, repaired: %q", in, "") - return u, "" -} - -func fixWindowsURI(u *url.URL, in string) { -} diff --git a/vendor/github.com/go-openapi/spec/normalizer_windows.go b/vendor/github.com/go-openapi/spec/normalizer_windows.go deleted file mode 100644 index fe2d1ecd4..000000000 --- a/vendor/github.com/go-openapi/spec/normalizer_windows.go +++ /dev/null @@ -1,154 +0,0 @@ -// -build windows - -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "net/url" - "os" - "path" - "path/filepath" - "strings" -) - -// absPath makes a file path absolute and compatible with a URI path component -// -// The parameter must be a path, not an URI. -func absPath(in string) string { - // NOTE(windows): filepath.Abs exhibits a special behavior on windows for empty paths. - // See https://github.com/golang/go/issues/24441 - if in == "" { - in = "." - } - - anchored, err := filepath.Abs(in) - if err != nil { - specLogger.Printf("warning: could not resolve current working directory: %v", err) - return in - } - - pth := strings.ReplaceAll(strings.ToLower(anchored), `\`, `/`) - if !strings.HasPrefix(pth, "/") { - pth = "/" + pth - } - - return path.Clean(pth) -} - -// repairURI tolerates invalid file URIs with common typos -// such as 'file://E:\folder\file', that break the regular URL parser. -// -// Adopting the same defaults as for unixes (e.g. return an empty path) would -// result into a counter-intuitive result for that case (e.g. E:\folder\file is -// eventually resolved as the current directory). The repair will detect the missing "/". -// -// Note that this only works for the file scheme. -func repairURI(in string) (*url.URL, string) { - const prefix = fileScheme + "://" - if !strings.HasPrefix(in, prefix) { - // giving up: resolve to empty path - u, _ := url.Parse("") - - return u, "" - } - - // attempt the repair, stripping the scheme should be sufficient - u, _ := url.Parse(strings.TrimPrefix(in, prefix)) - debugLog("repaired URI: original: %q, repaired: %q", in, u.String()) - - return u, u.String() -} - -// fixWindowsURI tolerates an absolute file path on windows such as C:\Base\File.yaml or \\host\share\Base\File.yaml -// and makes it a canonical URI: file:///c:/base/file.yaml -// -// Catch 22 notes for Windows: -// -// * There may be a drive letter on windows (it is lower-cased) -// * There may be a share UNC, e.g. \\server\folder\data.xml -// * Paths are case insensitive -// * Paths may already contain slashes -// * Paths must be slashed -// -// NOTE: there is no escaping. "/" may be valid separators just like "\". 
-// We don't use ToSlash() (which escapes everything) because windows now also -// tolerates the use of "/". Hence, both C:\File.yaml and C:/File.yaml will work. -func fixWindowsURI(u *url.URL, in string) { - drive := filepath.VolumeName(in) - - if len(drive) > 0 { - if len(u.Scheme) == 1 && strings.EqualFold(u.Scheme, drive[:1]) { // a path with a drive letter - u.Scheme = fileScheme - u.Host = "" - u.Path = strings.Join([]string{drive, u.Opaque, u.Path}, `/`) // reconstruct the full path component (no fragment, no query) - } else if u.Host == "" && strings.HasPrefix(u.Path, drive) { // a path with a \\host volume - // NOTE: the special host@port syntax for UNC is not supported (yet) - u.Scheme = fileScheme - - // this is a modified version of filepath.Dir() to apply on the VolumeName itself - i := len(drive) - 1 - for i >= 0 && !os.IsPathSeparator(drive[i]) { - i-- - } - host := drive[:i] // \\host\share => host - - u.Path = strings.TrimPrefix(u.Path, host) - u.Host = strings.TrimPrefix(host, `\\`) - } - - u.Opaque = "" - u.Path = strings.ReplaceAll(strings.ToLower(u.Path), `\`, `/`) - - // ensure we form an absolute path - if !strings.HasPrefix(u.Path, "/") { - u.Path = "/" + u.Path - } - - u.Path = path.Clean(u.Path) - - return - } - - if u.Scheme == fileScheme { - // Handle dodgy cases for file://{...} URIs on windows. - // A canonical URI should always be followed by an absolute path. - // - // Examples: - // * file:///folder/file => valid, unchanged - // * file:///c:\folder\file => slashed - // * file:///./folder/file => valid, cleaned to remove the dot - // * file:///.\folder\file => remapped to cwd - // * file:///. => dodgy, remapped to / (consistent with the behavior on unix) - // * file:///.. => dodgy, remapped to / (consistent with the behavior on unix) - if (!path.IsAbs(u.Path) && !filepath.IsAbs(u.Path)) || (strings.HasPrefix(u.Path, `/.`) && strings.Contains(u.Path, `\`)) { - // ensure we form an absolute path - u.Path, _ = filepath.Abs(strings.TrimLeft(u.Path, `/`)) - if !strings.HasPrefix(u.Path, "/") { - u.Path = "/" + u.Path - } - } - u.Path = strings.ToLower(u.Path) - } - - // NOTE: lower case normalization does not propagate to inner resources, - // generated when rebasing: when joining a relative URI with a file to an absolute base, - // only the base is currently lower-cased. - // - // For now, we assume this is good enough for most use cases - // and try not to generate too many differences - // between the output produced on different platforms. - u.Path = path.Clean(strings.ReplaceAll(u.Path, `\`, `/`)) -} diff --git a/vendor/github.com/go-openapi/spec/operation.go b/vendor/github.com/go-openapi/spec/operation.go deleted file mode 100644 index 995ce6acb..000000000 --- a/vendor/github.com/go-openapi/spec/operation.go +++ /dev/null @@ -1,397 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package spec - -import ( - "bytes" - "encoding/gob" - "encoding/json" - "sort" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -func init() { - gob.Register(map[string]interface{}{}) - gob.Register([]interface{}{}) -} - -// OperationProps describes an operation -// -// NOTES: -// - schemes, when present must be from [http, https, ws, wss]: see validate -// - Security is handled as a special case: see MarshalJSON function -type OperationProps struct { - Description string `json:"description,omitempty"` - Consumes []string `json:"consumes,omitempty"` - Produces []string `json:"produces,omitempty"` - Schemes []string `json:"schemes,omitempty"` - Tags []string `json:"tags,omitempty"` - Summary string `json:"summary,omitempty"` - ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` - ID string `json:"operationId,omitempty"` - Deprecated bool `json:"deprecated,omitempty"` - Security []map[string][]string `json:"security,omitempty"` - Parameters []Parameter `json:"parameters,omitempty"` - Responses *Responses `json:"responses,omitempty"` -} - -// MarshalJSON takes care of serializing operation properties to JSON -// -// We use a custom marhaller here to handle a special cases related to -// the Security field. We need to preserve zero length slice -// while omitting the field when the value is nil/unset. -func (op OperationProps) MarshalJSON() ([]byte, error) { - type Alias OperationProps - if op.Security == nil { - return json.Marshal(&struct { - Security []map[string][]string `json:"security,omitempty"` - *Alias - }{ - Security: op.Security, - Alias: (*Alias)(&op), - }) - } - return json.Marshal(&struct { - Security []map[string][]string `json:"security"` - *Alias - }{ - Security: op.Security, - Alias: (*Alias)(&op), - }) -} - -// Operation describes a single API operation on a path. -// -// For more information: http://goo.gl/8us55a#operationObject -type Operation struct { - VendorExtensible - OperationProps -} - -// SuccessResponse gets a success response model -func (o *Operation) SuccessResponse() (*Response, int, bool) { - if o.Responses == nil { - return nil, 0, false - } - - responseCodes := make([]int, 0, len(o.Responses.StatusCodeResponses)) - for k := range o.Responses.StatusCodeResponses { - if k >= 200 && k < 300 { - responseCodes = append(responseCodes, k) - } - } - if len(responseCodes) > 0 { - sort.Ints(responseCodes) - v := o.Responses.StatusCodeResponses[responseCodes[0]] - return &v, responseCodes[0], true - } - - return o.Responses.Default, 0, false -} - -// JSONLookup look up a value by the json property name -func (o Operation) JSONLookup(token string) (interface{}, error) { - if ex, ok := o.Extensions[token]; ok { - return &ex, nil - } - r, _, err := jsonpointer.GetForToken(o.OperationProps, token) - return r, err -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (o *Operation) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &o.OperationProps); err != nil { - return err - } - return json.Unmarshal(data, &o.VendorExtensible) -} - -// MarshalJSON converts this items object to JSON -func (o Operation) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(o.OperationProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(o.VendorExtensible) - if err != nil { - return nil, err - } - concated := swag.ConcatJSON(b1, b2) - return concated, nil -} - -// NewOperation creates a new operation instance. 
-// It expects an ID as parameter but not passing an ID is also valid. -func NewOperation(id string) *Operation { - op := new(Operation) - op.ID = id - return op -} - -// WithID sets the ID property on this operation, allows for chaining. -func (o *Operation) WithID(id string) *Operation { - o.ID = id - return o -} - -// WithDescription sets the description on this operation, allows for chaining -func (o *Operation) WithDescription(description string) *Operation { - o.Description = description - return o -} - -// WithSummary sets the summary on this operation, allows for chaining -func (o *Operation) WithSummary(summary string) *Operation { - o.Summary = summary - return o -} - -// WithExternalDocs sets/removes the external docs for/from this operation. -// When you pass empty strings as params the external documents will be removed. -// When you pass non-empty string as one value then those values will be used on the external docs object. -// So when you pass a non-empty description, you should also pass the url and vice versa. -func (o *Operation) WithExternalDocs(description, url string) *Operation { - if description == "" && url == "" { - o.ExternalDocs = nil - return o - } - - if o.ExternalDocs == nil { - o.ExternalDocs = &ExternalDocumentation{} - } - o.ExternalDocs.Description = description - o.ExternalDocs.URL = url - return o -} - -// Deprecate marks the operation as deprecated -func (o *Operation) Deprecate() *Operation { - o.Deprecated = true - return o -} - -// Undeprecate marks the operation as not deprected -func (o *Operation) Undeprecate() *Operation { - o.Deprecated = false - return o -} - -// WithConsumes adds media types for incoming body values -func (o *Operation) WithConsumes(mediaTypes ...string) *Operation { - o.Consumes = append(o.Consumes, mediaTypes...) - return o -} - -// WithProduces adds media types for outgoing body values -func (o *Operation) WithProduces(mediaTypes ...string) *Operation { - o.Produces = append(o.Produces, mediaTypes...) - return o -} - -// WithTags adds tags for this operation -func (o *Operation) WithTags(tags ...string) *Operation { - o.Tags = append(o.Tags, tags...) - return o -} - -// AddParam adds a parameter to this operation, when a parameter for that location -// and with that name already exists it will be replaced -func (o *Operation) AddParam(param *Parameter) *Operation { - if param == nil { - return o - } - - for i, p := range o.Parameters { - if p.Name == param.Name && p.In == param.In { - params := append(o.Parameters[:i], *param) - params = append(params, o.Parameters[i+1:]...) - o.Parameters = params - return o - } - } - - o.Parameters = append(o.Parameters, *param) - return o -} - -// RemoveParam removes a parameter from the operation -func (o *Operation) RemoveParam(name, in string) *Operation { - for i, p := range o.Parameters { - if p.Name == name && p.In == in { - o.Parameters = append(o.Parameters[:i], o.Parameters[i+1:]...) - return o - } - } - return o -} - -// SecuredWith adds a security scope to this operation. -func (o *Operation) SecuredWith(name string, scopes ...string) *Operation { - o.Security = append(o.Security, map[string][]string{name: scopes}) - return o -} - -// WithDefaultResponse adds a default response to the operation. -// Passing a nil value will remove the response -func (o *Operation) WithDefaultResponse(response *Response) *Operation { - return o.RespondsWith(0, response) -} - -// RespondsWith adds a status code response to the operation. 
-// When the code is 0 the value of the response will be used as default response value. -// When the value of the response is nil it will be removed from the operation -func (o *Operation) RespondsWith(code int, response *Response) *Operation { - if o.Responses == nil { - o.Responses = new(Responses) - } - if code == 0 { - o.Responses.Default = response - return o - } - if response == nil { - delete(o.Responses.StatusCodeResponses, code) - return o - } - if o.Responses.StatusCodeResponses == nil { - o.Responses.StatusCodeResponses = make(map[int]Response) - } - o.Responses.StatusCodeResponses[code] = *response - return o -} - -type opsAlias OperationProps - -type gobAlias struct { - Security []map[string]struct { - List []string - Pad bool - } - Alias *opsAlias - SecurityIsEmpty bool -} - -// GobEncode provides a safe gob encoder for Operation, including empty security requirements -func (o Operation) GobEncode() ([]byte, error) { - raw := struct { - Ext VendorExtensible - Props OperationProps - }{ - Ext: o.VendorExtensible, - Props: o.OperationProps, - } - var b bytes.Buffer - err := gob.NewEncoder(&b).Encode(raw) - return b.Bytes(), err -} - -// GobDecode provides a safe gob decoder for Operation, including empty security requirements -func (o *Operation) GobDecode(b []byte) error { - var raw struct { - Ext VendorExtensible - Props OperationProps - } - - buf := bytes.NewBuffer(b) - err := gob.NewDecoder(buf).Decode(&raw) - if err != nil { - return err - } - o.VendorExtensible = raw.Ext - o.OperationProps = raw.Props - return nil -} - -// GobEncode provides a safe gob encoder for Operation, including empty security requirements -func (op OperationProps) GobEncode() ([]byte, error) { - raw := gobAlias{ - Alias: (*opsAlias)(&op), - } - - var b bytes.Buffer - if op.Security == nil { - // nil security requirement - err := gob.NewEncoder(&b).Encode(raw) - return b.Bytes(), err - } - - if len(op.Security) == 0 { - // empty, but non-nil security requirement - raw.SecurityIsEmpty = true - raw.Alias.Security = nil - err := gob.NewEncoder(&b).Encode(raw) - return b.Bytes(), err - } - - raw.Security = make([]map[string]struct { - List []string - Pad bool - }, 0, len(op.Security)) - for _, req := range op.Security { - v := make(map[string]struct { - List []string - Pad bool - }, len(req)) - for k, val := range req { - v[k] = struct { - List []string - Pad bool - }{ - List: val, - } - } - raw.Security = append(raw.Security, v) - } - - err := gob.NewEncoder(&b).Encode(raw) - return b.Bytes(), err -} - -// GobDecode provides a safe gob decoder for Operation, including empty security requirements -func (op *OperationProps) GobDecode(b []byte) error { - var raw gobAlias - - buf := bytes.NewBuffer(b) - err := gob.NewDecoder(buf).Decode(&raw) - if err != nil { - return err - } - if raw.Alias == nil { - return nil - } - - switch { - case raw.SecurityIsEmpty: - // empty, but non-nil security requirement - raw.Alias.Security = []map[string][]string{} - case len(raw.Alias.Security) == 0: - // nil security requirement - raw.Alias.Security = nil - default: - raw.Alias.Security = make([]map[string][]string, 0, len(raw.Security)) - for _, req := range raw.Security { - v := make(map[string][]string, len(req)) - for k, val := range req { - v[k] = make([]string, 0, len(val.List)) - v[k] = append(v[k], val.List...) 
- } - raw.Alias.Security = append(raw.Alias.Security, v) - } - } - - *op = *(*OperationProps)(raw.Alias) - return nil -} diff --git a/vendor/github.com/go-openapi/spec/parameter.go b/vendor/github.com/go-openapi/spec/parameter.go deleted file mode 100644 index 2b2b89b67..000000000 --- a/vendor/github.com/go-openapi/spec/parameter.go +++ /dev/null @@ -1,326 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "strings" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -// QueryParam creates a query parameter -func QueryParam(name string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "query"}} -} - -// HeaderParam creates a header parameter, this is always required by default -func HeaderParam(name string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "header", Required: true}} -} - -// PathParam creates a path parameter, this is always required -func PathParam(name string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "path", Required: true}} -} - -// BodyParam creates a body parameter -func BodyParam(name string, schema *Schema) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "body", Schema: schema}} -} - -// FormDataParam creates a body parameter -func FormDataParam(name string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}} -} - -// FileParam creates a body parameter -func FileParam(name string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name, In: "formData"}, - SimpleSchema: SimpleSchema{Type: "file"}} -} - -// SimpleArrayParam creates a param for a simple array (string, int, date etc) -func SimpleArrayParam(name, tpe, fmt string) *Parameter { - return &Parameter{ParamProps: ParamProps{Name: name}, - SimpleSchema: SimpleSchema{Type: jsonArray, CollectionFormat: "csv", - Items: &Items{SimpleSchema: SimpleSchema{Type: tpe, Format: fmt}}}} -} - -// ParamRef creates a parameter that's a json reference -func ParamRef(uri string) *Parameter { - p := new(Parameter) - p.Ref = MustCreateRef(uri) - return p -} - -// ParamProps describes the specific attributes of an operation parameter -// -// NOTE: -// - Schema is defined when "in" == "body": see validate -// - AllowEmptyValue is allowed where "in" == "query" || "formData" -type ParamProps struct { - Description string `json:"description,omitempty"` - Name string `json:"name,omitempty"` - In string `json:"in,omitempty"` - Required bool `json:"required,omitempty"` - Schema *Schema `json:"schema,omitempty"` - AllowEmptyValue bool `json:"allowEmptyValue,omitempty"` -} - -// Parameter a unique parameter is defined by a combination of a [name](#parameterName) and [location](#parameterIn). -// -// There are five possible parameter types. 
-// * Path - Used together with [Path Templating](#pathTemplating), where the parameter value is actually part -// of the operation's URL. This does not include the host or base path of the API. For example, in `/items/{itemId}`, -// the path parameter is `itemId`. -// * Query - Parameters that are appended to the URL. For example, in `/items?id=###`, the query parameter is `id`. -// * Header - Custom headers that are expected as part of the request. -// * Body - The payload that's appended to the HTTP request. Since there can only be one payload, there can only be -// _one_ body parameter. The name of the body parameter has no effect on the parameter itself and is used for -// documentation purposes only. Since Form parameters are also in the payload, body and form parameters cannot exist -// together for the same operation. -// * Form - Used to describe the payload of an HTTP request when either `application/x-www-form-urlencoded` or -// `multipart/form-data` are used as the content type of the request (in Swagger's definition, -// the [`consumes`](#operationConsumes) property of an operation). This is the only parameter type that can be used -// to send files, thus supporting the `file` type. Since form parameters are sent in the payload, they cannot be -// declared together with a body parameter for the same operation. Form parameters have a different format based on -// the content-type used (for further details, consult http://www.w3.org/TR/html401/interact/forms.html#h-17.13.4). -// * `application/x-www-form-urlencoded` - Similar to the format of Query parameters but as a payload. -// For example, `foo=1&bar=swagger` - both `foo` and `bar` are form parameters. This is normally used for simple -// parameters that are being transferred. -// * `multipart/form-data` - each parameter takes a section in the payload with an internal header. -// For example, for the header `Content-Disposition: form-data; name="submit-name"` the name of the parameter is -// `submit-name`. This type of form parameters is more commonly used for file transfers. 
-// -// For more information: http://goo.gl/8us55a#parameterObject -type Parameter struct { - Refable - CommonValidations - SimpleSchema - VendorExtensible - ParamProps -} - -// JSONLookup look up a value by the json property name -func (p Parameter) JSONLookup(token string) (interface{}, error) { - if ex, ok := p.Extensions[token]; ok { - return &ex, nil - } - if token == jsonRef { - return &p.Ref, nil - } - - r, _, err := jsonpointer.GetForToken(p.CommonValidations, token) - if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { - return nil, err - } - if r != nil { - return r, nil - } - r, _, err = jsonpointer.GetForToken(p.SimpleSchema, token) - if err != nil && !strings.HasPrefix(err.Error(), "object has no field") { - return nil, err - } - if r != nil { - return r, nil - } - r, _, err = jsonpointer.GetForToken(p.ParamProps, token) - return r, err -} - -// WithDescription a fluent builder method for the description of the parameter -func (p *Parameter) WithDescription(description string) *Parameter { - p.Description = description - return p -} - -// Named a fluent builder method to override the name of the parameter -func (p *Parameter) Named(name string) *Parameter { - p.Name = name - return p -} - -// WithLocation a fluent builder method to override the location of the parameter -func (p *Parameter) WithLocation(in string) *Parameter { - p.In = in - return p -} - -// Typed a fluent builder method for the type of the parameter value -func (p *Parameter) Typed(tpe, format string) *Parameter { - p.Type = tpe - p.Format = format - return p -} - -// CollectionOf a fluent builder method for an array parameter -func (p *Parameter) CollectionOf(items *Items, format string) *Parameter { - p.Type = jsonArray - p.Items = items - p.CollectionFormat = format - return p -} - -// WithDefault sets the default value on this parameter -func (p *Parameter) WithDefault(defaultValue interface{}) *Parameter { - p.AsOptional() // with default implies optional - p.Default = defaultValue - return p -} - -// AllowsEmptyValues flags this parameter as being ok with empty values -func (p *Parameter) AllowsEmptyValues() *Parameter { - p.AllowEmptyValue = true - return p -} - -// NoEmptyValues flags this parameter as not liking empty values -func (p *Parameter) NoEmptyValues() *Parameter { - p.AllowEmptyValue = false - return p -} - -// AsOptional flags this parameter as optional -func (p *Parameter) AsOptional() *Parameter { - p.Required = false - return p -} - -// AsRequired flags this parameter as required -func (p *Parameter) AsRequired() *Parameter { - if p.Default != nil { // with a default required makes no sense - return p - } - p.Required = true - return p -} - -// WithMaxLength sets a max length value -func (p *Parameter) WithMaxLength(max int64) *Parameter { - p.MaxLength = &max - return p -} - -// WithMinLength sets a min length value -func (p *Parameter) WithMinLength(min int64) *Parameter { - p.MinLength = &min - return p -} - -// WithPattern sets a pattern value -func (p *Parameter) WithPattern(pattern string) *Parameter { - p.Pattern = pattern - return p -} - -// WithMultipleOf sets a multiple of value -func (p *Parameter) WithMultipleOf(number float64) *Parameter { - p.MultipleOf = &number - return p -} - -// WithMaximum sets a maximum number value -func (p *Parameter) WithMaximum(max float64, exclusive bool) *Parameter { - p.Maximum = &max - p.ExclusiveMaximum = exclusive - return p -} - -// WithMinimum sets a minimum number value -func (p *Parameter) WithMinimum(min float64, 
exclusive bool) *Parameter { - p.Minimum = &min - p.ExclusiveMinimum = exclusive - return p -} - -// WithEnum sets a the enum values (replace) -func (p *Parameter) WithEnum(values ...interface{}) *Parameter { - p.Enum = append([]interface{}{}, values...) - return p -} - -// WithMaxItems sets the max items -func (p *Parameter) WithMaxItems(size int64) *Parameter { - p.MaxItems = &size - return p -} - -// WithMinItems sets the min items -func (p *Parameter) WithMinItems(size int64) *Parameter { - p.MinItems = &size - return p -} - -// UniqueValues dictates that this array can only have unique items -func (p *Parameter) UniqueValues() *Parameter { - p.UniqueItems = true - return p -} - -// AllowDuplicates this array can have duplicates -func (p *Parameter) AllowDuplicates() *Parameter { - p.UniqueItems = false - return p -} - -// WithValidations is a fluent method to set parameter validations -func (p *Parameter) WithValidations(val CommonValidations) *Parameter { - p.SetValidations(SchemaValidations{CommonValidations: val}) - return p -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (p *Parameter) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &p.CommonValidations); err != nil { - return err - } - if err := json.Unmarshal(data, &p.Refable); err != nil { - return err - } - if err := json.Unmarshal(data, &p.SimpleSchema); err != nil { - return err - } - if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { - return err - } - return json.Unmarshal(data, &p.ParamProps) -} - -// MarshalJSON converts this items object to JSON -func (p Parameter) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(p.CommonValidations) - if err != nil { - return nil, err - } - b2, err := json.Marshal(p.SimpleSchema) - if err != nil { - return nil, err - } - b3, err := json.Marshal(p.Refable) - if err != nil { - return nil, err - } - b4, err := json.Marshal(p.VendorExtensible) - if err != nil { - return nil, err - } - b5, err := json.Marshal(p.ParamProps) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b3, b1, b2, b4, b5), nil -} diff --git a/vendor/github.com/go-openapi/spec/path_item.go b/vendor/github.com/go-openapi/spec/path_item.go deleted file mode 100644 index 68fc8e901..000000000 --- a/vendor/github.com/go-openapi/spec/path_item.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
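The path_item.go and paths.go deletions that follow remove the per-route containers of the spec. A short, hypothetical sketch of how an Operation is mounted on a PathItem and collected into Paths (the /items/{id} route is illustrative only):

package main

import (
	"encoding/json"
	"fmt"

	spec "github.com/go-openapi/spec"
)

func main() {
	// A PathItem groups the operations available on one route.
	item := spec.PathItem{}
	item.Get = spec.NewOperation("getItem").WithSummary("Fetch a single item")

	// Paths flattens its map into the JSON object keys; per the custom
	// marshaller below, only keys starting with "/" are serialized.
	paths := spec.Paths{
		Paths: map[string]spec.PathItem{
			"/items/{id}": item, // hypothetical route
		},
	}

	b, _ := json.Marshal(paths)
	fmt.Println(string(b)) // e.g. {"/items/{id}":{"get":{...}}}
}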
- -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -// PathItemProps the path item specific properties -type PathItemProps struct { - Get *Operation `json:"get,omitempty"` - Put *Operation `json:"put,omitempty"` - Post *Operation `json:"post,omitempty"` - Delete *Operation `json:"delete,omitempty"` - Options *Operation `json:"options,omitempty"` - Head *Operation `json:"head,omitempty"` - Patch *Operation `json:"patch,omitempty"` - Parameters []Parameter `json:"parameters,omitempty"` -} - -// PathItem describes the operations available on a single path. -// A Path Item may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering). -// The path itself is still exposed to the documentation viewer but they will -// not know which operations and parameters are available. -// -// For more information: http://goo.gl/8us55a#pathItemObject -type PathItem struct { - Refable - VendorExtensible - PathItemProps -} - -// JSONLookup look up a value by the json property name -func (p PathItem) JSONLookup(token string) (interface{}, error) { - if ex, ok := p.Extensions[token]; ok { - return &ex, nil - } - if token == jsonRef { - return &p.Ref, nil - } - r, _, err := jsonpointer.GetForToken(p.PathItemProps, token) - return r, err -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (p *PathItem) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &p.Refable); err != nil { - return err - } - if err := json.Unmarshal(data, &p.VendorExtensible); err != nil { - return err - } - return json.Unmarshal(data, &p.PathItemProps) -} - -// MarshalJSON converts this items object to JSON -func (p PathItem) MarshalJSON() ([]byte, error) { - b3, err := json.Marshal(p.Refable) - if err != nil { - return nil, err - } - b4, err := json.Marshal(p.VendorExtensible) - if err != nil { - return nil, err - } - b5, err := json.Marshal(p.PathItemProps) - if err != nil { - return nil, err - } - concated := swag.ConcatJSON(b3, b4, b5) - return concated, nil -} diff --git a/vendor/github.com/go-openapi/spec/paths.go b/vendor/github.com/go-openapi/spec/paths.go deleted file mode 100644 index 9dc82a290..000000000 --- a/vendor/github.com/go-openapi/spec/paths.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "fmt" - "strings" - - "github.com/go-openapi/swag" -) - -// Paths holds the relative paths to the individual endpoints. -// The path is appended to the [`basePath`](http://goo.gl/8us55a#swaggerBasePath) in order -// to construct the full URL. -// The Paths may be empty, due to [ACL constraints](http://goo.gl/8us55a#securityFiltering). 
-// -// For more information: http://goo.gl/8us55a#pathsObject -type Paths struct { - VendorExtensible - Paths map[string]PathItem `json:"-"` // custom serializer to flatten this, each entry must start with "/" -} - -// JSONLookup look up a value by the json property name -func (p Paths) JSONLookup(token string) (interface{}, error) { - if pi, ok := p.Paths[token]; ok { - return &pi, nil - } - if ex, ok := p.Extensions[token]; ok { - return &ex, nil - } - return nil, fmt.Errorf("object has no field %q", token) -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (p *Paths) UnmarshalJSON(data []byte) error { - var res map[string]json.RawMessage - if err := json.Unmarshal(data, &res); err != nil { - return err - } - for k, v := range res { - if strings.HasPrefix(strings.ToLower(k), "x-") { - if p.Extensions == nil { - p.Extensions = make(map[string]interface{}) - } - var d interface{} - if err := json.Unmarshal(v, &d); err != nil { - return err - } - p.Extensions[k] = d - } - if strings.HasPrefix(k, "/") { - if p.Paths == nil { - p.Paths = make(map[string]PathItem) - } - var pi PathItem - if err := json.Unmarshal(v, &pi); err != nil { - return err - } - p.Paths[k] = pi - } - } - return nil -} - -// MarshalJSON converts this items object to JSON -func (p Paths) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(p.VendorExtensible) - if err != nil { - return nil, err - } - - pths := make(map[string]PathItem) - for k, v := range p.Paths { - if strings.HasPrefix(k, "/") { - pths[k] = v - } - } - b2, err := json.Marshal(pths) - if err != nil { - return nil, err - } - concated := swag.ConcatJSON(b1, b2) - return concated, nil -} diff --git a/vendor/github.com/go-openapi/spec/properties.go b/vendor/github.com/go-openapi/spec/properties.go deleted file mode 100644 index 2af13787a..000000000 --- a/vendor/github.com/go-openapi/spec/properties.go +++ /dev/null @@ -1,91 +0,0 @@ -package spec - -import ( - "bytes" - "encoding/json" - "reflect" - "sort" -) - -// OrderSchemaItem holds a named schema (e.g. from a property of an object) -type OrderSchemaItem struct { - Name string - Schema -} - -// OrderSchemaItems is a sortable slice of named schemas. -// The ordering is defined by the x-order schema extension. -type OrderSchemaItems []OrderSchemaItem - -// MarshalJSON produces a json object with keys defined by the name schemas -// of the OrderSchemaItems slice, keeping the original order of the slice. 
-func (items OrderSchemaItems) MarshalJSON() ([]byte, error) { - buf := bytes.NewBuffer(nil) - buf.WriteString("{") - for i := range items { - if i > 0 { - buf.WriteString(",") - } - buf.WriteString("\"") - buf.WriteString(items[i].Name) - buf.WriteString("\":") - bs, err := json.Marshal(&items[i].Schema) - if err != nil { - return nil, err - } - buf.Write(bs) - } - buf.WriteString("}") - return buf.Bytes(), nil -} - -func (items OrderSchemaItems) Len() int { return len(items) } -func (items OrderSchemaItems) Swap(i, j int) { items[i], items[j] = items[j], items[i] } -func (items OrderSchemaItems) Less(i, j int) (ret bool) { - ii, oki := items[i].Extensions.GetString("x-order") - ij, okj := items[j].Extensions.GetString("x-order") - if oki { - if okj { - defer func() { - if err := recover(); err != nil { - defer func() { - if err = recover(); err != nil { - ret = items[i].Name < items[j].Name - } - }() - ret = reflect.ValueOf(ii).String() < reflect.ValueOf(ij).String() - } - }() - return reflect.ValueOf(ii).Int() < reflect.ValueOf(ij).Int() - } - return true - } else if okj { - return false - } - return items[i].Name < items[j].Name -} - -// SchemaProperties is a map representing the properties of a Schema object. -// It knows how to transform its keys into an ordered slice. -type SchemaProperties map[string]Schema - -// ToOrderedSchemaItems transforms the map of properties into a sortable slice -func (properties SchemaProperties) ToOrderedSchemaItems() OrderSchemaItems { - items := make(OrderSchemaItems, 0, len(properties)) - for k, v := range properties { - items = append(items, OrderSchemaItem{ - Name: k, - Schema: v, - }) - } - sort.Sort(items) - return items -} - -// MarshalJSON produces properties as json, keeping their order. -func (properties SchemaProperties) MarshalJSON() ([]byte, error) { - if properties == nil { - return []byte("null"), nil - } - return json.Marshal(properties.ToOrderedSchemaItems()) -} diff --git a/vendor/github.com/go-openapi/spec/ref.go b/vendor/github.com/go-openapi/spec/ref.go deleted file mode 100644 index b0ef9bd9c..000000000 --- a/vendor/github.com/go-openapi/spec/ref.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
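The ref.go and resolver.go files removed below and in the following hunks provide the JSON reference plumbing. A minimal sketch of creating a Ref and resolving it against an in-memory document (the document layout and the Item definition are hypothetical):

package main

import (
	"fmt"

	spec "github.com/go-openapi/spec"
)

func main() {
	// A root document expressed as plain JSON data (hypothetical layout).
	root := map[string]interface{}{
		"definitions": map[string]interface{}{
			"Item": map[string]interface{}{
				"type":     "object",
				"required": []interface{}{"name"},
			},
		},
	}

	// MustCreateRef panics on an invalid URI; NewRef returns an error instead.
	ref := spec.MustCreateRef("#/definitions/Item")

	// ResolveRef walks the JSON pointer inside root and returns a *spec.Schema.
	sch, err := spec.ResolveRef(root, &ref)
	if err != nil {
		panic(err)
	}
	fmt.Println(sch.Type, sch.Required) // [object] [name]
}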
- -package spec - -import ( - "bytes" - "encoding/gob" - "encoding/json" - "net/http" - "os" - "path/filepath" - - "github.com/go-openapi/jsonreference" -) - -// Refable is a struct for things that accept a $ref property -type Refable struct { - Ref Ref -} - -// MarshalJSON marshals the ref to json -func (r Refable) MarshalJSON() ([]byte, error) { - return r.Ref.MarshalJSON() -} - -// UnmarshalJSON unmarshalss the ref from json -func (r *Refable) UnmarshalJSON(d []byte) error { - return json.Unmarshal(d, &r.Ref) -} - -// Ref represents a json reference that is potentially resolved -type Ref struct { - jsonreference.Ref -} - -// RemoteURI gets the remote uri part of the ref -func (r *Ref) RemoteURI() string { - if r.String() == "" { - return "" - } - - u := *r.GetURL() - u.Fragment = "" - return u.String() -} - -// IsValidURI returns true when the url the ref points to can be found -func (r *Ref) IsValidURI(basepaths ...string) bool { - if r.String() == "" { - return true - } - - v := r.RemoteURI() - if v == "" { - return true - } - - if r.HasFullURL { - //nolint:noctx,gosec - rr, err := http.Get(v) - if err != nil { - return false - } - defer rr.Body.Close() - - return rr.StatusCode/100 == 2 - } - - if !(r.HasFileScheme || r.HasFullFilePath || r.HasURLPathOnly) { - return false - } - - // check for local file - pth := v - if r.HasURLPathOnly { - base := "." - if len(basepaths) > 0 { - base = filepath.Dir(filepath.Join(basepaths...)) - } - p, e := filepath.Abs(filepath.ToSlash(filepath.Join(base, pth))) - if e != nil { - return false - } - pth = p - } - - fi, err := os.Stat(filepath.ToSlash(pth)) - if err != nil { - return false - } - - return !fi.IsDir() -} - -// Inherits creates a new reference from a parent and a child -// If the child cannot inherit from the parent, an error is returned -func (r *Ref) Inherits(child Ref) (*Ref, error) { - ref, err := r.Ref.Inherits(child.Ref) - if err != nil { - return nil, err - } - return &Ref{Ref: *ref}, nil -} - -// NewRef creates a new instance of a ref object -// returns an error when the reference uri is an invalid uri -func NewRef(refURI string) (Ref, error) { - ref, err := jsonreference.New(refURI) - if err != nil { - return Ref{}, err - } - return Ref{Ref: ref}, nil -} - -// MustCreateRef creates a ref object but panics when refURI is invalid. -// Use the NewRef method for a version that returns an error. 
-func MustCreateRef(refURI string) Ref { - return Ref{Ref: jsonreference.MustCreateRef(refURI)} -} - -// MarshalJSON marshals this ref into a JSON object -func (r Ref) MarshalJSON() ([]byte, error) { - str := r.String() - if str == "" { - if r.IsRoot() { - return []byte(`{"$ref":""}`), nil - } - return []byte("{}"), nil - } - v := map[string]interface{}{"$ref": str} - return json.Marshal(v) -} - -// UnmarshalJSON unmarshals this ref from a JSON object -func (r *Ref) UnmarshalJSON(d []byte) error { - var v map[string]interface{} - if err := json.Unmarshal(d, &v); err != nil { - return err - } - return r.fromMap(v) -} - -// GobEncode provides a safe gob encoder for Ref -func (r Ref) GobEncode() ([]byte, error) { - var b bytes.Buffer - raw, err := r.MarshalJSON() - if err != nil { - return nil, err - } - err = gob.NewEncoder(&b).Encode(raw) - return b.Bytes(), err -} - -// GobDecode provides a safe gob decoder for Ref -func (r *Ref) GobDecode(b []byte) error { - var raw []byte - buf := bytes.NewBuffer(b) - err := gob.NewDecoder(buf).Decode(&raw) - if err != nil { - return err - } - return json.Unmarshal(raw, r) -} - -func (r *Ref) fromMap(v map[string]interface{}) error { - if v == nil { - return nil - } - - if vv, ok := v["$ref"]; ok { - if str, ok := vv.(string); ok { - ref, err := jsonreference.New(str) - if err != nil { - return err - } - *r = Ref{Ref: ref} - } - } - - return nil -} diff --git a/vendor/github.com/go-openapi/spec/resolver.go b/vendor/github.com/go-openapi/spec/resolver.go deleted file mode 100644 index 47d1ee13f..000000000 --- a/vendor/github.com/go-openapi/spec/resolver.go +++ /dev/null @@ -1,127 +0,0 @@ -package spec - -import ( - "fmt" - - "github.com/go-openapi/swag" -) - -func resolveAnyWithBase(root interface{}, ref *Ref, result interface{}, options *ExpandOptions) error { - options = optionsOrDefault(options) - resolver := defaultSchemaLoader(root, options, nil, nil) - - if err := resolver.Resolve(ref, result, options.RelativeBase); err != nil { - return err - } - - return nil -} - -// ResolveRefWithBase resolves a reference against a context root with preservation of base path -func ResolveRefWithBase(root interface{}, ref *Ref, options *ExpandOptions) (*Schema, error) { - result := new(Schema) - - if err := resolveAnyWithBase(root, ref, result, options); err != nil { - return nil, err - } - - return result, nil -} - -// ResolveRef resolves a reference for a schema against a context root -// ref is guaranteed to be in root (no need to go to external files) -// -// ResolveRef is ONLY called from the code generation module -func ResolveRef(root interface{}, ref *Ref) (*Schema, error) { - res, _, err := ref.GetPointer().Get(root) - if err != nil { - return nil, err - } - - switch sch := res.(type) { - case Schema: - return &sch, nil - case *Schema: - return sch, nil - case map[string]interface{}: - newSch := new(Schema) - if err = swag.DynamicJSONToStruct(sch, newSch); err != nil { - return nil, err - } - return newSch, nil - default: - return nil, fmt.Errorf("type: %T: %w", sch, ErrUnknownTypeForReference) - } -} - -// ResolveParameterWithBase resolves a parameter reference against a context root and base path -func ResolveParameterWithBase(root interface{}, ref Ref, options *ExpandOptions) (*Parameter, error) { - result := new(Parameter) - - if err := resolveAnyWithBase(root, &ref, result, options); err != nil { - return nil, err - } - - return result, nil -} - -// ResolveParameter resolves a parameter reference against a context root -func ResolveParameter(root 
interface{}, ref Ref) (*Parameter, error) { - return ResolveParameterWithBase(root, ref, nil) -} - -// ResolveResponseWithBase resolves response a reference against a context root and base path -func ResolveResponseWithBase(root interface{}, ref Ref, options *ExpandOptions) (*Response, error) { - result := new(Response) - - err := resolveAnyWithBase(root, &ref, result, options) - if err != nil { - return nil, err - } - - return result, nil -} - -// ResolveResponse resolves response a reference against a context root -func ResolveResponse(root interface{}, ref Ref) (*Response, error) { - return ResolveResponseWithBase(root, ref, nil) -} - -// ResolvePathItemWithBase resolves response a path item against a context root and base path -func ResolvePathItemWithBase(root interface{}, ref Ref, options *ExpandOptions) (*PathItem, error) { - result := new(PathItem) - - if err := resolveAnyWithBase(root, &ref, result, options); err != nil { - return nil, err - } - - return result, nil -} - -// ResolvePathItem resolves response a path item against a context root and base path -// -// Deprecated: use ResolvePathItemWithBase instead -func ResolvePathItem(root interface{}, ref Ref, options *ExpandOptions) (*PathItem, error) { - return ResolvePathItemWithBase(root, ref, options) -} - -// ResolveItemsWithBase resolves parameter items reference against a context root and base path. -// -// NOTE: stricly speaking, this construct is not supported by Swagger 2.0. -// Similarly, $ref are forbidden in response headers. -func ResolveItemsWithBase(root interface{}, ref Ref, options *ExpandOptions) (*Items, error) { - result := new(Items) - - if err := resolveAnyWithBase(root, &ref, result, options); err != nil { - return nil, err - } - - return result, nil -} - -// ResolveItems resolves parameter items reference against a context root and base path. -// -// Deprecated: use ResolveItemsWithBase instead -func ResolveItems(root interface{}, ref Ref, options *ExpandOptions) (*Items, error) { - return ResolveItemsWithBase(root, ref, options) -} diff --git a/vendor/github.com/go-openapi/spec/response.go b/vendor/github.com/go-openapi/spec/response.go deleted file mode 100644 index 0340b60d8..000000000 --- a/vendor/github.com/go-openapi/spec/response.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -// ResponseProps properties specific to a response -type ResponseProps struct { - Description string `json:"description"` - Schema *Schema `json:"schema,omitempty"` - Headers map[string]Header `json:"headers,omitempty"` - Examples map[string]interface{} `json:"examples,omitempty"` -} - -// Response describes a single response from an API Operation. 
-// -// For more information: http://goo.gl/8us55a#responseObject -type Response struct { - Refable - ResponseProps - VendorExtensible -} - -// JSONLookup look up a value by the json property name -func (r Response) JSONLookup(token string) (interface{}, error) { - if ex, ok := r.Extensions[token]; ok { - return &ex, nil - } - if token == "$ref" { - return &r.Ref, nil - } - ptr, _, err := jsonpointer.GetForToken(r.ResponseProps, token) - return ptr, err -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (r *Response) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &r.ResponseProps); err != nil { - return err - } - if err := json.Unmarshal(data, &r.Refable); err != nil { - return err - } - return json.Unmarshal(data, &r.VendorExtensible) -} - -// MarshalJSON converts this items object to JSON -func (r Response) MarshalJSON() ([]byte, error) { - var ( - b1 []byte - err error - ) - - if r.Ref.String() == "" { - // when there is no $ref, empty description is rendered as an empty string - b1, err = json.Marshal(r.ResponseProps) - } else { - // when there is $ref inside the schema, description should be omitempty-ied - b1, err = json.Marshal(struct { - Description string `json:"description,omitempty"` - Schema *Schema `json:"schema,omitempty"` - Headers map[string]Header `json:"headers,omitempty"` - Examples map[string]interface{} `json:"examples,omitempty"` - }{ - Description: r.ResponseProps.Description, - Schema: r.ResponseProps.Schema, - Examples: r.ResponseProps.Examples, - }) - } - if err != nil { - return nil, err - } - - b2, err := json.Marshal(r.Refable) - if err != nil { - return nil, err - } - b3, err := json.Marshal(r.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2, b3), nil -} - -// NewResponse creates a new response instance -func NewResponse() *Response { - return new(Response) -} - -// ResponseRef creates a response as a json reference -func ResponseRef(url string) *Response { - resp := NewResponse() - resp.Ref = MustCreateRef(url) - return resp -} - -// WithDescription sets the description on this response, allows for chaining -func (r *Response) WithDescription(description string) *Response { - r.Description = description - return r -} - -// WithSchema sets the schema on this response, allows for chaining. 
-// Passing a nil argument removes the schema from this response -func (r *Response) WithSchema(schema *Schema) *Response { - r.Schema = schema - return r -} - -// AddHeader adds a header to this response -func (r *Response) AddHeader(name string, header *Header) *Response { - if header == nil { - return r.RemoveHeader(name) - } - if r.Headers == nil { - r.Headers = make(map[string]Header) - } - r.Headers[name] = *header - return r -} - -// RemoveHeader removes a header from this response -func (r *Response) RemoveHeader(name string) *Response { - delete(r.Headers, name) - return r -} - -// AddExample adds an example to this response -func (r *Response) AddExample(mediaType string, example interface{}) *Response { - if r.Examples == nil { - r.Examples = make(map[string]interface{}) - } - r.Examples[mediaType] = example - return r -} diff --git a/vendor/github.com/go-openapi/spec/responses.go b/vendor/github.com/go-openapi/spec/responses.go deleted file mode 100644 index 4efb6f868..000000000 --- a/vendor/github.com/go-openapi/spec/responses.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "fmt" - "reflect" - "strconv" - - "github.com/go-openapi/swag" -) - -// Responses is a container for the expected responses of an operation. -// The container maps a HTTP response code to the expected response. -// It is not expected from the documentation to necessarily cover all possible HTTP response codes, -// since they may not be known in advance. However, it is expected from the documentation to cover -// a successful operation response and any known errors. -// -// The `default` can be used a default response object for all HTTP codes that are not covered -// individually by the specification. -// -// The `Responses Object` MUST contain at least one response code, and it SHOULD be the response -// for a successful operation call. 
-// -// For more information: http://goo.gl/8us55a#responsesObject -type Responses struct { - VendorExtensible - ResponsesProps -} - -// JSONLookup implements an interface to customize json pointer lookup -func (r Responses) JSONLookup(token string) (interface{}, error) { - if token == "default" { - return r.Default, nil - } - if ex, ok := r.Extensions[token]; ok { - return &ex, nil - } - if i, err := strconv.Atoi(token); err == nil { - if scr, ok := r.StatusCodeResponses[i]; ok { - return scr, nil - } - } - return nil, fmt.Errorf("object has no field %q", token) -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (r *Responses) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &r.ResponsesProps); err != nil { - return err - } - if err := json.Unmarshal(data, &r.VendorExtensible); err != nil { - return err - } - if reflect.DeepEqual(ResponsesProps{}, r.ResponsesProps) { - r.ResponsesProps = ResponsesProps{} - } - return nil -} - -// MarshalJSON converts this items object to JSON -func (r Responses) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(r.ResponsesProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(r.VendorExtensible) - if err != nil { - return nil, err - } - concated := swag.ConcatJSON(b1, b2) - return concated, nil -} - -// ResponsesProps describes all responses for an operation. -// It tells what is the default response and maps all responses with a -// HTTP status code. -type ResponsesProps struct { - Default *Response - StatusCodeResponses map[int]Response -} - -// MarshalJSON marshals responses as JSON -func (r ResponsesProps) MarshalJSON() ([]byte, error) { - toser := map[string]Response{} - if r.Default != nil { - toser["default"] = *r.Default - } - for k, v := range r.StatusCodeResponses { - toser[strconv.Itoa(k)] = v - } - return json.Marshal(toser) -} - -// UnmarshalJSON unmarshals responses from JSON -func (r *ResponsesProps) UnmarshalJSON(data []byte) error { - var res map[string]Response - if err := json.Unmarshal(data, &res); err != nil { - return nil - } - if v, ok := res["default"]; ok { - r.Default = &v - delete(res, "default") - } - for k, v := range res { - if nk, err := strconv.Atoi(k); err == nil { - if r.StatusCodeResponses == nil { - r.StatusCodeResponses = map[int]Response{} - } - r.StatusCodeResponses[nk] = v - } - } - return nil -} diff --git a/vendor/github.com/go-openapi/spec/schema.go b/vendor/github.com/go-openapi/spec/schema.go deleted file mode 100644 index a8d0f737a..000000000 --- a/vendor/github.com/go-openapi/spec/schema.go +++ /dev/null @@ -1,646 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
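The schema.go deletion below removes the Schema builders used throughout the package. A small, hypothetical sketch composing an object schema out of those helpers (the title and property names are illustrative):

package main

import (
	"encoding/json"
	"fmt"

	spec "github.com/go-openapi/spec"
)

func main() {
	// Compose an object schema out of the typed property constructors.
	item := (&spec.Schema{}).
		Typed("object", "").
		WithTitle("Item"). // hypothetical title
		SetProperty("name", *spec.StringProperty().WithMinLength(1)).
		SetProperty("price", *spec.Float64Property()).
		SetProperty("tags", *spec.ArrayProperty(spec.StringProperty())).
		WithRequired("name")

	b, _ := json.MarshalIndent(item, "", "  ")
	fmt.Println(string(b))
}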
- -package spec - -import ( - "encoding/json" - "fmt" - "net/url" - "strings" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -// BooleanProperty creates a boolean property -func BooleanProperty() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"boolean"}}} -} - -// BoolProperty creates a boolean property -func BoolProperty() *Schema { return BooleanProperty() } - -// StringProperty creates a string property -func StringProperty() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}} -} - -// CharProperty creates a string property -func CharProperty() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}}} -} - -// Float64Property creates a float64/double property -func Float64Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "double"}} -} - -// Float32Property creates a float32/float property -func Float32Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"number"}, Format: "float"}} -} - -// Int8Property creates an int8 property -func Int8Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int8"}} -} - -// Int16Property creates an int16 property -func Int16Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int16"}} -} - -// Int32Property creates an int32 property -func Int32Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int32"}} -} - -// Int64Property creates an int64 property -func Int64Property() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"integer"}, Format: "int64"}} -} - -// StrFmtProperty creates a property for the named string format -func StrFmtProperty(format string) *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: format}} -} - -// DateProperty creates a date property -func DateProperty() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date"}} -} - -// DateTimeProperty creates a date time property -func DateTimeProperty() *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"string"}, Format: "date-time"}} -} - -// MapProperty creates a map property -func MapProperty(property *Schema) *Schema { - return &Schema{SchemaProps: SchemaProps{Type: []string{"object"}, - AdditionalProperties: &SchemaOrBool{Allows: true, Schema: property}}} -} - -// RefProperty creates a ref property -func RefProperty(name string) *Schema { - return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}} -} - -// RefSchema creates a ref property -func RefSchema(name string) *Schema { - return &Schema{SchemaProps: SchemaProps{Ref: MustCreateRef(name)}} -} - -// ArrayProperty creates an array property -func ArrayProperty(items *Schema) *Schema { - if items == nil { - return &Schema{SchemaProps: SchemaProps{Type: []string{"array"}}} - } - return &Schema{SchemaProps: SchemaProps{Items: &SchemaOrArray{Schema: items}, Type: []string{"array"}}} -} - -// ComposedSchema creates a schema with allOf -func ComposedSchema(schemas ...Schema) *Schema { - s := new(Schema) - s.AllOf = schemas - return s -} - -// SchemaURL represents a schema url -type SchemaURL string - -// MarshalJSON marshal this to JSON -func (r SchemaURL) MarshalJSON() ([]byte, error) { - if r == "" { - return []byte("{}"), nil - } - v := map[string]interface{}{"$schema": string(r)} - return json.Marshal(v) -} - -// 
UnmarshalJSON unmarshal this from JSON -func (r *SchemaURL) UnmarshalJSON(data []byte) error { - var v map[string]interface{} - if err := json.Unmarshal(data, &v); err != nil { - return err - } - return r.fromMap(v) -} - -func (r *SchemaURL) fromMap(v map[string]interface{}) error { - if v == nil { - return nil - } - if vv, ok := v["$schema"]; ok { - if str, ok := vv.(string); ok { - u, err := url.Parse(str) - if err != nil { - return err - } - - *r = SchemaURL(u.String()) - } - } - return nil -} - -// SchemaProps describes a JSON schema (draft 4) -type SchemaProps struct { - ID string `json:"id,omitempty"` - Ref Ref `json:"-"` - Schema SchemaURL `json:"-"` - Description string `json:"description,omitempty"` - Type StringOrArray `json:"type,omitempty"` - Nullable bool `json:"nullable,omitempty"` - Format string `json:"format,omitempty"` - Title string `json:"title,omitempty"` - Default interface{} `json:"default,omitempty"` - Maximum *float64 `json:"maximum,omitempty"` - ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` - Minimum *float64 `json:"minimum,omitempty"` - ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"` - MaxLength *int64 `json:"maxLength,omitempty"` - MinLength *int64 `json:"minLength,omitempty"` - Pattern string `json:"pattern,omitempty"` - MaxItems *int64 `json:"maxItems,omitempty"` - MinItems *int64 `json:"minItems,omitempty"` - UniqueItems bool `json:"uniqueItems,omitempty"` - MultipleOf *float64 `json:"multipleOf,omitempty"` - Enum []interface{} `json:"enum,omitempty"` - MaxProperties *int64 `json:"maxProperties,omitempty"` - MinProperties *int64 `json:"minProperties,omitempty"` - Required []string `json:"required,omitempty"` - Items *SchemaOrArray `json:"items,omitempty"` - AllOf []Schema `json:"allOf,omitempty"` - OneOf []Schema `json:"oneOf,omitempty"` - AnyOf []Schema `json:"anyOf,omitempty"` - Not *Schema `json:"not,omitempty"` - Properties SchemaProperties `json:"properties,omitempty"` - AdditionalProperties *SchemaOrBool `json:"additionalProperties,omitempty"` - PatternProperties SchemaProperties `json:"patternProperties,omitempty"` - Dependencies Dependencies `json:"dependencies,omitempty"` - AdditionalItems *SchemaOrBool `json:"additionalItems,omitempty"` - Definitions Definitions `json:"definitions,omitempty"` -} - -// SwaggerSchemaProps are additional properties supported by swagger schemas, but not JSON-schema (draft 4) -type SwaggerSchemaProps struct { - Discriminator string `json:"discriminator,omitempty"` - ReadOnly bool `json:"readOnly,omitempty"` - XML *XMLObject `json:"xml,omitempty"` - ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` - Example interface{} `json:"example,omitempty"` -} - -// Schema the schema object allows the definition of input and output data types. -// These types can be objects, but also primitives and arrays. -// This object is based on the [JSON Schema Specification Draft 4](http://json-schema.org/) -// and uses a predefined subset of it. -// On top of this subset, there are extensions provided by this specification to allow for more complete documentation. 
-// -// For more information: http://goo.gl/8us55a#schemaObject -type Schema struct { - VendorExtensible - SchemaProps - SwaggerSchemaProps - ExtraProps map[string]interface{} `json:"-"` -} - -// JSONLookup implements an interface to customize json pointer lookup -func (s Schema) JSONLookup(token string) (interface{}, error) { - if ex, ok := s.Extensions[token]; ok { - return &ex, nil - } - - if ex, ok := s.ExtraProps[token]; ok { - return &ex, nil - } - - r, _, err := jsonpointer.GetForToken(s.SchemaProps, token) - if r != nil || (err != nil && !strings.HasPrefix(err.Error(), "object has no field")) { - return r, err - } - r, _, err = jsonpointer.GetForToken(s.SwaggerSchemaProps, token) - return r, err -} - -// WithID sets the id for this schema, allows for chaining -func (s *Schema) WithID(id string) *Schema { - s.ID = id - return s -} - -// WithTitle sets the title for this schema, allows for chaining -func (s *Schema) WithTitle(title string) *Schema { - s.Title = title - return s -} - -// WithDescription sets the description for this schema, allows for chaining -func (s *Schema) WithDescription(description string) *Schema { - s.Description = description - return s -} - -// WithProperties sets the properties for this schema -func (s *Schema) WithProperties(schemas map[string]Schema) *Schema { - s.Properties = schemas - return s -} - -// SetProperty sets a property on this schema -func (s *Schema) SetProperty(name string, schema Schema) *Schema { - if s.Properties == nil { - s.Properties = make(map[string]Schema) - } - s.Properties[name] = schema - return s -} - -// WithAllOf sets the all of property -func (s *Schema) WithAllOf(schemas ...Schema) *Schema { - s.AllOf = schemas - return s -} - -// WithMaxProperties sets the max number of properties an object can have -func (s *Schema) WithMaxProperties(max int64) *Schema { - s.MaxProperties = &max - return s -} - -// WithMinProperties sets the min number of properties an object must have -func (s *Schema) WithMinProperties(min int64) *Schema { - s.MinProperties = &min - return s -} - -// Typed sets the type of this schema for a single value item -func (s *Schema) Typed(tpe, format string) *Schema { - s.Type = []string{tpe} - s.Format = format - return s -} - -// AddType adds a type with potential format to the types for this schema -func (s *Schema) AddType(tpe, format string) *Schema { - s.Type = append(s.Type, tpe) - if format != "" { - s.Format = format - } - return s -} - -// AsNullable flags this schema as nullable. -func (s *Schema) AsNullable() *Schema { - s.Nullable = true - return s -} - -// CollectionOf a fluent builder method for an array parameter -func (s *Schema) CollectionOf(items Schema) *Schema { - s.Type = []string{jsonArray} - s.Items = &SchemaOrArray{Schema: &items} - return s -} - -// WithDefault sets the default value on this parameter -func (s *Schema) WithDefault(defaultValue interface{}) *Schema { - s.Default = defaultValue - return s -} - -// WithRequired flags this parameter as required -func (s *Schema) WithRequired(items ...string) *Schema { - s.Required = items - return s -} - -// AddRequired adds field names to the required properties array -func (s *Schema) AddRequired(items ...string) *Schema { - s.Required = append(s.Required, items...) 
- return s -} - -// WithMaxLength sets a max length value -func (s *Schema) WithMaxLength(max int64) *Schema { - s.MaxLength = &max - return s -} - -// WithMinLength sets a min length value -func (s *Schema) WithMinLength(min int64) *Schema { - s.MinLength = &min - return s -} - -// WithPattern sets a pattern value -func (s *Schema) WithPattern(pattern string) *Schema { - s.Pattern = pattern - return s -} - -// WithMultipleOf sets a multiple of value -func (s *Schema) WithMultipleOf(number float64) *Schema { - s.MultipleOf = &number - return s -} - -// WithMaximum sets a maximum number value -func (s *Schema) WithMaximum(max float64, exclusive bool) *Schema { - s.Maximum = &max - s.ExclusiveMaximum = exclusive - return s -} - -// WithMinimum sets a minimum number value -func (s *Schema) WithMinimum(min float64, exclusive bool) *Schema { - s.Minimum = &min - s.ExclusiveMinimum = exclusive - return s -} - -// WithEnum sets a the enum values (replace) -func (s *Schema) WithEnum(values ...interface{}) *Schema { - s.Enum = append([]interface{}{}, values...) - return s -} - -// WithMaxItems sets the max items -func (s *Schema) WithMaxItems(size int64) *Schema { - s.MaxItems = &size - return s -} - -// WithMinItems sets the min items -func (s *Schema) WithMinItems(size int64) *Schema { - s.MinItems = &size - return s -} - -// UniqueValues dictates that this array can only have unique items -func (s *Schema) UniqueValues() *Schema { - s.UniqueItems = true - return s -} - -// AllowDuplicates this array can have duplicates -func (s *Schema) AllowDuplicates() *Schema { - s.UniqueItems = false - return s -} - -// AddToAllOf adds a schema to the allOf property -func (s *Schema) AddToAllOf(schemas ...Schema) *Schema { - s.AllOf = append(s.AllOf, schemas...) - return s -} - -// WithDiscriminator sets the name of the discriminator field -func (s *Schema) WithDiscriminator(discriminator string) *Schema { - s.Discriminator = discriminator - return s -} - -// AsReadOnly flags this schema as readonly -func (s *Schema) AsReadOnly() *Schema { - s.ReadOnly = true - return s -} - -// AsWritable flags this schema as writeable (not read-only) -func (s *Schema) AsWritable() *Schema { - s.ReadOnly = false - return s -} - -// WithExample sets the example for this schema -func (s *Schema) WithExample(example interface{}) *Schema { - s.Example = example - return s -} - -// WithExternalDocs sets/removes the external docs for/from this schema. -// When you pass empty strings as params the external documents will be removed. -// When you pass non-empty string as one value then those values will be used on the external docs object. -// So when you pass a non-empty description, you should also pass the url and vice versa. 
-func (s *Schema) WithExternalDocs(description, url string) *Schema { - if description == "" && url == "" { - s.ExternalDocs = nil - return s - } - - if s.ExternalDocs == nil { - s.ExternalDocs = &ExternalDocumentation{} - } - s.ExternalDocs.Description = description - s.ExternalDocs.URL = url - return s -} - -// WithXMLName sets the xml name for the object -func (s *Schema) WithXMLName(name string) *Schema { - if s.XML == nil { - s.XML = new(XMLObject) - } - s.XML.Name = name - return s -} - -// WithXMLNamespace sets the xml namespace for the object -func (s *Schema) WithXMLNamespace(namespace string) *Schema { - if s.XML == nil { - s.XML = new(XMLObject) - } - s.XML.Namespace = namespace - return s -} - -// WithXMLPrefix sets the xml prefix for the object -func (s *Schema) WithXMLPrefix(prefix string) *Schema { - if s.XML == nil { - s.XML = new(XMLObject) - } - s.XML.Prefix = prefix - return s -} - -// AsXMLAttribute flags this object as xml attribute -func (s *Schema) AsXMLAttribute() *Schema { - if s.XML == nil { - s.XML = new(XMLObject) - } - s.XML.Attribute = true - return s -} - -// AsXMLElement flags this object as an xml node -func (s *Schema) AsXMLElement() *Schema { - if s.XML == nil { - s.XML = new(XMLObject) - } - s.XML.Attribute = false - return s -} - -// AsWrappedXML flags this object as wrapped, this is mostly useful for array types -func (s *Schema) AsWrappedXML() *Schema { - if s.XML == nil { - s.XML = new(XMLObject) - } - s.XML.Wrapped = true - return s -} - -// AsUnwrappedXML flags this object as an xml node -func (s *Schema) AsUnwrappedXML() *Schema { - if s.XML == nil { - s.XML = new(XMLObject) - } - s.XML.Wrapped = false - return s -} - -// SetValidations defines all schema validations. -// -// NOTE: Required, ReadOnly, AllOf, AnyOf, OneOf and Not are not considered. 
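For context on the hunks above: the deleted vendor/github.com/go-openapi/spec/schema.go defines a fluent builder API on spec.Schema that downstream code in this repo's dependency tree may rely on. The following is a minimal, illustrative sketch of that API, not part of this patch; it uses only the methods visible in the deleted file above, and the package path comes from the vendor directory being removed. The main function and the variable names are illustrative.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	// Build a small object schema with the fluent builders defined above.
	user := new(spec.Schema).
		Typed("object", "").
		WithTitle("User").
		WithDescription("A user record").
		WithRequired("name")

	// SetProperty lazily initializes the Properties map.
	name := new(spec.Schema).Typed("string", "").WithMinLength(1)
	age := new(spec.Schema).Typed("integer", "int32").WithMinimum(0, false)
	tags := new(spec.Schema).CollectionOf(*new(spec.Schema).Typed("string", "")).WithMaxItems(10)

	user.SetProperty("name", *name)
	user.SetProperty("age", *age)
	user.SetProperty("tags", *tags)

	// Schema implements json.Marshaler (MarshalJSON above), so this emits a
	// plain Swagger/JSON-Schema object with the ref, schema and extra props merged.
	b, err := json.Marshal(user)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}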
-func (s *Schema) SetValidations(val SchemaValidations) { - s.Maximum = val.Maximum - s.ExclusiveMaximum = val.ExclusiveMaximum - s.Minimum = val.Minimum - s.ExclusiveMinimum = val.ExclusiveMinimum - s.MaxLength = val.MaxLength - s.MinLength = val.MinLength - s.Pattern = val.Pattern - s.MaxItems = val.MaxItems - s.MinItems = val.MinItems - s.UniqueItems = val.UniqueItems - s.MultipleOf = val.MultipleOf - s.Enum = val.Enum - s.MinProperties = val.MinProperties - s.MaxProperties = val.MaxProperties - s.PatternProperties = val.PatternProperties -} - -// WithValidations is a fluent method to set schema validations -func (s *Schema) WithValidations(val SchemaValidations) *Schema { - s.SetValidations(val) - return s -} - -// Validations returns a clone of the validations for this schema -func (s Schema) Validations() SchemaValidations { - return SchemaValidations{ - CommonValidations: CommonValidations{ - Maximum: s.Maximum, - ExclusiveMaximum: s.ExclusiveMaximum, - Minimum: s.Minimum, - ExclusiveMinimum: s.ExclusiveMinimum, - MaxLength: s.MaxLength, - MinLength: s.MinLength, - Pattern: s.Pattern, - MaxItems: s.MaxItems, - MinItems: s.MinItems, - UniqueItems: s.UniqueItems, - MultipleOf: s.MultipleOf, - Enum: s.Enum, - }, - MinProperties: s.MinProperties, - MaxProperties: s.MaxProperties, - PatternProperties: s.PatternProperties, - } -} - -// MarshalJSON marshal this to JSON -func (s Schema) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(s.SchemaProps) - if err != nil { - return nil, fmt.Errorf("schema props %v", err) - } - b2, err := json.Marshal(s.VendorExtensible) - if err != nil { - return nil, fmt.Errorf("vendor props %v", err) - } - b3, err := s.Ref.MarshalJSON() - if err != nil { - return nil, fmt.Errorf("ref prop %v", err) - } - b4, err := s.Schema.MarshalJSON() - if err != nil { - return nil, fmt.Errorf("schema prop %v", err) - } - b5, err := json.Marshal(s.SwaggerSchemaProps) - if err != nil { - return nil, fmt.Errorf("common validations %v", err) - } - var b6 []byte - if s.ExtraProps != nil { - jj, err := json.Marshal(s.ExtraProps) - if err != nil { - return nil, fmt.Errorf("extra props %v", err) - } - b6 = jj - } - return swag.ConcatJSON(b1, b2, b3, b4, b5, b6), nil -} - -// UnmarshalJSON marshal this from JSON -func (s *Schema) UnmarshalJSON(data []byte) error { - props := struct { - SchemaProps - SwaggerSchemaProps - }{} - if err := json.Unmarshal(data, &props); err != nil { - return err - } - - sch := Schema{ - SchemaProps: props.SchemaProps, - SwaggerSchemaProps: props.SwaggerSchemaProps, - } - - var d map[string]interface{} - if err := json.Unmarshal(data, &d); err != nil { - return err - } - - _ = sch.Ref.fromMap(d) - _ = sch.Schema.fromMap(d) - - delete(d, "$ref") - delete(d, "$schema") - for _, pn := range swag.DefaultJSONNameProvider.GetJSONNames(s) { - delete(d, pn) - } - - for k, vv := range d { - lk := strings.ToLower(k) - if strings.HasPrefix(lk, "x-") { - if sch.Extensions == nil { - sch.Extensions = map[string]interface{}{} - } - sch.Extensions[k] = vv - continue - } - if sch.ExtraProps == nil { - sch.ExtraProps = map[string]interface{}{} - } - sch.ExtraProps[k] = vv - } - - *s = sch - - return nil -} diff --git a/vendor/github.com/go-openapi/spec/schema_loader.go b/vendor/github.com/go-openapi/spec/schema_loader.go deleted file mode 100644 index b81175afd..000000000 --- a/vendor/github.com/go-openapi/spec/schema_loader.go +++ /dev/null @@ -1,338 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the 
"License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - "fmt" - "log" - "net/url" - "reflect" - "strings" - - "github.com/go-openapi/swag" -) - -// PathLoader is a function to use when loading remote refs. -// -// This is a package level default. It may be overridden or bypassed by -// specifying the loader in ExpandOptions. -// -// NOTE: if you are using the go-openapi/loads package, it will override -// this value with its own default (a loader to retrieve YAML documents as -// well as JSON ones). -var PathLoader = func(pth string) (json.RawMessage, error) { - data, err := swag.LoadFromFileOrHTTP(pth) - if err != nil { - return nil, err - } - return json.RawMessage(data), nil -} - -// resolverContext allows to share a context during spec processing. -// At the moment, it just holds the index of circular references found. -type resolverContext struct { - // circulars holds all visited circular references, to shortcircuit $ref resolution. - // - // This structure is privately instantiated and needs not be locked against - // concurrent access, unless we chose to implement a parallel spec walking. - circulars map[string]bool - basePath string - loadDoc func(string) (json.RawMessage, error) - rootID string -} - -func newResolverContext(options *ExpandOptions) *resolverContext { - expandOptions := optionsOrDefault(options) - - // path loader may be overridden by options - var loader func(string) (json.RawMessage, error) - if expandOptions.PathLoader == nil { - loader = PathLoader - } else { - loader = expandOptions.PathLoader - } - - return &resolverContext{ - circulars: make(map[string]bool), - basePath: expandOptions.RelativeBase, // keep the root base path in context - loadDoc: loader, - } -} - -type schemaLoader struct { - root interface{} - options *ExpandOptions - cache ResolutionCache - context *resolverContext -} - -func (r *schemaLoader) transitiveResolver(basePath string, ref Ref) *schemaLoader { - if ref.IsRoot() || ref.HasFragmentOnly { - return r - } - - baseRef := MustCreateRef(basePath) - currentRef := normalizeRef(&ref, basePath) - if strings.HasPrefix(currentRef.String(), baseRef.String()) { - return r - } - - // set a new root against which to resolve - rootURL := currentRef.GetURL() - rootURL.Fragment = "" - root, _ := r.cache.Get(rootURL.String()) - - // shallow copy of resolver options to set a new RelativeBase when - // traversing multiple documents - newOptions := r.options - newOptions.RelativeBase = rootURL.String() - - return defaultSchemaLoader(root, newOptions, r.cache, r.context) -} - -func (r *schemaLoader) updateBasePath(transitive *schemaLoader, basePath string) string { - if transitive != r { - if transitive.options != nil && transitive.options.RelativeBase != "" { - return normalizeBase(transitive.options.RelativeBase) - } - } - - return basePath -} - -func (r *schemaLoader) resolveRef(ref *Ref, target interface{}, basePath string) error { - tgt := reflect.ValueOf(target) - if tgt.Kind() != reflect.Ptr { - return ErrResolveRefNeedsAPointer - } - - if ref.GetURL() == nil 
{ - return nil - } - - var ( - res interface{} - data interface{} - err error - ) - - // Resolve against the root if it isn't nil, and if ref is pointing at the root, or has a fragment only which means - // it is pointing somewhere in the root. - root := r.root - if (ref.IsRoot() || ref.HasFragmentOnly) && root == nil && basePath != "" { - if baseRef, erb := NewRef(basePath); erb == nil { - root, _, _, _ = r.load(baseRef.GetURL()) - } - } - - if (ref.IsRoot() || ref.HasFragmentOnly) && root != nil { - data = root - } else { - baseRef := normalizeRef(ref, basePath) - data, _, _, err = r.load(baseRef.GetURL()) - if err != nil { - return err - } - } - - res = data - if ref.String() != "" { - res, _, err = ref.GetPointer().Get(data) - if err != nil { - return err - } - } - return swag.DynamicJSONToStruct(res, target) -} - -func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) { - debugLog("loading schema from url: %s", refURL) - toFetch := *refURL - toFetch.Fragment = "" - - var err error - pth := toFetch.String() - normalized := normalizeBase(pth) - debugLog("loading doc from: %s", normalized) - - unescaped, err := url.PathUnescape(normalized) - if err != nil { - return nil, url.URL{}, false, err - } - - u := url.URL{Path: unescaped} - - data, fromCache := r.cache.Get(u.RequestURI()) - if fromCache { - return data, toFetch, fromCache, nil - } - - b, err := r.context.loadDoc(normalized) - if err != nil { - return nil, url.URL{}, false, err - } - - var doc interface{} - if err := json.Unmarshal(b, &doc); err != nil { - return nil, url.URL{}, false, err - } - r.cache.Set(normalized, doc) - - return doc, toFetch, fromCache, nil -} - -// isCircular detects cycles in sequences of $ref. -// -// It relies on a private context (which needs not be locked). -func (r *schemaLoader) isCircular(ref *Ref, basePath string, parentRefs ...string) (foundCycle bool) { - normalizedRef := normalizeURI(ref.String(), basePath) - if _, ok := r.context.circulars[normalizedRef]; ok { - // circular $ref has been already detected in another explored cycle - foundCycle = true - return - } - foundCycle = swag.ContainsStrings(parentRefs, normalizedRef) // normalized windows url's are lower cased - if foundCycle { - r.context.circulars[normalizedRef] = true - } - return -} - -// Resolve resolves a reference against basePath and stores the result in target. -// -// Resolve is not in charge of following references: it only resolves ref by following its URL. -// -// If the schema the ref is referring to holds nested refs, Resolve doesn't resolve them. -// -// If basePath is an empty string, ref is resolved against the root schema stored in the schemaLoader struct -func (r *schemaLoader) Resolve(ref *Ref, target interface{}, basePath string) error { - return r.resolveRef(ref, target, basePath) -} - -func (r *schemaLoader) deref(input interface{}, parentRefs []string, basePath string) error { - var ref *Ref - switch refable := input.(type) { - case *Schema: - ref = &refable.Ref - case *Parameter: - ref = &refable.Ref - case *Response: - ref = &refable.Ref - case *PathItem: - ref = &refable.Ref - default: - return fmt.Errorf("unsupported type: %T: %w", input, ErrDerefUnsupportedType) - } - - curRef := ref.String() - if curRef == "" { - return nil - } - - normalizedRef := normalizeRef(ref, basePath) - normalizedBasePath := normalizedRef.RemoteURI() - - if r.isCircular(normalizedRef, basePath, parentRefs...) 
{ - return nil - } - - if err := r.resolveRef(ref, input, basePath); r.shouldStopOnError(err) { - return err - } - - if ref.String() == "" || ref.String() == curRef { - // done with rereferencing - return nil - } - - parentRefs = append(parentRefs, normalizedRef.String()) - return r.deref(input, parentRefs, normalizedBasePath) -} - -func (r *schemaLoader) shouldStopOnError(err error) bool { - if err != nil && !r.options.ContinueOnError { - return true - } - - if err != nil { - log.Println(err) - } - - return false -} - -func (r *schemaLoader) setSchemaID(target interface{}, id, basePath string) (string, string) { - debugLog("schema has ID: %s", id) - - // handling the case when id is a folder - // remember that basePath has to point to a file - var refPath string - if strings.HasSuffix(id, "/") { - // ensure this is detected as a file, not a folder - refPath = fmt.Sprintf("%s%s", id, "placeholder.json") - } else { - refPath = id - } - - // updates the current base path - // * important: ID can be a relative path - // * registers target to be fetchable from the new base proposed by this id - newBasePath := normalizeURI(refPath, basePath) - - // store found IDs for possible future reuse in $ref - r.cache.Set(newBasePath, target) - - // the root document has an ID: all $ref relative to that ID may - // be rebased relative to the root document - if basePath == r.context.basePath { - debugLog("root document is a schema with ID: %s (normalized as:%s)", id, newBasePath) - r.context.rootID = newBasePath - } - - return newBasePath, refPath -} - -func defaultSchemaLoader( - root interface{}, - expandOptions *ExpandOptions, - cache ResolutionCache, - context *resolverContext) *schemaLoader { - - if expandOptions == nil { - expandOptions = &ExpandOptions{} - } - - cache = cacheOrDefault(cache) - - if expandOptions.RelativeBase == "" { - // if no relative base is provided, assume the root document - // contains all $ref, or at least, that the relative documents - // may be resolved from the current working directory. - expandOptions.RelativeBase = baseForRoot(root, cache) - } - debugLog("effective expander options: %#v", expandOptions) - - if context == nil { - context = newResolverContext(expandOptions) - } - - return &schemaLoader{ - root: root, - options: expandOptions, - cache: cache, - context: context, - } -} diff --git a/vendor/github.com/go-openapi/spec/security_scheme.go b/vendor/github.com/go-openapi/spec/security_scheme.go deleted file mode 100644 index 9d0bdae90..000000000 --- a/vendor/github.com/go-openapi/spec/security_scheme.go +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
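The schema_loader.go file deleted just above routes all remote $ref resolution through the package-level PathLoader hook, with ExpandOptions.PathLoader taking precedence per call (see newResolverContext). As a hedged illustration of what code built against the pre-revert vendor tree could do with that hook, the sketch below swaps in a loader that only serves local files; the main function and restriction logic are illustrative, and the expansion entry points (e.g. ExpandSpec) are not part of the hunks shown here.

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"strings"

	"github.com/go-openapi/spec"
)

func main() {
	// Package-level hook used by the resolver context when ExpandOptions does
	// not supply its own loader. Replace it with a local-only loader.
	spec.PathLoader = func(pth string) (json.RawMessage, error) {
		if strings.HasPrefix(pth, "http://") || strings.HasPrefix(pth, "https://") {
			return nil, fmt.Errorf("remote $ref %q not allowed", pth)
		}
		data, err := os.ReadFile(pth)
		if err != nil {
			return nil, err
		}
		return json.RawMessage(data), nil
	}

	// A per-call loader in ExpandOptions overrides the package-level hook.
	opts := &spec.ExpandOptions{
		RelativeBase: "./swagger.json",
		PathLoader:   spec.PathLoader,
	}
	_ = opts // typically handed to the package's expansion entry points
}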
- -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -const ( - basic = "basic" - apiKey = "apiKey" - oauth2 = "oauth2" - implicit = "implicit" - password = "password" - application = "application" - accessCode = "accessCode" -) - -// BasicAuth creates a basic auth security scheme -func BasicAuth() *SecurityScheme { - return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: basic}} -} - -// APIKeyAuth creates an api key auth security scheme -func APIKeyAuth(fieldName, valueSource string) *SecurityScheme { - return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{Type: apiKey, Name: fieldName, In: valueSource}} -} - -// OAuth2Implicit creates an implicit flow oauth2 security scheme -func OAuth2Implicit(authorizationURL string) *SecurityScheme { - return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ - Type: oauth2, - Flow: implicit, - AuthorizationURL: authorizationURL, - }} -} - -// OAuth2Password creates a password flow oauth2 security scheme -func OAuth2Password(tokenURL string) *SecurityScheme { - return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ - Type: oauth2, - Flow: password, - TokenURL: tokenURL, - }} -} - -// OAuth2Application creates an application flow oauth2 security scheme -func OAuth2Application(tokenURL string) *SecurityScheme { - return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ - Type: oauth2, - Flow: application, - TokenURL: tokenURL, - }} -} - -// OAuth2AccessToken creates an access token flow oauth2 security scheme -func OAuth2AccessToken(authorizationURL, tokenURL string) *SecurityScheme { - return &SecurityScheme{SecuritySchemeProps: SecuritySchemeProps{ - Type: oauth2, - Flow: accessCode, - AuthorizationURL: authorizationURL, - TokenURL: tokenURL, - }} -} - -// SecuritySchemeProps describes a swagger security scheme in the securityDefinitions section -type SecuritySchemeProps struct { - Description string `json:"description,omitempty"` - Type string `json:"type"` - Name string `json:"name,omitempty"` // api key - In string `json:"in,omitempty"` // api key - Flow string `json:"flow,omitempty"` // oauth2 - AuthorizationURL string `json:"authorizationUrl"` // oauth2 - TokenURL string `json:"tokenUrl,omitempty"` // oauth2 - Scopes map[string]string `json:"scopes,omitempty"` // oauth2 -} - -// AddScope adds a scope to this security scheme -func (s *SecuritySchemeProps) AddScope(scope, description string) { - if s.Scopes == nil { - s.Scopes = make(map[string]string) - } - s.Scopes[scope] = description -} - -// SecurityScheme allows the definition of a security scheme that can be used by the operations. -// Supported schemes are basic authentication, an API key (either as a header or as a query parameter) -// and OAuth2's common flows (implicit, password, application and access code). 
-// -// For more information: http://goo.gl/8us55a#securitySchemeObject -type SecurityScheme struct { - VendorExtensible - SecuritySchemeProps -} - -// JSONLookup implements an interface to customize json pointer lookup -func (s SecurityScheme) JSONLookup(token string) (interface{}, error) { - if ex, ok := s.Extensions[token]; ok { - return &ex, nil - } - - r, _, err := jsonpointer.GetForToken(s.SecuritySchemeProps, token) - return r, err -} - -// MarshalJSON marshal this to JSON -func (s SecurityScheme) MarshalJSON() ([]byte, error) { - var ( - b1 []byte - err error - ) - - if s.Type == oauth2 && (s.Flow == "implicit" || s.Flow == "accessCode") { - // when oauth2 for implicit or accessCode flows, empty AuthorizationURL is added as empty string - b1, err = json.Marshal(s.SecuritySchemeProps) - } else { - // when not oauth2, empty AuthorizationURL should be omitted - b1, err = json.Marshal(struct { - Description string `json:"description,omitempty"` - Type string `json:"type"` - Name string `json:"name,omitempty"` // api key - In string `json:"in,omitempty"` // api key - Flow string `json:"flow,omitempty"` // oauth2 - AuthorizationURL string `json:"authorizationUrl,omitempty"` // oauth2 - TokenURL string `json:"tokenUrl,omitempty"` // oauth2 - Scopes map[string]string `json:"scopes,omitempty"` // oauth2 - }{ - Description: s.Description, - Type: s.Type, - Name: s.Name, - In: s.In, - Flow: s.Flow, - AuthorizationURL: s.AuthorizationURL, - TokenURL: s.TokenURL, - Scopes: s.Scopes, - }) - } - if err != nil { - return nil, err - } - - b2, err := json.Marshal(s.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -// UnmarshalJSON marshal this from JSON -func (s *SecurityScheme) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil { - return err - } - return json.Unmarshal(data, &s.VendorExtensible) -} diff --git a/vendor/github.com/go-openapi/spec/spec.go b/vendor/github.com/go-openapi/spec/spec.go deleted file mode 100644 index 7d38b6e62..000000000 --- a/vendor/github.com/go-openapi/spec/spec.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" -) - -//go:generate curl -L --progress -o ./schemas/v2/schema.json http://swagger.io/v2/schema.json -//go:generate curl -L --progress -o ./schemas/jsonschema-draft-04.json http://json-schema.org/draft-04/schema -//go:generate go-bindata -pkg=spec -prefix=./schemas -ignore=.*\.md ./schemas/... 
-//go:generate perl -pi -e s,Json,JSON,g bindata.go - -const ( - // SwaggerSchemaURL the url for the swagger 2.0 schema to validate specs - SwaggerSchemaURL = "http://swagger.io/v2/schema.json#" - // JSONSchemaURL the url for the json schema schema - JSONSchemaURL = "http://json-schema.org/draft-04/schema#" -) - -// MustLoadJSONSchemaDraft04 panics when Swagger20Schema returns an error -func MustLoadJSONSchemaDraft04() *Schema { - d, e := JSONSchemaDraft04() - if e != nil { - panic(e) - } - return d -} - -// JSONSchemaDraft04 loads the json schema document for json shema draft04 -func JSONSchemaDraft04() (*Schema, error) { - b, err := Asset("jsonschema-draft-04.json") - if err != nil { - return nil, err - } - - schema := new(Schema) - if err := json.Unmarshal(b, schema); err != nil { - return nil, err - } - return schema, nil -} - -// MustLoadSwagger20Schema panics when Swagger20Schema returns an error -func MustLoadSwagger20Schema() *Schema { - d, e := Swagger20Schema() - if e != nil { - panic(e) - } - return d -} - -// Swagger20Schema loads the swagger 2.0 schema from the embedded assets -func Swagger20Schema() (*Schema, error) { - - b, err := Asset("v2/schema.json") - if err != nil { - return nil, err - } - - schema := new(Schema) - if err := json.Unmarshal(b, schema); err != nil { - return nil, err - } - return schema, nil -} diff --git a/vendor/github.com/go-openapi/spec/swagger.go b/vendor/github.com/go-openapi/spec/swagger.go deleted file mode 100644 index 44722ffd5..000000000 --- a/vendor/github.com/go-openapi/spec/swagger.go +++ /dev/null @@ -1,448 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "bytes" - "encoding/gob" - "encoding/json" - "fmt" - "strconv" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -// Swagger this is the root document object for the API specification. -// It combines what previously was the Resource Listing and API Declaration (version 1.2 and earlier) -// together into one document. 
-// -// For more information: http://goo.gl/8us55a#swagger-object- -type Swagger struct { - VendorExtensible - SwaggerProps -} - -// JSONLookup look up a value by the json property name -func (s Swagger) JSONLookup(token string) (interface{}, error) { - if ex, ok := s.Extensions[token]; ok { - return &ex, nil - } - r, _, err := jsonpointer.GetForToken(s.SwaggerProps, token) - return r, err -} - -// MarshalJSON marshals this swagger structure to json -func (s Swagger) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(s.SwaggerProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(s.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -// UnmarshalJSON unmarshals a swagger spec from json -func (s *Swagger) UnmarshalJSON(data []byte) error { - var sw Swagger - if err := json.Unmarshal(data, &sw.SwaggerProps); err != nil { - return err - } - if err := json.Unmarshal(data, &sw.VendorExtensible); err != nil { - return err - } - *s = sw - return nil -} - -// GobEncode provides a safe gob encoder for Swagger, including extensions -func (s Swagger) GobEncode() ([]byte, error) { - var b bytes.Buffer - raw := struct { - Props SwaggerProps - Ext VendorExtensible - }{ - Props: s.SwaggerProps, - Ext: s.VendorExtensible, - } - err := gob.NewEncoder(&b).Encode(raw) - return b.Bytes(), err -} - -// GobDecode provides a safe gob decoder for Swagger, including extensions -func (s *Swagger) GobDecode(b []byte) error { - var raw struct { - Props SwaggerProps - Ext VendorExtensible - } - buf := bytes.NewBuffer(b) - err := gob.NewDecoder(buf).Decode(&raw) - if err != nil { - return err - } - s.SwaggerProps = raw.Props - s.VendorExtensible = raw.Ext - return nil -} - -// SwaggerProps captures the top-level properties of an Api specification -// -// NOTE: validation rules -// - the scheme, when present must be from [http, https, ws, wss] -// - BasePath must start with a leading "/" -// - Paths is required -type SwaggerProps struct { - ID string `json:"id,omitempty"` - Consumes []string `json:"consumes,omitempty"` - Produces []string `json:"produces,omitempty"` - Schemes []string `json:"schemes,omitempty"` - Swagger string `json:"swagger,omitempty"` - Info *Info `json:"info,omitempty"` - Host string `json:"host,omitempty"` - BasePath string `json:"basePath,omitempty"` - Paths *Paths `json:"paths"` - Definitions Definitions `json:"definitions,omitempty"` - Parameters map[string]Parameter `json:"parameters,omitempty"` - Responses map[string]Response `json:"responses,omitempty"` - SecurityDefinitions SecurityDefinitions `json:"securityDefinitions,omitempty"` - Security []map[string][]string `json:"security,omitempty"` - Tags []Tag `json:"tags,omitempty"` - ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` -} - -type swaggerPropsAlias SwaggerProps - -type gobSwaggerPropsAlias struct { - Security []map[string]struct { - List []string - Pad bool - } - Alias *swaggerPropsAlias - SecurityIsEmpty bool -} - -// GobEncode provides a safe gob encoder for SwaggerProps, including empty security requirements -func (o SwaggerProps) GobEncode() ([]byte, error) { - raw := gobSwaggerPropsAlias{ - Alias: (*swaggerPropsAlias)(&o), - } - - var b bytes.Buffer - if o.Security == nil { - // nil security requirement - err := gob.NewEncoder(&b).Encode(raw) - return b.Bytes(), err - } - - if len(o.Security) == 0 { - // empty, but non-nil security requirement - raw.SecurityIsEmpty = true - raw.Alias.Security = nil - err := gob.NewEncoder(&b).Encode(raw) 
- return b.Bytes(), err - } - - raw.Security = make([]map[string]struct { - List []string - Pad bool - }, 0, len(o.Security)) - for _, req := range o.Security { - v := make(map[string]struct { - List []string - Pad bool - }, len(req)) - for k, val := range req { - v[k] = struct { - List []string - Pad bool - }{ - List: val, - } - } - raw.Security = append(raw.Security, v) - } - - err := gob.NewEncoder(&b).Encode(raw) - return b.Bytes(), err -} - -// GobDecode provides a safe gob decoder for SwaggerProps, including empty security requirements -func (o *SwaggerProps) GobDecode(b []byte) error { - var raw gobSwaggerPropsAlias - - buf := bytes.NewBuffer(b) - err := gob.NewDecoder(buf).Decode(&raw) - if err != nil { - return err - } - if raw.Alias == nil { - return nil - } - - switch { - case raw.SecurityIsEmpty: - // empty, but non-nil security requirement - raw.Alias.Security = []map[string][]string{} - case len(raw.Alias.Security) == 0: - // nil security requirement - raw.Alias.Security = nil - default: - raw.Alias.Security = make([]map[string][]string, 0, len(raw.Security)) - for _, req := range raw.Security { - v := make(map[string][]string, len(req)) - for k, val := range req { - v[k] = make([]string, 0, len(val.List)) - v[k] = append(v[k], val.List...) - } - raw.Alias.Security = append(raw.Alias.Security, v) - } - } - - *o = *(*SwaggerProps)(raw.Alias) - return nil -} - -// Dependencies represent a dependencies property -type Dependencies map[string]SchemaOrStringArray - -// SchemaOrBool represents a schema or boolean value, is biased towards true for the boolean property -type SchemaOrBool struct { - Allows bool - Schema *Schema -} - -// JSONLookup implements an interface to customize json pointer lookup -func (s SchemaOrBool) JSONLookup(token string) (interface{}, error) { - if token == "allows" { - return s.Allows, nil - } - r, _, err := jsonpointer.GetForToken(s.Schema, token) - return r, err -} - -var jsTrue = []byte("true") -var jsFalse = []byte("false") - -// MarshalJSON convert this object to JSON -func (s SchemaOrBool) MarshalJSON() ([]byte, error) { - if s.Schema != nil { - return json.Marshal(s.Schema) - } - - if s.Schema == nil && !s.Allows { - return jsFalse, nil - } - return jsTrue, nil -} - -// UnmarshalJSON converts this bool or schema object from a JSON structure -func (s *SchemaOrBool) UnmarshalJSON(data []byte) error { - var nw SchemaOrBool - if len(data) >= 4 { - if data[0] == '{' { - var sch Schema - if err := json.Unmarshal(data, &sch); err != nil { - return err - } - nw.Schema = &sch - } - nw.Allows = !(data[0] == 'f' && data[1] == 'a' && data[2] == 'l' && data[3] == 's' && data[4] == 'e') - } - *s = nw - return nil -} - -// SchemaOrStringArray represents a schema or a string array -type SchemaOrStringArray struct { - Schema *Schema - Property []string -} - -// JSONLookup implements an interface to customize json pointer lookup -func (s SchemaOrStringArray) JSONLookup(token string) (interface{}, error) { - r, _, err := jsonpointer.GetForToken(s.Schema, token) - return r, err -} - -// MarshalJSON converts this schema object or array into JSON structure -func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) { - if len(s.Property) > 0 { - return json.Marshal(s.Property) - } - if s.Schema != nil { - return json.Marshal(s.Schema) - } - return []byte("null"), nil -} - -// UnmarshalJSON converts this schema object or array from a JSON structure -func (s *SchemaOrStringArray) UnmarshalJSON(data []byte) error { - var first byte - if len(data) > 1 { - first = data[0] - 
} - var nw SchemaOrStringArray - if first == '{' { - var sch Schema - if err := json.Unmarshal(data, &sch); err != nil { - return err - } - nw.Schema = &sch - } - if first == '[' { - if err := json.Unmarshal(data, &nw.Property); err != nil { - return err - } - } - *s = nw - return nil -} - -// Definitions contains the models explicitly defined in this spec -// An object to hold data types that can be consumed and produced by operations. -// These data types can be primitives, arrays or models. -// -// For more information: http://goo.gl/8us55a#definitionsObject -type Definitions map[string]Schema - -// SecurityDefinitions a declaration of the security schemes available to be used in the specification. -// This does not enforce the security schemes on the operations and only serves to provide -// the relevant details for each scheme. -// -// For more information: http://goo.gl/8us55a#securityDefinitionsObject -type SecurityDefinitions map[string]*SecurityScheme - -// StringOrArray represents a value that can either be a string -// or an array of strings. Mainly here for serialization purposes -type StringOrArray []string - -// Contains returns true when the value is contained in the slice -func (s StringOrArray) Contains(value string) bool { - for _, str := range s { - if str == value { - return true - } - } - return false -} - -// JSONLookup implements an interface to customize json pointer lookup -func (s SchemaOrArray) JSONLookup(token string) (interface{}, error) { - if _, err := strconv.Atoi(token); err == nil { - r, _, err := jsonpointer.GetForToken(s.Schemas, token) - return r, err - } - r, _, err := jsonpointer.GetForToken(s.Schema, token) - return r, err -} - -// UnmarshalJSON unmarshals this string or array object from a JSON array or JSON string -func (s *StringOrArray) UnmarshalJSON(data []byte) error { - var first byte - if len(data) > 1 { - first = data[0] - } - - if first == '[' { - var parsed []string - if err := json.Unmarshal(data, &parsed); err != nil { - return err - } - *s = StringOrArray(parsed) - return nil - } - - var single interface{} - if err := json.Unmarshal(data, &single); err != nil { - return err - } - if single == nil { - return nil - } - switch v := single.(type) { - case string: - *s = StringOrArray([]string{v}) - return nil - default: - return fmt.Errorf("only string or array is allowed, not %T", single) - } -} - -// MarshalJSON converts this string or array to a JSON array or JSON string -func (s StringOrArray) MarshalJSON() ([]byte, error) { - if len(s) == 1 { - return json.Marshal([]string(s)[0]) - } - return json.Marshal([]string(s)) -} - -// SchemaOrArray represents a value that can either be a Schema -// or an array of Schema. 
Mainly here for serialization purposes -type SchemaOrArray struct { - Schema *Schema - Schemas []Schema -} - -// Len returns the number of schemas in this property -func (s SchemaOrArray) Len() int { - if s.Schema != nil { - return 1 - } - return len(s.Schemas) -} - -// ContainsType returns true when one of the schemas is of the specified type -func (s *SchemaOrArray) ContainsType(name string) bool { - if s.Schema != nil { - return s.Schema.Type != nil && s.Schema.Type.Contains(name) - } - return false -} - -// MarshalJSON converts this schema object or array into JSON structure -func (s SchemaOrArray) MarshalJSON() ([]byte, error) { - if len(s.Schemas) > 0 { - return json.Marshal(s.Schemas) - } - return json.Marshal(s.Schema) -} - -// UnmarshalJSON converts this schema object or array from a JSON structure -func (s *SchemaOrArray) UnmarshalJSON(data []byte) error { - var nw SchemaOrArray - var first byte - if len(data) > 1 { - first = data[0] - } - if first == '{' { - var sch Schema - if err := json.Unmarshal(data, &sch); err != nil { - return err - } - nw.Schema = &sch - } - if first == '[' { - if err := json.Unmarshal(data, &nw.Schemas); err != nil { - return err - } - } - *s = nw - return nil -} - -// vim:set ft=go noet sts=2 sw=2 ts=2: diff --git a/vendor/github.com/go-openapi/spec/tag.go b/vendor/github.com/go-openapi/spec/tag.go deleted file mode 100644 index faa3d3de1..000000000 --- a/vendor/github.com/go-openapi/spec/tag.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -import ( - "encoding/json" - - "github.com/go-openapi/jsonpointer" - "github.com/go-openapi/swag" -) - -// TagProps describe a tag entry in the top level tags section of a swagger spec -type TagProps struct { - Description string `json:"description,omitempty"` - Name string `json:"name,omitempty"` - ExternalDocs *ExternalDocumentation `json:"externalDocs,omitempty"` -} - -// NewTag creates a new tag -func NewTag(name, description string, externalDocs *ExternalDocumentation) Tag { - return Tag{TagProps: TagProps{Description: description, Name: name, ExternalDocs: externalDocs}} -} - -// Tag allows adding meta data to a single tag that is used by the -// [Operation Object](http://goo.gl/8us55a#operationObject). -// It is not mandatory to have a Tag Object per tag used there. 
-// -// For more information: http://goo.gl/8us55a#tagObject -type Tag struct { - VendorExtensible - TagProps -} - -// JSONLookup implements an interface to customize json pointer lookup -func (t Tag) JSONLookup(token string) (interface{}, error) { - if ex, ok := t.Extensions[token]; ok { - return &ex, nil - } - - r, _, err := jsonpointer.GetForToken(t.TagProps, token) - return r, err -} - -// MarshalJSON marshal this to JSON -func (t Tag) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(t.TagProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(t.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -// UnmarshalJSON marshal this from JSON -func (t *Tag) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &t.TagProps); err != nil { - return err - } - return json.Unmarshal(data, &t.VendorExtensible) -} diff --git a/vendor/github.com/go-openapi/spec/validations.go b/vendor/github.com/go-openapi/spec/validations.go deleted file mode 100644 index 6360a8ea7..000000000 --- a/vendor/github.com/go-openapi/spec/validations.go +++ /dev/null @@ -1,215 +0,0 @@ -package spec - -// CommonValidations describe common JSON-schema validations -type CommonValidations struct { - Maximum *float64 `json:"maximum,omitempty"` - ExclusiveMaximum bool `json:"exclusiveMaximum,omitempty"` - Minimum *float64 `json:"minimum,omitempty"` - ExclusiveMinimum bool `json:"exclusiveMinimum,omitempty"` - MaxLength *int64 `json:"maxLength,omitempty"` - MinLength *int64 `json:"minLength,omitempty"` - Pattern string `json:"pattern,omitempty"` - MaxItems *int64 `json:"maxItems,omitempty"` - MinItems *int64 `json:"minItems,omitempty"` - UniqueItems bool `json:"uniqueItems,omitempty"` - MultipleOf *float64 `json:"multipleOf,omitempty"` - Enum []interface{} `json:"enum,omitempty"` -} - -// SetValidations defines all validations for a simple schema. -// -// NOTE: the input is the larger set of validations available for schemas. -// For simple schemas, MinProperties and MaxProperties are ignored. -func (v *CommonValidations) SetValidations(val SchemaValidations) { - v.Maximum = val.Maximum - v.ExclusiveMaximum = val.ExclusiveMaximum - v.Minimum = val.Minimum - v.ExclusiveMinimum = val.ExclusiveMinimum - v.MaxLength = val.MaxLength - v.MinLength = val.MinLength - v.Pattern = val.Pattern - v.MaxItems = val.MaxItems - v.MinItems = val.MinItems - v.UniqueItems = val.UniqueItems - v.MultipleOf = val.MultipleOf - v.Enum = val.Enum -} - -type clearedValidation struct { - Validation string - Value interface{} -} - -type clearedValidations []clearedValidation - -func (c clearedValidations) apply(cbs []func(string, interface{})) { - for _, cb := range cbs { - for _, cleared := range c { - cb(cleared.Validation, cleared.Value) - } - } -} - -// ClearNumberValidations clears all number validations. -// -// Some callbacks may be set by the caller to capture changed values. 
-func (v *CommonValidations) ClearNumberValidations(cbs ...func(string, interface{})) { - done := make(clearedValidations, 0, 5) - defer func() { - done.apply(cbs) - }() - - if v.Minimum != nil { - done = append(done, clearedValidation{Validation: "minimum", Value: v.Minimum}) - v.Minimum = nil - } - if v.Maximum != nil { - done = append(done, clearedValidation{Validation: "maximum", Value: v.Maximum}) - v.Maximum = nil - } - if v.ExclusiveMaximum { - done = append(done, clearedValidation{Validation: "exclusiveMaximum", Value: v.ExclusiveMaximum}) - v.ExclusiveMaximum = false - } - if v.ExclusiveMinimum { - done = append(done, clearedValidation{Validation: "exclusiveMinimum", Value: v.ExclusiveMinimum}) - v.ExclusiveMinimum = false - } - if v.MultipleOf != nil { - done = append(done, clearedValidation{Validation: "multipleOf", Value: v.MultipleOf}) - v.MultipleOf = nil - } -} - -// ClearStringValidations clears all string validations. -// -// Some callbacks may be set by the caller to capture changed values. -func (v *CommonValidations) ClearStringValidations(cbs ...func(string, interface{})) { - done := make(clearedValidations, 0, 3) - defer func() { - done.apply(cbs) - }() - - if v.Pattern != "" { - done = append(done, clearedValidation{Validation: "pattern", Value: v.Pattern}) - v.Pattern = "" - } - if v.MinLength != nil { - done = append(done, clearedValidation{Validation: "minLength", Value: v.MinLength}) - v.MinLength = nil - } - if v.MaxLength != nil { - done = append(done, clearedValidation{Validation: "maxLength", Value: v.MaxLength}) - v.MaxLength = nil - } -} - -// ClearArrayValidations clears all array validations. -// -// Some callbacks may be set by the caller to capture changed values. -func (v *CommonValidations) ClearArrayValidations(cbs ...func(string, interface{})) { - done := make(clearedValidations, 0, 3) - defer func() { - done.apply(cbs) - }() - - if v.MaxItems != nil { - done = append(done, clearedValidation{Validation: "maxItems", Value: v.MaxItems}) - v.MaxItems = nil - } - if v.MinItems != nil { - done = append(done, clearedValidation{Validation: "minItems", Value: v.MinItems}) - v.MinItems = nil - } - if v.UniqueItems { - done = append(done, clearedValidation{Validation: "uniqueItems", Value: v.UniqueItems}) - v.UniqueItems = false - } -} - -// Validations returns a clone of the validations for a simple schema. -// -// NOTE: in the context of simple schema objects, MinProperties, MaxProperties -// and PatternProperties remain unset. 
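The validations.go file being removed here exposes a small callback protocol: each Clear*Validations method reports every validation it removed through optional func(name string, value interface{}) callbacks. A hedged sketch of that pattern follows, using only the types and methods visible in this hunk; the logging callback and the literal values are illustrative and not taken from this repository.

package main

import (
	"fmt"

	"github.com/go-openapi/spec"
)

func main() {
	maximum := 100.0
	minLength := int64(3)

	v := spec.CommonValidations{
		Maximum:   &maximum,
		MinLength: &minLength,
		Pattern:   "^[a-z]+$",
	}

	// Each cleared validation is reported back as (validation name, cleared value).
	logCleared := func(name string, value interface{}) {
		fmt.Printf("cleared %s (was %v)\n", name, value)
	}

	v.ClearNumberValidations(logCleared) // reports "maximum"
	v.ClearStringValidations(logCleared) // reports "pattern" and "minLength"

	// Both now report false, since the corresponding fields were reset.
	fmt.Println(v.HasNumberValidations(), v.HasStringValidations())
}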
-func (v CommonValidations) Validations() SchemaValidations { - return SchemaValidations{ - CommonValidations: v, - } -} - -// HasNumberValidations indicates if the validations are for numbers or integers -func (v CommonValidations) HasNumberValidations() bool { - return v.Maximum != nil || v.Minimum != nil || v.MultipleOf != nil -} - -// HasStringValidations indicates if the validations are for strings -func (v CommonValidations) HasStringValidations() bool { - return v.MaxLength != nil || v.MinLength != nil || v.Pattern != "" -} - -// HasArrayValidations indicates if the validations are for arrays -func (v CommonValidations) HasArrayValidations() bool { - return v.MaxItems != nil || v.MinItems != nil || v.UniqueItems -} - -// HasEnum indicates if the validation includes some enum constraint -func (v CommonValidations) HasEnum() bool { - return len(v.Enum) > 0 -} - -// SchemaValidations describes the validation properties of a schema -// -// NOTE: at this moment, this is not embedded in SchemaProps because this would induce a breaking change -// in the exported members: all initializers using litterals would fail. -type SchemaValidations struct { - CommonValidations - - PatternProperties SchemaProperties `json:"patternProperties,omitempty"` - MaxProperties *int64 `json:"maxProperties,omitempty"` - MinProperties *int64 `json:"minProperties,omitempty"` -} - -// HasObjectValidations indicates if the validations are for objects -func (v SchemaValidations) HasObjectValidations() bool { - return v.MaxProperties != nil || v.MinProperties != nil || v.PatternProperties != nil -} - -// SetValidations for schema validations -func (v *SchemaValidations) SetValidations(val SchemaValidations) { - v.CommonValidations.SetValidations(val) - v.PatternProperties = val.PatternProperties - v.MaxProperties = val.MaxProperties - v.MinProperties = val.MinProperties -} - -// Validations for a schema -func (v SchemaValidations) Validations() SchemaValidations { - val := v.CommonValidations.Validations() - val.PatternProperties = v.PatternProperties - val.MinProperties = v.MinProperties - val.MaxProperties = v.MaxProperties - return val -} - -// ClearObjectValidations returns a clone of the validations with all object validations cleared. -// -// Some callbacks may be set by the caller to capture changed values. -func (v *SchemaValidations) ClearObjectValidations(cbs ...func(string, interface{})) { - done := make(clearedValidations, 0, 3) - defer func() { - done.apply(cbs) - }() - - if v.MaxProperties != nil { - done = append(done, clearedValidation{Validation: "maxProperties", Value: v.MaxProperties}) - v.MaxProperties = nil - } - if v.MinProperties != nil { - done = append(done, clearedValidation{Validation: "minProperties", Value: v.MinProperties}) - v.MinProperties = nil - } - if v.PatternProperties != nil { - done = append(done, clearedValidation{Validation: "patternProperties", Value: v.PatternProperties}) - v.PatternProperties = nil - } -} diff --git a/vendor/github.com/go-openapi/spec/xml_object.go b/vendor/github.com/go-openapi/spec/xml_object.go deleted file mode 100644 index 945a46703..000000000 --- a/vendor/github.com/go-openapi/spec/xml_object.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package spec - -// XMLObject a metadata object that allows for more fine-tuned XML model definitions. -// -// For more information: http://goo.gl/8us55a#xmlObject -type XMLObject struct { - Name string `json:"name,omitempty"` - Namespace string `json:"namespace,omitempty"` - Prefix string `json:"prefix,omitempty"` - Attribute bool `json:"attribute,omitempty"` - Wrapped bool `json:"wrapped,omitempty"` -} - -// WithName sets the xml name for the object -func (x *XMLObject) WithName(name string) *XMLObject { - x.Name = name - return x -} - -// WithNamespace sets the xml namespace for the object -func (x *XMLObject) WithNamespace(namespace string) *XMLObject { - x.Namespace = namespace - return x -} - -// WithPrefix sets the xml prefix for the object -func (x *XMLObject) WithPrefix(prefix string) *XMLObject { - x.Prefix = prefix - return x -} - -// AsAttribute flags this object as xml attribute -func (x *XMLObject) AsAttribute() *XMLObject { - x.Attribute = true - return x -} - -// AsElement flags this object as an xml node -func (x *XMLObject) AsElement() *XMLObject { - x.Attribute = false - return x -} - -// AsWrapped flags this object as wrapped, this is mostly useful for array types -func (x *XMLObject) AsWrapped() *XMLObject { - x.Wrapped = true - return x -} - -// AsUnwrapped flags this object as an xml node -func (x *XMLObject) AsUnwrapped() *XMLObject { - x.Wrapped = false - return x -} diff --git a/vendor/github.com/go-openapi/strfmt/.editorconfig b/vendor/github.com/go-openapi/strfmt/.editorconfig deleted file mode 100644 index 3152da69a..000000000 --- a/vendor/github.com/go-openapi/strfmt/.editorconfig +++ /dev/null @@ -1,26 +0,0 @@ -# top-most EditorConfig file -root = true - -# Unix-style newlines with a newline ending every file -[*] -end_of_line = lf -insert_final_newline = true -indent_style = space -indent_size = 2 -trim_trailing_whitespace = true - -# Set default charset -[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] -charset = utf-8 - -# Tab indentation (no size specified) -[*.go] -indent_style = tab - -[*.md] -trim_trailing_whitespace = false - -# Matches the exact files either package.json or .travis.yml -[{package.json,.travis.yml}] -indent_style = space -indent_size = 2 diff --git a/vendor/github.com/go-openapi/strfmt/.gitattributes b/vendor/github.com/go-openapi/strfmt/.gitattributes deleted file mode 100644 index d020be8ea..000000000 --- a/vendor/github.com/go-openapi/strfmt/.gitattributes +++ /dev/null @@ -1,2 +0,0 @@ -*.go text eol=lf - diff --git a/vendor/github.com/go-openapi/strfmt/.gitignore b/vendor/github.com/go-openapi/strfmt/.gitignore deleted file mode 100644 index dd91ed6a0..000000000 --- a/vendor/github.com/go-openapi/strfmt/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -secrets.yml -coverage.out diff --git a/vendor/github.com/go-openapi/strfmt/.golangci.yml b/vendor/github.com/go-openapi/strfmt/.golangci.yml deleted file mode 100644 index da12d5e3b..000000000 --- a/vendor/github.com/go-openapi/strfmt/.golangci.yml +++ /dev/null @@ -1,49 +0,0 @@ -linters-settings: - govet: - check-shadowing: true - golint: - min-confidence: 0 - gocyclo: 
- min-complexity: 31 - maligned: - suggest-new: true - dupl: - threshold: 100 - goconst: - min-len: 2 - min-occurrences: 4 - -linters: - enable-all: true - disable: - - maligned - - lll - - gochecknoinits - - gochecknoglobals - - godox - - gocognit - - whitespace - - wsl - - funlen - - wrapcheck - - testpackage - - nlreturn - - gofumpt - - goerr113 - - gci - - gomnd - - godot - - exhaustivestruct - - paralleltest - - varnamelen - - ireturn - #- thelper - -issues: - exclude-rules: - - path: bson.go - text: "should be .*ObjectID" - linters: - - golint - - stylecheck - diff --git a/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md b/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md deleted file mode 100644 index 9322b065e..000000000 --- a/vendor/github.com/go-openapi/strfmt/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,74 +0,0 @@ -# Contributor Covenant Code of Conduct - -## Our Pledge - -In the interest of fostering an open and welcoming environment, we as -contributors and maintainers pledge to making participation in our project and -our community a harassment-free experience for everyone, regardless of age, body -size, disability, ethnicity, gender identity and expression, level of experience, -nationality, personal appearance, race, religion, or sexual identity and -orientation. - -## Our Standards - -Examples of behavior that contributes to creating a positive environment -include: - -* Using welcoming and inclusive language -* Being respectful of differing viewpoints and experiences -* Gracefully accepting constructive criticism -* Focusing on what is best for the community -* Showing empathy towards other community members - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery and unwelcome sexual attention or -advances -* Trolling, insulting/derogatory comments, and personal or political attacks -* Public or private harassment -* Publishing others' private information, such as a physical or electronic - address, without explicit permission -* Other conduct which could reasonably be considered inappropriate in a - professional setting - -## Our Responsibilities - -Project maintainers are responsible for clarifying the standards of acceptable -behavior and are expected to take appropriate and fair corrective action in -response to any instances of unacceptable behavior. - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -## Scope - -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. Examples of -representing a project or community include using an official project e-mail -address, posting via an official social media account, or acting as an appointed -representative at an online or offline event. Representation of a project may be -further defined and clarified by project maintainers. - -## Enforcement - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at ivan+abuse@flanders.co.nz. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. 
The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. - -Project maintainers who do not follow or enforce the Code of Conduct in good -faith may face temporary or permanent repercussions as determined by other -members of the project's leadership. - -## Attribution - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, -available at [http://contributor-covenant.org/version/1/4][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/go-openapi/strfmt/LICENSE b/vendor/github.com/go-openapi/strfmt/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/github.com/go-openapi/strfmt/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/go-openapi/strfmt/README.md b/vendor/github.com/go-openapi/strfmt/README.md deleted file mode 100644 index 0cf89d776..000000000 --- a/vendor/github.com/go-openapi/strfmt/README.md +++ /dev/null @@ -1,88 +0,0 @@ -# Strfmt [![Build Status](https://travis-ci.org/go-openapi/strfmt.svg?branch=master)](https://travis-ci.org/go-openapi/strfmt) [![codecov](https://codecov.io/gh/go-openapi/strfmt/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/strfmt) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) - -[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/strfmt/master/LICENSE) -[![GoDoc](https://godoc.org/github.com/go-openapi/strfmt?status.svg)](http://godoc.org/github.com/go-openapi/strfmt) -[![GolangCI](https://golangci.com/badges/github.com/go-openapi/strfmt.svg)](https://golangci.com) -[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/strfmt)](https://goreportcard.com/report/github.com/go-openapi/strfmt) - -This package exposes a registry of data types to support string formats in the go-openapi toolkit. - -strfmt represents a well known string format such as credit card or email. The go toolkit for OpenAPI specifications knows how to deal with those. - -## Supported data formats -go-openapi/strfmt follows the swagger 2.0 specification with the following formats -defined [here](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types). - -It also provides convenient extensions to go-openapi users. - -- [x] JSON-schema draft 4 formats - - date-time - - email - - hostname - - ipv4 - - ipv6 - - uri -- [x] swagger 2.0 format extensions - - binary - - byte (e.g. base64 encoded string) - - date (e.g. "1970-01-01") - - password -- [x] go-openapi custom format extensions - - bsonobjectid (BSON objectID) - - creditcard - - duration (e.g. "3 weeks", "1ms") - - hexcolor (e.g. "#FFFFFF") - - isbn, isbn10, isbn13 - - mac (e.g "01:02:03:04:05:06") - - rgbcolor (e.g. "rgb(100,100,100)") - - ssn - - uuid, uuid3, uuid4, uuid5 - - cidr (e.g. "192.0.2.1/24", "2001:db8:a0b:12f0::1/32") - - ulid (e.g. "00000PP9HGSBSSDZ1JTEXBJ0PW", [spec](https://github.com/ulid/spec)) - -> NOTE: as the name stands for, this package is intended to support string formatting only. 
-> It does not provide validation for numerical values with swagger format extension for JSON types "number" or -> "integer" (e.g. float, double, int32...). - -## Type conversion - -All types defined here are stringers and may be converted to strings with `.String()`. -Note that most types defined by this package may be converted directly to string like `string(Email{})`. - -`Date` and `DateTime` may be converted directly to `time.Time` like `time.Time(Time{})`. -Similarly, you can convert `Duration` to `time.Duration` as in `time.Duration(Duration{})` - -## Using pointers - -The `conv` subpackage provides helpers to convert the types to and from pointers, just like `go-openapi/swag` does -with primitive types. - -## Format types -Types defined in strfmt expose marshaling and validation capabilities. - -List of defined types: -- Base64 -- CreditCard -- Date -- DateTime -- Duration -- Email -- HexColor -- Hostname -- IPv4 -- IPv6 -- CIDR -- ISBN -- ISBN10 -- ISBN13 -- MAC -- ObjectId -- Password -- RGBColor -- SSN -- URI -- UUID -- UUID3 -- UUID4 -- UUID5 -- [ULID](https://github.com/ulid/spec) diff --git a/vendor/github.com/go-openapi/strfmt/bson.go b/vendor/github.com/go-openapi/strfmt/bson.go deleted file mode 100644 index 8740b1505..000000000 --- a/vendor/github.com/go-openapi/strfmt/bson.go +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package strfmt - -import ( - "database/sql/driver" - "fmt" - - "go.mongodb.org/mongo-driver/bson" - - "go.mongodb.org/mongo-driver/bson/bsontype" - bsonprim "go.mongodb.org/mongo-driver/bson/primitive" -) - -func init() { - var id ObjectId - // register this format in the default registry - Default.Add("bsonobjectid", &id, IsBSONObjectID) -} - -// IsBSONObjectID returns true when the string is a valid BSON.ObjectId -func IsBSONObjectID(str string) bool { - _, err := bsonprim.ObjectIDFromHex(str) - return err == nil -} - -// ObjectId represents a BSON object ID (alias to go.mongodb.org/mongo-driver/bson/primitive.ObjectID) -// -// swagger:strfmt bsonobjectid -type ObjectId bsonprim.ObjectID //nolint:revive - -// NewObjectId creates a ObjectId from a Hex String -func NewObjectId(hex string) ObjectId { //nolint:revive - oid, err := bsonprim.ObjectIDFromHex(hex) - if err != nil { - panic(err) - } - return ObjectId(oid) -} - -// MarshalText turns this instance into text -func (id ObjectId) MarshalText() ([]byte, error) { - oid := bsonprim.ObjectID(id) - if oid == bsonprim.NilObjectID { - return nil, nil - } - return []byte(oid.Hex()), nil -} - -// UnmarshalText hydrates this instance from text -func (id *ObjectId) UnmarshalText(data []byte) error { // validation is performed later on - if len(data) == 0 { - *id = ObjectId(bsonprim.NilObjectID) - return nil - } - oidstr := string(data) - oid, err := bsonprim.ObjectIDFromHex(oidstr) - if err != nil { - return err - } - *id = ObjectId(oid) - return nil -} - -// Scan read a value from a database driver -func (id *ObjectId) Scan(raw interface{}) error { - var data []byte - switch v := raw.(type) { - case []byte: - data = v - case string: - data = []byte(v) - default: - return fmt.Errorf("cannot sql.Scan() strfmt.URI from: %#v", v) - } - - return id.UnmarshalText(data) -} - -// Value converts a value to a database driver value -func (id ObjectId) Value() (driver.Value, error) { - return driver.Value(bsonprim.ObjectID(id).Hex()), nil -} - -func (id ObjectId) String() string { - return bsonprim.ObjectID(id).Hex() -} - -// MarshalJSON returns the ObjectId as JSON -func (id ObjectId) MarshalJSON() ([]byte, error) { - return bsonprim.ObjectID(id).MarshalJSON() -} - -// UnmarshalJSON sets the ObjectId from JSON -func (id *ObjectId) UnmarshalJSON(data []byte) error { - var obj bsonprim.ObjectID - if err := obj.UnmarshalJSON(data); err != nil { - return err - } - *id = ObjectId(obj) - return nil -} - -// MarshalBSON renders the object id as a BSON document -func (id ObjectId) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": bsonprim.ObjectID(id)}) -} - -// UnmarshalBSON reads the objectId from a BSON document -func (id *ObjectId) UnmarshalBSON(data []byte) error { - var obj struct { - Data bsonprim.ObjectID - } - if err := bson.Unmarshal(data, &obj); err != nil { - return err - } - *id = ObjectId(obj.Data) - return nil -} - -// MarshalBSONValue is an interface implemented by types that can marshal themselves -// into a BSON document represented as bytes. The bytes returned must be a valid -// BSON document if the error is nil. -func (id ObjectId) MarshalBSONValue() (bsontype.Type, []byte, error) { - oid := bsonprim.ObjectID(id) - return bsontype.ObjectID, oid[:], nil -} - -// UnmarshalBSONValue is an interface implemented by types that can unmarshal a -// BSON value representation of themselves. The BSON bytes and type can be -// assumed to be valid. 
UnmarshalBSONValue must copy the BSON value bytes if it -// wishes to retain the data after returning. -func (id *ObjectId) UnmarshalBSONValue(tpe bsontype.Type, data []byte) error { - var oid bsonprim.ObjectID - copy(oid[:], data) - *id = ObjectId(oid) - return nil -} - -// DeepCopyInto copies the receiver and writes its value into out. -func (id *ObjectId) DeepCopyInto(out *ObjectId) { - *out = *id -} - -// DeepCopy copies the receiver into a new ObjectId. -func (id *ObjectId) DeepCopy() *ObjectId { - if id == nil { - return nil - } - out := new(ObjectId) - id.DeepCopyInto(out) - return out -} diff --git a/vendor/github.com/go-openapi/strfmt/date.go b/vendor/github.com/go-openapi/strfmt/date.go deleted file mode 100644 index f0b310964..000000000 --- a/vendor/github.com/go-openapi/strfmt/date.go +++ /dev/null @@ -1,187 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package strfmt - -import ( - "database/sql/driver" - "encoding/json" - "errors" - "fmt" - "time" - - "go.mongodb.org/mongo-driver/bson" -) - -func init() { - d := Date{} - // register this format in the default registry - Default.Add("date", &d, IsDate) -} - -// IsDate returns true when the string is a valid date -func IsDate(str string) bool { - _, err := time.Parse(RFC3339FullDate, str) - return err == nil -} - -const ( - // RFC3339FullDate represents a full-date as specified by RFC3339 - // See: http://goo.gl/xXOvVd - RFC3339FullDate = "2006-01-02" -) - -// Date represents a date from the API -// -// swagger:strfmt date -type Date time.Time - -// String converts this date into a string -func (d Date) String() string { - return time.Time(d).Format(RFC3339FullDate) -} - -// UnmarshalText parses a text representation into a date type -func (d *Date) UnmarshalText(text []byte) error { - if len(text) == 0 { - return nil - } - dd, err := time.Parse(RFC3339FullDate, string(text)) - if err != nil { - return err - } - *d = Date(dd) - return nil -} - -// MarshalText serializes this date type to string -func (d Date) MarshalText() ([]byte, error) { - return []byte(d.String()), nil -} - -// Scan scans a Date value from database driver type. -func (d *Date) Scan(raw interface{}) error { - switch v := raw.(type) { - case []byte: - return d.UnmarshalText(v) - case string: - return d.UnmarshalText([]byte(v)) - case time.Time: - *d = Date(v) - return nil - case nil: - *d = Date{} - return nil - default: - return fmt.Errorf("cannot sql.Scan() strfmt.Date from: %#v", v) - } -} - -// Value converts Date to a primitive value ready to written to a database. 
-func (d Date) Value() (driver.Value, error) { - return driver.Value(d.String()), nil -} - -// MarshalJSON returns the Date as JSON -func (d Date) MarshalJSON() ([]byte, error) { - return json.Marshal(time.Time(d).Format(RFC3339FullDate)) -} - -// UnmarshalJSON sets the Date from JSON -func (d *Date) UnmarshalJSON(data []byte) error { - if string(data) == jsonNull { - return nil - } - var strdate string - if err := json.Unmarshal(data, &strdate); err != nil { - return err - } - tt, err := time.Parse(RFC3339FullDate, strdate) - if err != nil { - return err - } - *d = Date(tt) - return nil -} - -func (d Date) MarshalBSON() ([]byte, error) { - return bson.Marshal(bson.M{"data": d.String()}) -} - -func (d *Date) UnmarshalBSON(data []byte) error { - var m bson.M - if err := bson.Unmarshal(data, &m); err != nil { - return err - } - - if data, ok := m["data"].(string); ok { - rd, err := time.Parse(RFC3339FullDate, data) - if err != nil { - return err - } - *d = Date(rd) - return nil - } - - return errors.New("couldn't unmarshal bson bytes value as Date") -} - -// DeepCopyInto copies the receiver and writes its value into out. -func (d *Date) DeepCopyInto(out *Date) { - *out = *d -} - -// DeepCopy copies the receiver into a new Date. -func (d *Date) DeepCopy() *Date { - if d == nil { - return nil - } - out := new(Date) - d.DeepCopyInto(out) - return out -} - -// GobEncode implements the gob.GobEncoder interface. -func (d Date) GobEncode() ([]byte, error) { - return d.MarshalBinary() -} - -// GobDecode implements the gob.GobDecoder interface. -func (d *Date) GobDecode(data []byte) error { - return d.UnmarshalBinary(data) -} - -// MarshalBinary implements the encoding.BinaryMarshaler interface. -func (d Date) MarshalBinary() ([]byte, error) { - return time.Time(d).MarshalBinary() -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. -func (d *Date) UnmarshalBinary(data []byte) error { - var original time.Time - - err := original.UnmarshalBinary(data) - if err != nil { - return err - } - - *d = Date(original) - - return nil -} - -// Equal checks if two Date instances are equal -func (d Date) Equal(d2 Date) bool { - return time.Time(d).Equal(time.Time(d2)) -} diff --git a/vendor/github.com/go-openapi/strfmt/default.go b/vendor/github.com/go-openapi/strfmt/default.go deleted file mode 100644 index a89a4de3f..000000000 --- a/vendor/github.com/go-openapi/strfmt/default.go +++ /dev/null @@ -1,2035 +0,0 @@ -// Copyright 2015 go-swagger maintainers -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-
-package strfmt
-
-import (
-	"database/sql/driver"
-	"encoding/base64"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"net/mail"
-	"regexp"
-	"strings"
-
-	"github.com/asaskevich/govalidator"
-	"go.mongodb.org/mongo-driver/bson"
-)
-
-const (
-	// HostnamePattern http://json-schema.org/latest/json-schema-validation.html#anchor114
-	//  A string instance is valid against this attribute if it is a valid
-	//  representation for an Internet host name, as defined by RFC 1034, section 3.1 [RFC1034].
-	//  http://tools.ietf.org/html/rfc1034#section-3.5
-	//  <digit> ::= any one of the ten digits 0 through 9
-	//  var digit = /[0-9]/;
-	//  <letter> ::= any one of the 52 alphabetic characters A through Z in upper case and a through z in lower case
-	//  var letter = /[a-zA-Z]/;
-	//  <let-dig> ::= <letter> | <digit>
-	//  var letDig = /[0-9a-zA-Z]/;
-	//  <let-dig-hyp> ::= <let-dig> | "-"
-	//  var letDigHyp = /[-0-9a-zA-Z]/;
-	//  <ldh-str> ::= <let-dig-hyp> | <let-dig-hyp> <ldh-str>
-	//  var ldhStr = /[-0-9a-zA-Z]+/;
-	//