From 7d1992abf8b2919296badc5e17730cc3e5f1242e Mon Sep 17 00:00:00 2001 From: Simon Pasquier Date: Wed, 17 Sep 2025 14:19:49 +0200 Subject: [PATCH 1/6] chore: remove github.com/Jeffail/gabs --- go.mod | 1 - go.sum | 2 - test/e2e/config_test.go | 2 +- test/e2e/framework/client.go | 2 +- test/e2e/user_workload_monitoring_test.go | 44 +- test/e2e/utils.go | 22 +- vendor/github.com/Jeffail/gabs/LICENSE | 19 - vendor/github.com/Jeffail/gabs/README.md | 290 -------- vendor/github.com/Jeffail/gabs/gabs.go | 727 ------------------- vendor/github.com/Jeffail/gabs/gabs_logo.png | Bin 167771 -> 0 bytes vendor/modules.txt | 3 - 11 files changed, 34 insertions(+), 1078 deletions(-) delete mode 100644 vendor/github.com/Jeffail/gabs/LICENSE delete mode 100644 vendor/github.com/Jeffail/gabs/README.md delete mode 100644 vendor/github.com/Jeffail/gabs/gabs.go delete mode 100644 vendor/github.com/Jeffail/gabs/gabs_logo.png diff --git a/go.mod b/go.mod index c772f65bc7..d549017d30 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,6 @@ module github.com/openshift/cluster-monitoring-operator go 1.24.0 require ( - github.com/Jeffail/gabs v1.4.0 github.com/Jeffail/gabs/v2 v2.6.1 github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 github.com/blang/semver/v4 v4.0.0 diff --git a/go.sum b/go.sum index 5b4b88d5a3..f963f8e12f 100644 --- a/go.sum +++ b/go.sum @@ -51,8 +51,6 @@ github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4= github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab h1:UKkYhof1njT1/xq4SEg5z+VpTgjmNeHwPGRQl7takDI= github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab/go.mod h1:3VYc5hodBMJ5+l/7J4xAyMeuM2PNuepvHlGs8yilUCA= -github.com/Jeffail/gabs v1.4.0 h1://5fYRRTq1edjfIrQGvdkcd22pkYUrHZ5YC/H2GJVAo= -github.com/Jeffail/gabs v1.4.0/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc= github.com/Jeffail/gabs/v2 v2.6.1 h1:wwbE6nTQTwIMsMxzi6XFQQYRZ6wDc1mSdxoAN+9U4Gk= github.com/Jeffail/gabs/v2 v2.6.1/go.mod h1:xCn81vdHKxFUuWWAaD5jCTQDNPBMh5pPs9IJ+NcziBI= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= diff --git a/test/e2e/config_test.go b/test/e2e/config_test.go index cf6dd9a2fc..c3edd9b718 100644 --- a/test/e2e/config_test.go +++ b/test/e2e/config_test.go @@ -24,7 +24,7 @@ import ( "testing" "time" - "github.com/Jeffail/gabs" + "github.com/Jeffail/gabs/v2" configv1 "github.com/openshift/api/config/v1" "github.com/openshift/cluster-monitoring-operator/pkg/client" "github.com/openshift/cluster-monitoring-operator/test/e2e/framework" diff --git a/test/e2e/framework/client.go b/test/e2e/framework/client.go index f79dcabe17..83d749a0ca 100644 --- a/test/e2e/framework/client.go +++ b/test/e2e/framework/client.go @@ -26,7 +26,7 @@ import ( "testing" "time" - "github.com/Jeffail/gabs" + "github.com/Jeffail/gabs/v2" routev1 "github.com/openshift/client-go/route/clientset/versioned/typed/route/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/test/e2e/user_workload_monitoring_test.go b/test/e2e/user_workload_monitoring_test.go index d76de934a8..85ba144902 100644 --- a/test/e2e/user_workload_monitoring_test.go +++ b/test/e2e/user_workload_monitoring_test.go @@ -26,7 +26,7 @@ import ( "testing" "time" - "github.com/Jeffail/gabs" + "github.com/Jeffail/gabs/v2" "github.com/stretchr/testify/require" configv1 "github.com/openshift/api/config/v1" @@ -667,9 +667,9 @@ func assertTenancyForMetrics(t *testing.T) { return err 
} - labels, err := timeseries.Path("metric").ChildrenMap() - if err != nil { - return err + labels := timeseries.Path("metric").ChildrenMap() + if len(labels) == 0 { + return errors.New("empty metric") } ns := labels["namespace"].Data().(string) @@ -926,9 +926,9 @@ func assertTenancyForRulesAndAlerts(t *testing.T) { return err } - groups, err := res.Path("data.groups").Children() - if err != nil { - return err + groups := res.Path("data.groups").Children() + if groups == nil { + return errors.New("data.groups not found") } if len(groups) != 2 { @@ -962,15 +962,15 @@ func assertTenancyForRulesAndAlerts(t *testing.T) { var got []testData for _, group := range groups { - rules, err := group.Path("rules").Children() - if err != nil { - return err + rules := group.Path("rules").Children() + if rules == nil { + return errors.New("rules not found") } for _, rule := range rules { - labels, err := rule.Path("labels").ChildrenMap() - if err != nil { - return err + labels := rule.Path("labels").ChildrenMap() + if labels == nil { + return errors.New("labels not found") } got = append(got, testData{ @@ -1252,9 +1252,9 @@ func assertTenancyForSeriesMetadata(t *testing.T) { return err } - labels, err := res.Path("data").Children() - if err != nil { - return err + labels := res.Path("data").Children() + if labels == nil { + return errors.New("data not found") } if len(labels) == 0 { @@ -1306,9 +1306,9 @@ func assertTenancyForSeriesMetadata(t *testing.T) { return err } - series, err := res.Path("data").Children() - if err != nil { - return err + series := res.Path("data").Children() + if series == nil { + return errors.New("data not found") } if len(series) != 1 { @@ -1350,9 +1350,9 @@ func assertTenancyForSeriesMetadata(t *testing.T) { return err } - values, err := res.Path("data").Children() - if err != nil { - return err + values := res.Path("data").Children() + if values == nil { + return errors.New("data not found") } if len(values) != 1 { diff --git a/test/e2e/utils.go b/test/e2e/utils.go index 28004208eb..9e98348fdd 100644 --- a/test/e2e/utils.go +++ b/test/e2e/utils.go @@ -15,10 +15,11 @@ package e2e import ( + "errors" "fmt" "time" - "github.com/Jeffail/gabs" + "github.com/Jeffail/gabs/v2" "github.com/openshift/library-go/pkg/crypto" v1 "k8s.io/api/core/v1" @@ -32,9 +33,9 @@ func getActiveTarget(body []byte, jobName string) error { return err } - activeJobs, err := j.Path("data.activeTargets").Children() - if err != nil { - return err + activeJobs := j.Path("data.activeTargets").Children() + if activeJobs == nil { + return errors.New("data.activeTargets not found") } for _, job := range activeJobs { @@ -54,9 +55,9 @@ func getThanosRules(body []byte, expGroupName, expRuleName string) error { return err } - groups, err := j.Path("data.groups").Children() - if err != nil { - return err + groups := j.Path("data.groups").Children() + if groups == nil { + return errors.New("data.groups not found") } for i := 0; i < len(groups); i++ { @@ -65,11 +66,7 @@ func getThanosRules(body []byte, expGroupName, expRuleName string) error { continue } - rules, err := groups[i].Path("rules").Children() - if err != nil { - return err - } - + rules := groups[i].Path("rules").Children() for j := 0; j < len(rules); j++ { ruleName := rules[j].S("name").Data().(string) if ruleName == expRuleName { @@ -77,6 +74,7 @@ func getThanosRules(body []byte, expGroupName, expRuleName string) error { } } } + return fmt.Errorf("'%s' alert not found in '%s' group", expRuleName, expGroupName) } diff --git 
a/vendor/github.com/Jeffail/gabs/LICENSE b/vendor/github.com/Jeffail/gabs/LICENSE deleted file mode 100644 index 99a62c6298..0000000000 --- a/vendor/github.com/Jeffail/gabs/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2014 Ashley Jeffs - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/Jeffail/gabs/README.md b/vendor/github.com/Jeffail/gabs/README.md deleted file mode 100644 index 962ba68648..0000000000 --- a/vendor/github.com/Jeffail/gabs/README.md +++ /dev/null @@ -1,290 +0,0 @@ -![Gabs](gabs_logo.png "Gabs") - -Gabs is a small utility for dealing with dynamic or unknown JSON structures in -golang. It's pretty much just a helpful wrapper around the golang -`json.Marshal/json.Unmarshal` behaviour and `map[string]interface{}` objects. -It does nothing spectacular except for being fabulous. 
- -https://godoc.org/github.com/Jeffail/gabs - -## Install - -``` bash -go get github.com/Jeffail/gabs -``` - -## Use - -### Parsing and searching JSON - -``` go -jsonParsed, err := gabs.ParseJSON([]byte(`{ - "outter":{ - "inner":{ - "value1":10, - "value2":22 - }, - "alsoInner":{ - "value1":20, - "array1":[ - 30, 40 - ] - } - } -}`)) - -var value float64 -var ok bool - -value, ok = jsonParsed.Path("outter.inner.value1").Data().(float64) -// value == 10.0, ok == true - -value, ok = jsonParsed.Search("outter", "inner", "value1").Data().(float64) -// value == 10.0, ok == true - -gObj, err := jsonParsed.JSONPointer("/outter/alsoInner/array1/1") -if err != nil { - panic(err) -} -value, ok = gObj.Data().(float64) -// value == 40.0, ok == true - -value, ok = jsonParsed.Path("does.not.exist").Data().(float64) -// value == 0.0, ok == false - -exists := jsonParsed.Exists("outter", "inner", "value1") -// exists == true - -exists := jsonParsed.ExistsP("does.not.exist") -// exists == false -``` - -### Iterating objects - -``` go -jsonParsed, _ := gabs.ParseJSON([]byte(`{"object":{ "first": 1, "second": 2, "third": 3 }}`)) - -// S is shorthand for Search -children, _ := jsonParsed.S("object").ChildrenMap() -for key, child := range children { - fmt.Printf("key: %v, value: %v\n", key, child.Data().(string)) -} -``` - -### Iterating arrays - -``` go -jsonParsed, err := gabs.ParseJSON([]byte(`{"array":[ "first", "second", "third" ]}`)) -if err != nil { - panic(err) -} - -// S is shorthand for Search -children, err := jsonParsed.S("array").Children() -if err != nil { - panic(err) -} - -for _, child := range children { - fmt.Println(child.Data().(string)) -} -``` - -Will print: - -``` -first -second -third -``` - -Children() will return all children of an array in order. This also works on -objects, however, the children will be returned in a random order. - -### Searching through arrays - -If your JSON structure contains arrays you can still search the fields of the -objects within the array, this returns a JSON array containing the results for -each element. 
- -``` go -jsonParsed, err := gabs.ParseJSON([]byte(`{"array":[ {"value":1}, {"value":2}, {"value":3} ]}`)) -if err != nil { - panic(err) -} -fmt.Println(jsonParsed.Path("array.value").String()) -``` - -Will print: - -``` -[1,2,3] -``` - -### Generating JSON - -``` go -jsonObj := gabs.New() -// or gabs.Consume(jsonObject) to work on an existing map[string]interface{} - -jsonObj.Set(10, "outter", "inner", "value") -jsonObj.SetP(20, "outter.inner.value2") -jsonObj.Set(30, "outter", "inner2", "value3") - -fmt.Println(jsonObj.String()) -``` - -Will print: - -``` -{"outter":{"inner":{"value":10,"value2":20},"inner2":{"value3":30}}} -``` - -To pretty-print: - -``` go -fmt.Println(jsonObj.StringIndent("", " ")) -``` - -Will print: - -``` -{ - "outter": { - "inner": { - "value": 10, - "value2": 20 - }, - "inner2": { - "value3": 30 - } - } -} -``` - -### Generating Arrays - -``` go -jsonObj := gabs.New() - -jsonObj.Array("foo", "array") -// Or .ArrayP("foo.array") - -jsonObj.ArrayAppend(10, "foo", "array") -jsonObj.ArrayAppend(20, "foo", "array") -jsonObj.ArrayAppend(30, "foo", "array") - -fmt.Println(jsonObj.String()) -``` - -Will print: - -``` -{"foo":{"array":[10,20,30]}} -``` - -Working with arrays by index: - -``` go -jsonObj := gabs.New() - -// Create an array with the length of 3 -jsonObj.ArrayOfSize(3, "foo") - -jsonObj.S("foo").SetIndex("test1", 0) -jsonObj.S("foo").SetIndex("test2", 1) - -// Create an embedded array with the length of 3 -jsonObj.S("foo").ArrayOfSizeI(3, 2) - -jsonObj.S("foo").Index(2).SetIndex(1, 0) -jsonObj.S("foo").Index(2).SetIndex(2, 1) -jsonObj.S("foo").Index(2).SetIndex(3, 2) - -fmt.Println(jsonObj.String()) -``` - -Will print: - -``` -{"foo":["test1","test2",[1,2,3]]} -``` - -### Converting back to JSON - -This is the easiest part: - -``` go -jsonParsedObj, _ := gabs.ParseJSON([]byte(`{ - "outter":{ - "values":{ - "first":10, - "second":11 - } - }, - "outter2":"hello world" -}`)) - -jsonOutput := jsonParsedObj.String() -// Becomes `{"outter":{"values":{"first":10,"second":11}},"outter2":"hello world"}` -``` - -And to serialize a specific segment is as simple as: - -``` go -jsonParsedObj := gabs.ParseJSON([]byte(`{ - "outter":{ - "values":{ - "first":10, - "second":11 - } - }, - "outter2":"hello world" -}`)) - -jsonOutput := jsonParsedObj.Search("outter").String() -// Becomes `{"values":{"first":10,"second":11}}` -``` - -### Merge two containers - -You can merge a JSON structure into an existing one, where collisions will be -converted into a JSON array. - -``` go -jsonParsed1, _ := ParseJSON([]byte(`{"outter": {"value1": "one"}}`)) -jsonParsed2, _ := ParseJSON([]byte(`{"outter": {"inner": {"value3": "three"}}, "outter2": {"value2": "two"}}`)) - -jsonParsed1.Merge(jsonParsed2) -// Becomes `{"outter":{"inner":{"value3":"three"},"value1":"one"},"outter2":{"value2":"two"}}` -``` - -Arrays are merged: - -``` go -jsonParsed1, _ := ParseJSON([]byte(`{"array": ["one"]}`)) -jsonParsed2, _ := ParseJSON([]byte(`{"array": ["two"]}`)) - -jsonParsed1.Merge(jsonParsed2) -// Becomes `{"array":["one", "two"]}` -``` - -### Parsing Numbers - -Gabs uses the `json` package under the bonnet, which by default will parse all -number values into `float64`. 
If you need to parse `Int` values then you should -use a `json.Decoder` (https://golang.org/pkg/encoding/json/#Decoder): - -``` go -sample := []byte(`{"test":{"int":10, "float":6.66}}`) -dec := json.NewDecoder(bytes.NewReader(sample)) -dec.UseNumber() - -val, err := gabs.ParseJSONDecoder(dec) -if err != nil { - t.Errorf("Failed to parse: %v", err) - return -} - -intValue, err := val.Path("test.int").Data().(json.Number).Int64() -``` diff --git a/vendor/github.com/Jeffail/gabs/gabs.go b/vendor/github.com/Jeffail/gabs/gabs.go deleted file mode 100644 index 011c4c3924..0000000000 --- a/vendor/github.com/Jeffail/gabs/gabs.go +++ /dev/null @@ -1,727 +0,0 @@ -/* -Copyright (c) 2014 Ashley Jeffs - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. -*/ - -// Package gabs implements a simplified wrapper around creating and parsing -// unknown or dynamic JSON. -package gabs - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "strconv" - "strings" -) - -//------------------------------------------------------------------------------ - -var ( - // ErrOutOfBounds indicates an index was out of bounds. - ErrOutOfBounds = errors.New("out of bounds") - - // ErrNotObjOrArray is returned when a target is not an object or array type - // but needs to be for the intended operation. - ErrNotObjOrArray = errors.New("not an object or array") - - // ErrNotObj is returned when a target is not an object but needs to be for - // the intended operation. - ErrNotObj = errors.New("not an object") - - // ErrNotArray is returned when a target is not an array but needs to be for - // the intended operation. - ErrNotArray = errors.New("not an array") - - // ErrPathCollision is returned when creating a path failed because an - // element collided with an existing value. - ErrPathCollision = errors.New("encountered value collision whilst building path") - - // ErrInvalidInputObj is returned when the input value was not a - // map[string]interface{}. - ErrInvalidInputObj = errors.New("invalid input object") - - // ErrInvalidInputText is returned when the input data could not be parsed. - ErrInvalidInputText = errors.New("input text could not be parsed") - - // ErrInvalidPath is returned when the filepath was not valid. - ErrInvalidPath = errors.New("invalid file path") - - // ErrInvalidBuffer is returned when the input buffer contained an invalid - // JSON string. 
- ErrInvalidBuffer = errors.New("input buffer contained invalid JSON") -) - -//------------------------------------------------------------------------------ - -func resolveJSONPointerHierarchy(path string) ([]string, error) { - if len(path) < 1 { - return nil, errors.New("failed to resolve JSON pointer: path must not be empty") - } - if path[0] != '/' { - return nil, errors.New("failed to resolve JSON pointer: path must begin with '/'") - } - hierarchy := strings.Split(path, "/")[1:] - for i, v := range hierarchy { - v = strings.Replace(v, "~1", "/", -1) - v = strings.Replace(v, "~0", "~", -1) - hierarchy[i] = v - } - return hierarchy, nil -} - -//------------------------------------------------------------------------------ - -// Container references a specific element within a JSON structure. -type Container struct { - object interface{} -} - -// Data returns the underlying interface{} of the target element in the JSON -// structure. -func (g *Container) Data() interface{} { - if g == nil { - return nil - } - return g.object -} - -//------------------------------------------------------------------------------ - -// Path searches the JSON structure following a path in dot notation. -func (g *Container) Path(path string) *Container { - return g.Search(strings.Split(path, ".")...) -} - -// Search attempts to find and return an object within the JSON structure by -// following a provided hierarchy of field names to locate the target. If the -// search encounters an array and has not reached the end target then it will -// iterate each object of the array for the target and return all of the results -// in a JSON array. -func (g *Container) Search(hierarchy ...string) *Container { - var object interface{} - - object = g.Data() - for target := 0; target < len(hierarchy); target++ { - if mmap, ok := object.(map[string]interface{}); ok { - object, ok = mmap[hierarchy[target]] - if !ok { - return nil - } - } else if marray, ok := object.([]interface{}); ok { - tmpArray := []interface{}{} - for _, val := range marray { - tmpGabs := &Container{val} - res := tmpGabs.Search(hierarchy[target:]...) - if res != nil { - tmpArray = append(tmpArray, res.Data()) - } - } - if len(tmpArray) == 0 { - return nil - } - return &Container{tmpArray} - } else { - return nil - } - } - return &Container{object} -} - -// JSONPointer parses a JSON pointer path (https://tools.ietf.org/html/rfc6901) -// and either returns a *gabs.Container containing the result or an error if the -// referenced item could not be found. 
-func (g *Container) JSONPointer(path string) (*Container, error) { - hierarchy, err := resolveJSONPointerHierarchy(path) - if err != nil { - return nil, err - } - - object := g.Data() - for target := 0; target < len(hierarchy); target++ { - pathSeg := hierarchy[target] - if mmap, ok := object.(map[string]interface{}); ok { - object, ok = mmap[pathSeg] - if !ok { - return nil, fmt.Errorf("failed to resolve JSON pointer: index '%v' value '%v' was not found", target, pathSeg) - } - } else if marray, ok := object.([]interface{}); ok { - index, err := strconv.Atoi(pathSeg) - if err != nil { - return nil, fmt.Errorf("failed to resolve JSON pointer: could not parse index '%v' value '%v' into array index: %v", target, pathSeg, err) - } - if len(marray) <= index { - return nil, fmt.Errorf("failed to resolve JSON pointer: index '%v' value '%v' exceeded target array size of '%v'", target, pathSeg, len(marray)) - } - object = marray[index] - } else { - return &Container{nil}, fmt.Errorf("failed to resolve JSON pointer: index '%v' field '%v' was not found", target, pathSeg) - } - } - return &Container{object}, nil -} - -// S is a shorthand alias for Search. -func (g *Container) S(hierarchy ...string) *Container { - return g.Search(hierarchy...) -} - -// Exists checks whether a path exists. -func (g *Container) Exists(hierarchy ...string) bool { - return g.Search(hierarchy...) != nil -} - -// ExistsP checks whether a dot notation path exists. -func (g *Container) ExistsP(path string) bool { - return g.Exists(strings.Split(path, ".")...) -} - -// Index attempts to find and return an element within a JSON array by an index. -func (g *Container) Index(index int) *Container { - if array, ok := g.Data().([]interface{}); ok { - if index >= len(array) { - return &Container{nil} - } - return &Container{array[index]} - } - return &Container{nil} -} - -// Children returns a slice of all children of an array element. This also works -// for objects, however, the children returned for an object will be in a random -// order and you lose the names of the returned objects this way. -func (g *Container) Children() ([]*Container, error) { - if array, ok := g.Data().([]interface{}); ok { - children := make([]*Container, len(array)) - for i := 0; i < len(array); i++ { - children[i] = &Container{array[i]} - } - return children, nil - } - if mmap, ok := g.Data().(map[string]interface{}); ok { - children := []*Container{} - for _, obj := range mmap { - children = append(children, &Container{obj}) - } - return children, nil - } - return nil, ErrNotObjOrArray -} - -// ChildrenMap returns a map of all the children of an object element. -func (g *Container) ChildrenMap() (map[string]*Container, error) { - if mmap, ok := g.Data().(map[string]interface{}); ok { - children := map[string]*Container{} - for name, obj := range mmap { - children[name] = &Container{obj} - } - return children, nil - } - return nil, ErrNotObj -} - -//------------------------------------------------------------------------------ - -// Set the value of a field at a JSON path, any parts of the path that do not -// exist will be constructed, and if a collision occurs with a non object type -// whilst iterating the path an error is returned. 
-func (g *Container) Set(value interface{}, path ...string) (*Container, error) { - if len(path) == 0 { - g.object = value - return g, nil - } - var object interface{} - if g.object == nil { - g.object = map[string]interface{}{} - } - object = g.object - for target := 0; target < len(path); target++ { - if mmap, ok := object.(map[string]interface{}); ok { - if target == len(path)-1 { - mmap[path[target]] = value - } else if mmap[path[target]] == nil { - mmap[path[target]] = map[string]interface{}{} - } - object = mmap[path[target]] - } else { - return &Container{nil}, ErrPathCollision - } - } - return &Container{object}, nil -} - -// SetP sets the value of a field at a JSON path using dot notation, any parts -// of the path that do not exist will be constructed, and if a collision occurs -// with a non object type whilst iterating the path an error is returned. -func (g *Container) SetP(value interface{}, path string) (*Container, error) { - return g.Set(value, strings.Split(path, ".")...) -} - -// SetIndex attempts to set a value of an array element based on an index. -func (g *Container) SetIndex(value interface{}, index int) (*Container, error) { - if array, ok := g.Data().([]interface{}); ok { - if index >= len(array) { - return &Container{nil}, ErrOutOfBounds - } - array[index] = value - return &Container{array[index]}, nil - } - return &Container{nil}, ErrNotArray -} - -// SetJSONPointer parses a JSON pointer path -// (https://tools.ietf.org/html/rfc6901) and sets the leaf to a value. Returns -// an error if the pointer could not be resolved due to missing fields. -func (g *Container) SetJSONPointer(value interface{}, path string) error { - hierarchy, err := resolveJSONPointerHierarchy(path) - if err != nil { - return err - } - - if len(hierarchy) == 0 { - g.object = value - return nil - } - - object := g.object - - for target := 0; target < len(hierarchy); target++ { - pathSeg := hierarchy[target] - if mmap, ok := object.(map[string]interface{}); ok { - if target == len(hierarchy)-1 { - object = value - mmap[pathSeg] = object - } else if object = mmap[pathSeg]; object == nil { - return fmt.Errorf("failed to resolve JSON pointer: index '%v' value '%v' was not found", target, pathSeg) - } - } else if marray, ok := object.([]interface{}); ok { - index, err := strconv.Atoi(pathSeg) - if err != nil { - return fmt.Errorf("failed to resolve JSON pointer: could not parse index '%v' value '%v' into array index: %v", target, pathSeg, err) - } - if len(marray) <= index { - return fmt.Errorf("failed to resolve JSON pointer: index '%v' value '%v' exceeded target array size of '%v'", target, pathSeg, len(marray)) - } - if target == len(hierarchy)-1 { - object = value - marray[index] = object - } else if object = marray[index]; object == nil { - return fmt.Errorf("failed to resolve JSON pointer: index '%v' value '%v' was not found", target, pathSeg) - } - } else { - return fmt.Errorf("failed to resolve JSON pointer: index '%v' value '%v' was not found", target, pathSeg) - } - } - return nil -} - -// Object creates a new JSON object at a target path. Returns an error if the -// path contains a collision with a non object type. -func (g *Container) Object(path ...string) (*Container, error) { - return g.Set(map[string]interface{}{}, path...) -} - -// ObjectP creates a new JSON object at a target path using dot notation. -// Returns an error if the path contains a collision with a non object type. 
-func (g *Container) ObjectP(path string) (*Container, error) { - return g.Object(strings.Split(path, ".")...) -} - -// ObjectI creates a new JSON object at an array index. Returns an error if the -// object is not an array or the index is out of bounds. -func (g *Container) ObjectI(index int) (*Container, error) { - return g.SetIndex(map[string]interface{}{}, index) -} - -// Array creates a new JSON array at a path. Returns an error if the path -// contains a collision with a non object type. -func (g *Container) Array(path ...string) (*Container, error) { - return g.Set([]interface{}{}, path...) -} - -// ArrayP creates a new JSON array at a path using dot notation. Returns an -// error if the path contains a collision with a non object type. -func (g *Container) ArrayP(path string) (*Container, error) { - return g.Array(strings.Split(path, ".")...) -} - -// ArrayI creates a new JSON array within an array at an index. Returns an error -// if the element is not an array or the index is out of bounds. -func (g *Container) ArrayI(index int) (*Container, error) { - return g.SetIndex([]interface{}{}, index) -} - -// ArrayOfSize creates a new JSON array of a particular size at a path. Returns -// an error if the path contains a collision with a non object type. -func (g *Container) ArrayOfSize(size int, path ...string) (*Container, error) { - a := make([]interface{}, size) - return g.Set(a, path...) -} - -// ArrayOfSizeP creates a new JSON array of a particular size at a path using -// dot notation. Returns an error if the path contains a collision with a non -// object type. -func (g *Container) ArrayOfSizeP(size int, path string) (*Container, error) { - return g.ArrayOfSize(size, strings.Split(path, ".")...) -} - -// ArrayOfSizeI create a new JSON array of a particular size within an array at -// an index. Returns an error if the element is not an array or the index is out -// of bounds. -func (g *Container) ArrayOfSizeI(size, index int) (*Container, error) { - a := make([]interface{}, size) - return g.SetIndex(a, index) -} - -// Delete an element at a path, an error is returned if the element does not -// exist. -func (g *Container) Delete(path ...string) error { - var object interface{} - - if g.object == nil { - return ErrNotObj - } - object = g.object - for target := 0; target < len(path); target++ { - if mmap, ok := object.(map[string]interface{}); ok { - if target == len(path)-1 { - if _, ok := mmap[path[target]]; ok { - delete(mmap, path[target]) - } else { - return ErrNotObj - } - } - object = mmap[path[target]] - } else { - return ErrNotObj - } - } - return nil -} - -// DeleteP deletes an element at a path using dot notation, an error is returned -// if the element does not exist. -func (g *Container) DeleteP(path string) error { - return g.Delete(strings.Split(path, ".")...) -} - -// MergeFn merges two objects using a provided function to resolve collisions. -// -// The collision function receives two interface{} arguments, destination (the -// original object) and source (the object being merged into the destination). -// Which ever value is returned becomes the new value in the destination object -// at the location of the collision. -func (g *Container) MergeFn(source *Container, collisionFn func(destination, source interface{}) interface{}) error { - var recursiveFnc func(map[string]interface{}, []string) error - recursiveFnc = func(mmap map[string]interface{}, path []string) error { - for key, value := range mmap { - newPath := append(path, key) - if g.Exists(newPath...) 
{ - existingData := g.Search(newPath...).Data() - switch t := value.(type) { - case map[string]interface{}: - switch existingVal := existingData.(type) { - case map[string]interface{}: - if err := recursiveFnc(t, newPath); err != nil { - return err - } - default: - if _, err := g.Set(collisionFn(existingVal, t), newPath...); err != nil { - return err - } - } - default: - if _, err := g.Set(collisionFn(existingData, t), newPath...); err != nil { - return err - } - } - } else { - // path doesn't exist. So set the value - if _, err := g.Set(value, newPath...); err != nil { - return err - } - } - } - return nil - } - if mmap, ok := source.Data().(map[string]interface{}); ok { - return recursiveFnc(mmap, []string{}) - } - return nil -} - -// Merge a source object into an existing destination object. When a collision -// is found within the merged structures (both a source and destination object -// contain the same non-object keys) the result will be an array containing both -// values, where values that are already arrays will be expanded into the -// resulting array. -// -// It is possible to merge structures will different collision behaviours with -// MergeFn. -func (g *Container) Merge(source *Container) error { - return g.MergeFn(source, func(dest, source interface{}) interface{} { - destArr, destIsArray := dest.([]interface{}) - sourceArr, sourceIsArray := source.([]interface{}) - if destIsArray { - if sourceIsArray { - return append(destArr, sourceArr...) - } - return append(destArr, source) - } - if sourceIsArray { - return append(append([]interface{}{}, dest), sourceArr...) - } - return []interface{}{dest, source} - }) -} - -//------------------------------------------------------------------------------ - -/* -Array modification/search - Keeping these options simple right now, no need for -anything more complicated since you can just cast to []interface{}, modify and -then reassign with Set. -*/ - -// ArrayAppend attempts to append a value onto a JSON array at a path. If the -// target is not a JSON array then it will be converted into one, with its -// original contents set to the first element of the array. -func (g *Container) ArrayAppend(value interface{}, path ...string) error { - if array, ok := g.Search(path...).Data().([]interface{}); ok { - array = append(array, value) - _, err := g.Set(array, path...) - return err - } - - newArray := []interface{}{} - if d := g.Search(path...).Data(); d != nil { - newArray = append(newArray, d) - } - newArray = append(newArray, value) - - _, err := g.Set(newArray, path...) - return err -} - -// ArrayAppendP attempts to append a value onto a JSON array at a path using dot -// notation. If the target is not a JSON array then it will be converted into -// one, with its original contents set to the first element of the array. -func (g *Container) ArrayAppendP(value interface{}, path string) error { - return g.ArrayAppend(value, strings.Split(path, ".")...) -} - -// ArrayRemove attempts to remove an element identified by an index from a JSON -// array at a path. -func (g *Container) ArrayRemove(index int, path ...string) error { - if index < 0 { - return ErrOutOfBounds - } - array, ok := g.Search(path...).Data().([]interface{}) - if !ok { - return ErrNotArray - } - if index < len(array) { - array = append(array[:index], array[index+1:]...) - } else { - return ErrOutOfBounds - } - _, err := g.Set(array, path...) 
- return err -} - -// ArrayRemoveP attempts to remove an element identified by an index from a JSON -// array at a path using dot notation. -func (g *Container) ArrayRemoveP(index int, path string) error { - return g.ArrayRemove(index, strings.Split(path, ".")...) -} - -// ArrayElement attempts to access an element by an index from a JSON array at a -// path. -func (g *Container) ArrayElement(index int, path ...string) (*Container, error) { - if index < 0 { - return &Container{nil}, ErrOutOfBounds - } - array, ok := g.Search(path...).Data().([]interface{}) - if !ok { - return &Container{nil}, ErrNotArray - } - if index < len(array) { - return &Container{array[index]}, nil - } - return &Container{nil}, ErrOutOfBounds -} - -// ArrayElementP attempts to access an element by an index from a JSON array at -// a path using dot notation. -func (g *Container) ArrayElementP(index int, path string) (*Container, error) { - return g.ArrayElement(index, strings.Split(path, ".")...) -} - -// ArrayCount counts the number of elements in a JSON array at a path. -func (g *Container) ArrayCount(path ...string) (int, error) { - if array, ok := g.Search(path...).Data().([]interface{}); ok { - return len(array), nil - } - return 0, ErrNotArray -} - -// ArrayCountP counts the number of elements in a JSON array at a path using dot -// notation. -func (g *Container) ArrayCountP(path string) (int, error) { - return g.ArrayCount(strings.Split(path, ".")...) -} - -//------------------------------------------------------------------------------ - -// Bytes marshals an element to a JSON []byte blob. -func (g *Container) Bytes() []byte { - if g.Data() != nil { - if bytes, err := json.Marshal(g.object); err == nil { - return bytes - } - } - return []byte("{}") -} - -// BytesIndent marshals an element to a JSON []byte blob formatted with a prefix -// and indent string. -func (g *Container) BytesIndent(prefix string, indent string) []byte { - if g.object != nil { - if bytes, err := json.MarshalIndent(g.object, prefix, indent); err == nil { - return bytes - } - } - return []byte("{}") -} - -// String marshals an element to a JSON formatted string. -func (g *Container) String() string { - return string(g.Bytes()) -} - -// StringIndent marshals an element to a JSON string formatted with a prefix and -// indent string. -func (g *Container) StringIndent(prefix string, indent string) string { - return string(g.BytesIndent(prefix, indent)) -} - -// EncodeOpt is a functional option for the EncodeJSON method. -type EncodeOpt func(e *json.Encoder) - -// EncodeOptHTMLEscape sets the encoder to escape the JSON for html. -func EncodeOptHTMLEscape(doEscape bool) EncodeOpt { - return func(e *json.Encoder) { - e.SetEscapeHTML(doEscape) - } -} - -// EncodeOptIndent sets the encoder to indent the JSON output. -func EncodeOptIndent(prefix string, indent string) EncodeOpt { - return func(e *json.Encoder) { - e.SetIndent(prefix, indent) - } -} - -// EncodeJSON marshals an element to a JSON formatted []byte using a variant -// list of modifier functions for the encoder being used. Functions for -// modifying the output are prefixed with EncodeOpt, e.g. EncodeOptHTMLEscape. -func (g *Container) EncodeJSON(encodeOpts ...EncodeOpt) []byte { - var b bytes.Buffer - encoder := json.NewEncoder(&b) - encoder.SetEscapeHTML(false) // Do not escape by default. 
-	for _, opt := range encodeOpts {
-		opt(encoder)
-	}
-	if err := encoder.Encode(g.object); err != nil {
-		return []byte("{}")
-	}
-	result := b.Bytes()
-	if len(result) > 0 {
-		result = result[:len(result)-1]
-	}
-	return result
-}
-
-// New creates a new gabs JSON object.
-func New() *Container {
-	return &Container{map[string]interface{}{}}
-}
-
-// Consume an already unmarshalled JSON object (or a new map[string]interface{})
-// into a *Container.
-func Consume(root interface{}) (*Container, error) {
-	return &Container{root}, nil
-}
-
-// ParseJSON unmarshals a JSON byte slice into a *Container.
-func ParseJSON(sample []byte) (*Container, error) {
-	var gabs Container
-
-	if err := json.Unmarshal(sample, &gabs.object); err != nil {
-		return nil, err
-	}
-
-	return &gabs, nil
-}
-
-// ParseJSONDecoder applies a json.Decoder to a *Container.
-func ParseJSONDecoder(decoder *json.Decoder) (*Container, error) {
-	var gabs Container
-
-	if err := decoder.Decode(&gabs.object); err != nil {
-		return nil, err
-	}
-
-	return &gabs, nil
-}
-
-// ParseJSONFile reads a file and unmarshals the contents into a *Container.
-func ParseJSONFile(path string) (*Container, error) {
-	if len(path) > 0 {
-		cBytes, err := ioutil.ReadFile(path)
-		if err != nil {
-			return nil, err
-		}
-
-		container, err := ParseJSON(cBytes)
-		if err != nil {
-			return nil, err
-		}
-
-		return container, nil
-	}
-	return nil, ErrInvalidPath
-}
-
-// ParseJSONBuffer reads a buffer and unmarshals the contents into a *Container.
-func ParseJSONBuffer(buffer io.Reader) (*Container, error) {
-	var gabs Container
-	jsonDecoder := json.NewDecoder(buffer)
-	if err := jsonDecoder.Decode(&gabs.object); err != nil {
-		return nil, err
-	}
-
-	return &gabs, nil
-}
-
-//------------------------------------------------------------------------------
diff --git a/vendor/github.com/Jeffail/gabs/gabs_logo.png b/vendor/github.com/Jeffail/gabs/gabs_logo.png
deleted file mode 100644
index b6c1fad9931d5f5d8269150c097ac749ed32a73b..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 167771
[167771 bytes of base85-encoded PNG data omitted]
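Note on the API change (not part of the patch itself): in gabs v2, `Children()` and `ChildrenMap()` no longer return an error. `Children()` returns a nil slice when the target is absent or not an array/object, and `ChildrenMap()` returns an empty, non-nil map when the target is not an object, which is why the hunks above replace the old error checks with nil and length checks. Below is a minimal, self-contained sketch of the v2 calling convention, assuming gabs v2.6.x; the `groupNames` helper and the sample payload are illustrative and do not appear in the repository.

```go
package main

import (
	"errors"
	"fmt"

	"github.com/Jeffail/gabs/v2"
)

// groupNames mirrors the pattern used by the updated test helpers: parse a
// Prometheus-style API response and walk "data.groups" without the v1-era
// error returns.
func groupNames(body []byte) ([]string, error) {
	j, err := gabs.ParseJSON(body)
	if err != nil {
		return nil, err
	}

	// v1: groups, err := j.Path("data.groups").Children()
	// v2: no error value; a nil slice means the path is absent or not an array.
	groups := j.Path("data.groups").Children()
	if groups == nil {
		return nil, errors.New("data.groups not found")
	}

	names := make([]string, 0, len(groups))
	for _, g := range groups {
		// S is shorthand for Search; Data() is nil-receiver safe, so the
		// comma-ok assertion cleanly reports a missing or non-string name.
		name, ok := g.S("name").Data().(string)
		if !ok {
			return nil, errors.New("group name missing or not a string")
		}
		names = append(names, name)
	}
	return names, nil
}

func main() {
	body := []byte(`{"data":{"groups":[{"name":"a"},{"name":"b"}]}}`)
	names, err := groupNames(body)
	fmt.Println(names, err) // prints: [a b] <nil>
}
```

Because `ChildrenMap()` never returns nil in v2, emptiness (`len(m) == 0`) rather than a nil comparison is the reliable guard for that call, as in the `assertTenancyForMetrics` hunk.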
z2O%G7Lj){lhtMbw@ZFae#(fS6;mbS2S{BG6jEkRA0G?i4AcB}FvzNW_`V!X=1P4^V zy(_y-{~}I9l~3LDcKd!B@nX4Oir2o&;7W~`D1r5winr>V&4)d_PHV0-b(|`Fvt_pU zQiTko=o#hh9UzQYNWtn3J)e^r?d2iN5ObcaI;6onXm>Ggsa1nY3jDjV>wm%d3lnJ~42^nm>bgOzyU$zzL&aw3~f`hdm6O=7)s( z2EIW93djZ3OPQ46-$}|=RU)8jEOR^A;ht!L3YETrojkbZwRv zI`O)r&1SS4*BNIL^*8t$!~=yUrzxKCshrJvJrf1&38r}QIEYhjdmyrJ_de2OC~wCL z7}Dn`+<&p%n13mnOFu3Jbl{{)Vi!2wY;i89>2!&yH!LAXvK~g(1^LA8J0* z&iwbvIYBJHQFF*zbUKXXjUD_QW5~&&M}Mv8*E3n2quh+N4YIP#ZZcDh{DRFb%rDBA z)oPfohwY)sqocxs6D+eU9SZKX+U-P@Ptc_0ZqI+0@Lqce;e$1J^QHlnv$nKUy)S`I zj%mwqng-V>?~lHdyKVY+HQVwQkACR(IX`%%PK7&Utm6d3L;7yNu!4TU+kAZoLLwa# z9-|$t)9u5jvpu}E>fze*ihea4Gwd)5&;=HrD>4-8i54U~iQKFix;*PTnwmVYu=7p6 zspw$cq`;!o>hK>hwpN=A*An)CWcX%E8zJ9XQ;UeT%9geIBrZ*XiGFKCTs;?PxR@@E zA<2qMt>=Db{#Uc1j)wSd;jz+RoKLGb@B5E`Oi&#)D9C((%apZ=F^zA7>p5u_wWXsiS~&d~XE<2{R9XVR`QN79&x{d{Y|Cj&{Bn zZQ-r?F1OI}yg2|%RgE)alAwI3e}3C)2yJExE_VHl@!V>N^D-=Cl$Ii|6k+?B!UD+% z+8G|)T~*CRFxgsoUGM%{y_9a!0U6&7+=`-lm8%d?V!C?d9qfCPP>S0-TFxpQxgHP=xZC=qgReDY_y*2Jxx}cYFD(Ft*r& z&)kW_jC?zqEPflw9x7Q|4T2t%q)R+0LR4g|r+01EODKEZ!s10S$h)grn{E_< z(`od{Sntlfqht8D@!vAEnrzQH%5rG3m-VyTg6SB8mY+;F30ZnlBzp9uc8B6BbudOC z+l_njTj4sbh6%}Ppoh*2Rrw%rd*5nA+CGyVZ&@BB`CmK65Z=0UK;^8c<_aqu4&LfB znegoKJpl&y4pGpu>{gdcC_8o`(qpwPm#*7_mER|rpVKgC2C#1@-qv?d7c@av@yGkp zK4-88%;|NS!da=;mdUSy-Bg{En#L1SVAw{$otLT<2%Vd`M8CfaL>JpuJm5dA&9hBl zBd@Q@Pc+I!lwe)O1`~7eA7u7MOq4D6`W1+61Eyw0@dM~(N0aoByoQTmR&$mR3fAh? z^HBEw#v#~ZaHzyWFmF~rK}%2(+PB)b8kXJ_N_GHULve3evuDa#Vzy0BdWB&04vDb~ zRYOne%gQW<2GDG&3U_dL|Q`GkEVu%OEgeXV>gU9V4FkN zC47=8c6T9_O$hUyGUPEoRC%Q9T!} zHZ6Iq`8=#J9pnL_ptt| zAe-lVpU|CC65|I5U6|a>OAu{p+H%}z-e#jVR(eJEeBJl-L@=e4hje_?658Z1+U$OM zqQT!MtO#}TpBTL9bOJ927qRY7#4@Oc)I|fLZv=IeJHQ3neELC42_SEUxf?(F`Yb{U_kl@P(aXbXy3rgy;X@3nR(-H-C>d5O?i{wI#|Jq?IXLk@*x~Pt zv|9P{XWJ8Gx>3fmvmC`P?s*}7e^A0mP`^Qs5D0c7gx6%B^-wFbX}#)-eAP9%`|I@i z6`ZDX$9g}9$sia0{hW1^JUkVUbQrJZ3gf2B(|(nv6QtnVVlp|BJ|32y)M|&wwnL=2 z+AQWgntU&IM*FJyOE^QVN_15aI@q&-VDEWR+W}Snbgp@y|1j)uHJO>GfTE@gupe%k zl=WwIIH41+PSc(}(%CXmP3%TBT zd^fu8B6@J*us^Hcl0Zdm<)C-;%eLOLrt@56oe;bw1^M zEx2ZJ@PvzKx?#G>$Fz}#A{eIU*Q%padcd&99Kd$VBz>T20w!leQ$n~%O=2sW+ccu6 z3rJDd>~}w;+qpvmYz$HN5r$6(bxuh4G=&tc8s3ZIR{I>=`%EvVk6SdpP-xfXfzex6 zao+LWVgJ@b}ckX}QjdqLJvX zj!#bVCrYRrSLHCww*r=)Oz$LqdT<_m_08!h9=+(hT2Fs`TkuzpCR~6#O@;zY62|US zFwKUkP$?6hU)_KEL&ke0I*m{DD{F;khJr~LWMpPk0@fl*W*Rzm+EzJcT&^0I$w<~5 zB^U=|^5g=~tesH$XknXG7_GwH2UbB%3RL}hFv&!$yQo%eD(_fn2ela!OvRyM0`@R5 z8mFi{mE5I_S}oft4Q;8qjSIJ=hWb@F`3^qjB!j(oeV12ZL=k!TgM+8x!ad|sM1lez zQ1AG7^>T*lMssS=e!(WS?v!|vszh1e>U^n@_r)?R)y)^Fzchk)I)R4xfvO;~o#Lmn z+eDT|_7)jU(fDr+WWW}wM@AKn6lfnA5AOboS2E)sGfcl?p0}h1_)i>}p^X3|c3!P^ z;-d+pPGX89;{_5<@hNtDr6DMvY{2X7+d=naX)mJ4r!X;PC0UkJp900QaL#VtyMmIE zIYq)qcX53MyW7UDyw|1hw)=Jmb9NG~1-r+#3QH?V!gaC!cw5HuCzVU)l#^<97 zvl><6;Gw7RO2eV3vQi~Y;RAvDbPp4;heOj9h-?7d*q4`HE^(?E?Q;5*KMD&Ccqe1k zLvX1l{IWo?Y|f;cM5Mcl6b(^Mt4~y965cF>F`=t77W>3WoacsLY(b0Yk{80^B#H2| z!E<*vwwNMm^gp@B=kfXJ$0XBKS>5YFPlrYlWHi8zUo3<++f#XGdqMkl>ur6eH0e0R zAgAE`^vDpP^r$T1G8CwUay0Ud)tF87^8|PoZSDGs6BlU9SpT=hx7ar^4dh3(d>bi_ z(h}r9O3tg&TwW-jFYO4Fg0Z^98q}t~8?if~s~SWKxZkrS``5Zjy9`zbR=)z1#Z=GP z)L&kRY!9oV57)`b7Z;rA{7?+fn^L$5i%t26Va!eV`ppj^D0R>ico&FqdF+~0)fGt? 
zqvI&=KjJ!x+q6{D6DoB?#imuuG$ej!WMq&Qm-VGc7+lf4&Ja`$0rX zY3X+Oi0RIRPMx!wOsj{&jDQvv48lK{E!C`L^&YXSq^np{?gPy%i)OAqPcey06FZHr zVnBP-U@djsF)Fhruyw~!75Aab#w*DL4m1Us<;7p6JB6Fp(xV`$RNskj~X(%yysE06_~P$uzFoU!F)tJ+#@bXp1l9oR3>OT zQ4O#y+R{(hDUfhAay==l=a~KnP+117qLoZkNe?1|7z0OZN-@SA+*OTMMt2+<2i)&b zp-jPm^w!n>v!|^iQw@}W$b#%xg$q&O3#Rk#M_pWO<_RnO)^F^8E-sqvbS`$1-5FVR ziPY&PVQJ=TskV$@-m8iWQwyR&94n&9oAScysOZcV^hp30gJJEd-t^gfI6oQjg6C%k zEAn7{%Y;mdZtl-^-V0RKODxu8f=MNU_1XD2qTR3euY$Gl4dP(&_P**bi?7Uw4gLEc zZa`Cs8BKVhh29hiuNY=O-s^IgQKL_`qHpNQpgLFz6~8vc8!uo>2h#rO5u-e5_1Zc6 ze56Jp#h1U5oNgogJ%aL#Ts!D^$kuGv;D-)QpL|#oT^3lxx~!15fB&qz!$b6^!wtow zUg5nxtHo4Dv&l6oZ7jjtuAv?+_L6@(1=G@Cv=SSW{DAU=;V=JVInfTVYR9@;mV&@Y zTtG&==!QVQBchn|_`Q2xTVLLwtr4?Bli5zJ=5|}N56=m-47_3jGlYy^hPWDgPjqx! zdoFW5IQo6Rev!uJ*}3Q)J`LRzQ+b)Ri3|Bm4>~K!I~1X$&ThwHhb(srd;tVh&j$|f`im4k5 zq01VC^eGQvdBYP-P04IqiuBb{0TieMHH7|$fCFRW=mncN zf=N|P7DxbFOo(Lu7Lm@w=xFEt#ke9RYW!yk?xiYX9qg%uS}otvVDsUv3kD<*KBQel z2{_sMOCO@S8Y9<8C#-09lG{m#wsheOzc){7d z7_#GXD!3|vw&*>6-n`$b_*#R@h8Le|yOsKIPot?@G;lWcUY6WcMXa&6>4_rGqHx`F zNmBO{IWq2hwvx_ss$e~3lAj-Z52s!-WCSM@qX7RZUWi{cFGR+eldoTw(3T2j_QgXVQ|kW{mjYgq_PdAzIRn@Mo`A+O$98S=oXNA1 z|B1(n$JCJJd}={Jl-mPGHY(KX+eQmU%|@2k*u>W1e88o2Elf2AlGg@U z2HzZ9hVLWxCn}c|)`RzaZSqw&TT_1>RFBX}!dS_g&>q;n-GA@{XU1^I@_9g1z^M#8 zi}NS-Pn2!!O?HnOdiy9h&>CA5$<_^4uSp(~A;J$XF0zx8oSC-yH$`Zuzn!y$0fMH6I8)x_j@$Lmy9himGj2%4|WM;0fo zG(B5nQ7)8xI?okY{*lDZ*U$)u=UcDLvqr>BWbN^ z^MU-%4`&CgGRhzWX4jG$Sexqk-Gr!9#ir}%X~5&6A!E7s&=nJYfj4`+$D%E|om-*JS=9jUO+-3G2W_>PS~|C7`Ub{x!Rm_iFE3n~7S{ zcZ>li5s>jqkPRb~%aG4CRr{9Hz*t#_+9dFuuSVnwBT1rYTIn6-=~L`c#uWbb)M9mb zCpP4yJf(Vgv|xf_5WA*ZUusr9Hv9N5{R5b@hWc4fz?7Vucgi$DX%pGz>Ncg15z~!E z&4E}+v4QDa)5{s~>*O>~LSrO8tLeQiaWDw=&O01jXlmN)J-|vq%lV zz}}~(BE+LR>nB*g5#;uZFsiV2ce1Q$K1)h{L7jr0dzWXj$fpfcJ4wR7A=-_L3+(L~ z%0;McycdP70|Tl0rP3F9kt{HyENN2_hN`Qt7Q#YJV!qn0q~18Nc8+|5>Wa^38r<{&{I8o zedZoDPUIgy#QI>k!^W^X_N@5C%Ts!lQ(?ku&jZ=G8z+wIrM7f;JexuK9R*^)L*jx- zeS_>ycAkY=Sj)O48u-I+*~!$R!+yOQ@LulI<}y0Fl@AyaX%#!2X@B-5L9SYZJE-F` z8=0SxT`7kUOcw2Ai|6(N`SS}k8`PW)>v54~e1GlV^KCV(HtkVh5jZW$<3%5@ZFB-R z6mg7n)js=t!%Zz|Kl}vu4)8dU>GDF-HUquK{_9nKc~h=h2vIRO`?CmiQyj}JWI!CY$dB~z30?8d7DS|XIY(PCyOq)twa@V zh8~Nn-Sq0fiQv*Y_bau@y^@e)3Nv}1m!oJwS;oIT-up`jjchW?I3z-%y){knHeK{K zqzp4){7j(`-MpiaS?d;NoW-&j#=bFfQe%}HDza8ml&hy>WCbKNaS}#c=f9-6tWN#* z-Cgfv8}rs_*bE#>%Qoy654iUTQc*f;={n5P4hUR&62tmceg}V3L_dSr3?-!D5?7e7WHz>ud4Ln1GLWEe`f6H&bg&{^r-g=to!=XF1XKh>iDYA zQ9Cxuu%=Jns@CLs(8Ro?628Xa8*1}r7r#`iV^9oS|HZN-(fC;5M&p7=q!ZxAnP z6KV^1T&WBQgs8xbR;@u4Pgk&%-Z*xDX5BBTx?24hI#YDNs<)7m+1`6LaFsl~BPWTO z6Gd%-2~6V?@0i_;4*F`u@kuqoFRMgM*Nk00k_c7o`+vGO)#tYCq7mP23%q`C1|ltp z_+SQPoa=crT&v&6VXl>%2He#kuwu8mxqM%sI3laDL31ZUBG7Ro{VuCy(=~YdOy>G#XV*;6{l1hotMbu=-M>5k^Y6~* zRjV;0B@J9s@>CHLi`>!a3N4a>droxMq9&=2GdycsD*x{;0MqndXWDiLdY;kg2M(q< zng}H{xt7nDWV}(Ybq%>eRZpWRB&8m)oVD>G0kJi5BN0o-#!1E zOhTWW{=C7~(}#E2yoiDXQ0cnU2bG-`idW*&wz!*`^=us^aubV(+r;A1z@J}KGatpYt+5iS2yM4NyRk>|k?;+~Bk z;KW{W{qn}5kT!D3jdL3z8mR8&Hy7@|zrxq#klDlKFvjzH=O9Lux1*%k^XyvZN`XvH z&8Ww(C$1rdQga2UPyN>Ds)Bh%6Gt;aRl=x~M}Oiz)aUf2nw+ujLUn5)?8VdodXL|) zsi?uHzSEnup+dAfu0}xNirkYddP8N7nxg5eoa7X}aDT)pp^e>n=mqqz)y#&T6Q5V) zR&jbYxz!Pzut=(q2yEmcg&qvODw}GHYkWbcxWg0|nM)yk@~3n7yhqg?j0m5f2iG*$ z`7$IVjV$OBCn5#DqqsL`J}k=T^1y~9ba_sGSy4RgE9ZHW&f3d6)xpG z$hQ&D2O-*|0nv1*%HgI)eG7V~^UVK^%zTjcW74~n8jsk=rzxIIc`zK9@mmm|kT}hU zkl@#4QBD7iZeS>pmqnlLWj%SkU|o$H-Z=&BTd=<}o|rfadRSdXl}?1151(#)@9-28Fxm1qYP$Mje0M8aJSS8x z$~-GpbeW(zKhO+QzYLj3_R9ddAz!{JwS4oF_3+U%sQ{N+WF26eBS|PS_@(mU>5DSu zo}y>JzRWkZX1gEAX!6`=t*S%~IDbEup6I7x1h1p+#@}KH%Y1`7#E69#FYQ<+?Os1X z9KzvJa`TbDjq5S|h0P~fIXpLUjvwiy|%X_%DB5$yITbKbQOS~d+`u3B~SnKpma 
z`2AsyzBSk=sDn5)8K_>XIF>ldIaEZ zzB&1oy-_!$$`xe9wU0|9C*A8UqGz=S*kFGj`;u<)SXnnth52h55v0mJZFa8-Qux3Yph1AHyLJK)n=xc&(lrA_vG`Uy+R@|v?aNo7|$DAwlCy^{L#j~RC=po`$- z{Nr4I4F8(>Fl=iXgP$K?Ixnn+|7DpBne|&M8E%RxO8A)jiQyRqzX#6V5N1W9{70~NjTdSDs*UCdgOmj_ zuuaqbsa;%O0!?}WzX)b14}p%2d>lF6N-t7O$`53O#|%Rq^P&`|#SI}brOidpaN6I6 z{5+tlyStlglW?V>gMMomPs2!N`M5fus^5^w%cx1a(08+3&c^SZZ>E-$!5m~0H^0~X zn`{g%EHBRq!0XbXAsgCmlE%M%y-}@_TiXpYa`->G^4do* zZ+`5IW0`bpAi01beygrZK-_MC?U zT|>~FB3(h$bNR!2&D`6Fsu->wqd?#L2ag@)du><<*GaZ4Wwl0TnFmdXlPMtvGaa2X z0IXUp9a%&v&OZ)6k3{_Lp)*0|mSO&RF0o}XsEEmexL?E*!vn{3No9ADU}Rawn8X>{ zg0^ko2Ae_9BB3?d6_s44FTUTHcyr`vn9tb&H7fu4Vn&fv7w6eg*-WY2BBY0ppPYTF zZt$AOp~wX6=c+@M^E%DK5&YC9I5|i@$Bz)j9dIHa_sNeT^A)+Yb8?tpU2zN;8*V4l z{ce>5T8o8ihlpn9<6#AX_WrtrjXP@;!YlAMV|^h5|1;)CCw(7Ri!#qMr!o7YHq_eJ zP;EOxVp|fApe~PZSLm3{E$2h5ezvwM1RZ> z66FJKELKz{-I}bjB8f2i5qR*h05Ok;poYmI6@QFEnH6%aYT~T@GLHj(yj~^F{6Jiu z5a-RR5=U$ASeA454)#5I`IW&RL%H0|63yn@PFs!((VsykQtb}rDH{<`J6NG(YLo<3$@M!tJ#*s59Y&W9vtvdE8n|c3wiKxkK^|5mr z4tw+WvfXN{&Tm#VmGOzA@C?li=RKGXe8Tsez~z-+bQIxThR zr@W;>@rNpzFL&?A-B z;RlL)84`gY&lmR>iMgu}i9r$l2l+yMDxs0P`F$5vX+v`)mQFNPsBGePf|7VHbtovK z8h4~qV*h1nD&Z&&xMjvz@K^&NFqw*%m(%|DRW?$~EJk<#1;#Gel8r$Y)$qJV!NQjk zQdz9d+ZK;Drt_kfm~Ne>=!Iu&@N_Bl--ej3{K?3j5!!y+_I(hImMEaHxnCWBabrm#)cBsG zI*%5*C_i@$65%{%a@Olu@yzy}ul=9O?^TV?@OG^z@`0&SRk!jlcA9?YKsh+km(PP5 z3&v?U!KBstw=|ZL-#=7LmiY5E54W6If;h(2+~KUW?F;OW9171sWOaLuidXuVN-fYs zJ~O-U(n|)Ky@abtG~e^gfVWiTPoC|kzwI>zZ$Mjl6!!e!5Vt&2rhc7W@klg$@Fxx>=$gf&-qP9nU)Y#R|l(oFZ17N8A3ezR%DW*%uZ%S ztZ<+H=0V`ZcA^TB>b2QVAZ(0FSco_n$sc}UbEHZ(+aNQtQl>$Kn71+^B*M1>Kl1lDBjf%tzftxCBEod(&` z9B>SL;6t#X#QVQxjp{EK0Y{%N%9Y9MXiB6IA{=#0ui{sULllhPiE5S6{_sHa^kT-yY0Lh}9YMbIW`Cv?i>oaHwyPUeK&DWu$b?JlJ@)l$1_ zfvgLB8t6?klv?h^!|EmwUy$ja!&aDgy)4{u{nU`M_8g`DFIK-&#b!BVhJ#MbIrqEh z#1=TCZh~Fe#FoSt`wBtb=~{#C!NDmIBHk?j(J;BJ@ixx3GI;tWVJf^Lyc!@zYJ!LE z)`&nV>Y!Eys?Jh_Y)(uL8REL4*x+)zCX)Gj{q7y`$x+N|pu*%@er;qf>ENw+f7-Y4 zvE!>{2Q80slIIl0KSFdr6#DM%k-xpLt6X;sH1;-nnQ!7~D_h>yFS-M&h3WTTtCbuM z{%7Df|7tj?!Qd_c9NvH)qUUbpEIm#szYwZ>3&zG+4Z{^o1b zdVuTYj887ZgkezTW>xC zu>c6lis%`+B7Gk?FUZ9I+XX(+ZzbnMFD)3CPsiE| zUbxJIkXE1PQfYah+NYw4W<3lrLJRwax#m68H1o3K{pStlM3b?zJ5(VNA-W`hF^78d zb!me}+MF#QU3k&$#+%~!>!_YAfvVGwOcfY^9oH2cbHD^of49WJwtV-3rgkkYPfh-enN+5~@4&6X9lF8SqDm$1vMAhd9iztw_a{8{l9K zwt{NloagwZ0*TN=+(X7g{~MBf)qVfTSIF-4%4}7WEGMwanpJfju8NnvpT8!4M?VRo zc~gkm-3}Cvhz$-(N|qrlPX*(LuSvRu*w1PelZz@bQCW6gYGj%Di;E6cKc6jHxtW)*bKcn2BGsQ$)nSPNzITWDIx%;B6G6J_yhkKxMKexay|0JWoY?;C_iZKrr)UgEbGTmtFOjR zX?oDXSih+7H zUnWgV1FzHbLsYbQu4Tb&f2by?+(Pjv#!qsyT&nqnUZ+Fsh%1)#ovi@T$raqexV&<# zNDS(FyqqScnmFT8xpcZXDWLr^Tf)*4IjpGTE1YdUI0|1>1n9N`JXTCj13e=kOD$D9 zue?4K+9v(U?r8r|2owU?URt%BW@TJvkf$;pGm$YpZ5grz$cJYx0XEq7zmJBrP0GUm z7z%n2Hha%w(_qwjB5c#&XDU%haAW&2_{Ujadg>(dzN*9>#RYqo^4Sn%IcaPo}Zn5&)(+B%d5Ni55EO4;# zwRr8fjX~)=F(;4CG*@L*QH#3|2Q>bBZL_cEA06z`@)+*sluTPcj+)xr19U7cSZ!BWN68Ey$@n_XQLfKKb74mrg^&VMyf`0Y zS!g_(<7JxPr2j8^>HEdNrRVxxpFK%VX1^X|$<#&rH_(ZZ%({-@d?j!6(#bB5og}6O zHpaK5+6LS4uWbljm8ME+K1;$jq;5*9h|c&T=1hL>u0gS-@;B}PvgQGt_9SIgKEybo zeiR((c$4MKBLhkHrQTDuh`Z(p+{p+H_&9>Q6O!xO>LJecxT|8l5h$|}8}~r5_{kAA z7)v8hDpb_DUHly;>%}QS&)H}B#vONtIIrWb8`>@_nMsKi1)+$8gxePN6EzCOxr z&-XIt?UeVAdA^1it)Io-%2)s~bH05Zk)eQ0_?VF}`~)p>Y3Bx$66e%WLPL3^&4K9B z?e?kQArrbx*n;^%X<)r$c=vb|H=IjWz4H`p`*}$K5x8#(ZtCEg8CNPx&MnM`n31=^ zxw04Ne-Iix!Ay=ZXG90d4p8&@Drvao3^KM`4TV3|ZT=jiqA~QNCV1F~gr4bZ*x$R4 z5m@8AW=rr+uAtQt4z?Hf^pG+UNFQ*w|G2{PP<0l^Hh$1h7P-(K18ycF9zD_`mk9Xl zPY8PwVIQ~KU+k`^wrN2RjR>;ku%`C@>D(o}5{cV^Yz$c&=hYbN-q-p0(3Cwb`CD5A7c7&4hHv|WA9&8n%`^J&BfBHm zymV+sqt?-e;b-NMJ4mww`vsL|fSfU#by8-}^6)`qu7018DpB?TKZ_P&>;qhvP^Y^x 
z++-iLdVKkSYoL`hsrSTcm_$qPt6q+cc#9g@?hfl`HC@uiTva^IGu2MB7_1qXh7}qI zC5tfI;m}(ay9FJ*Bc-4mW~e9TpofF;9k_pgF?AV z{gF4`QZzpRneGpvvS^2`SJG3hq5M|rG<|J$vfN4a^!MFU@hl$0g~B>2G|#XF+NF9V z2-SUeX{vtZU`k;ifCgJtZl#!e`&v|x#Hmu{D7gmm2+6a5I^FDR^z04)rCqwcd%)!q z&mYSm>HZ+X)F^ss<;Y(lKiNibl3c`^aOtWUMjoUN*;n7DUsJGV5o!uAUJuXB$3~SZ zYhzo``?AI{EoiaO2WuQbaaH>Q&=EPPbJj{woZB!cRi2!U%XsOI;q()+s3h6jF-OKY zcNp9wtdr7me>(j)k`#F-CO67`JyX5`y`B=>%OUc8Z&UwC=$$`eF$Smn_MlIt?HHYb zRPb*gUN%5NG5f_$l$saz_nC#oYS3GU7i9&~=>Y9fUFaF{`R=Vr&qMOH_0WA=_v0|V zn@j-r;NDiSO>Su44ih+v`JE`aP4-;!iM{mpr?EgbfU%Q`peni|AV#b4P!yDABV~03 zjtj&NLb&DMuBDG0z+?0$c|v-JHlHU9h^_*9#GT<6Q7hZ`0Q=8e!;%)h*;OOszJU&l z$4SVNv^Ryori5k=)a0HN@agI)v(T0N%_;I#&9%Sr;lp`+$TclY=Dxx2oqiAfq_jOp zN$c??b zag<%C9cOwOGG4wR*6jZ+_IXnrn*)FO_-iHAiiz;d*yUSa-Cj9lOx9TYa?}H0D^HWoxiR6NhY|wYh%UqLhGY8K&S>;6UX_1?LTEu{dDtP}4J2*pSMvz!2rYoYgC9 zhL`pJWymb!C_H_o^VsYd|6elaF5c?2Q4=NksL`*$xVW(bXMfD&p!w`}Si$D2;Tt5= zZs~rmO#O|e29H+uAS>^~Z|7y0{g}T-F#7(aiS^a;2^=|)nK62bHR2LNI%pRB_%*SP4_Btp0r%Z5MHm)K!BYG zzF~^lNm^u2?}5NiB8=HZkXnRIVy3yPX4V>_yiXfg6&_gN8<9sZFKA{=zV#WcupX$P zoB7}h2iN#5uQY+Gu*?F}9OW1O@Phd70SwTAN-}ROHlF&978z1l8}T1Ltw<>M5T_Kg zR~gMPDNWqc5T zf_)pZO5tLGT|G(8+O?UEEF})am+b-h=>~|*ps&2pV6{r=$hDI>T}8=@#BA5zv9RnQ|eV0nEbzbNE zj>L?0Ji=I*xZA=JO^Av$QNr*N8g+tXFyF5YQZk3%E3)~%P9XP+BRI}|K>Z6!tLnXO zp?;6$ecWo1*WjH(D~{RK1;0OLD#t~sG?`${(F;6XmkIfRUKDUiHqqJ4f3_IBI{Bmd zojLjt_ggW*W?niHOc)i3Tzc#pp~7hbHfwOwAB^5`sbbspW}Jqix$d*IHJTfQZHE!5 z!Gxi>{o!LPBP?CR^e!{&SQFMotSLaH%9VXO8+r-48IA0p6Wy?`uJ#;iLCY5$f2oVZ zBg=MlUpT0b0l9d{7yi~;9SyLi;h91EN2mDz3gZZc5(Y;oa!SA!2PCWZeZA7(H7&jr zN;QlheFbuZ?_Oojjs3|W9lCP8HCIVXRp4E%Cs^4(sxw+uNy{DE0pv{EDbQbFG8(lTy`K(Ed40jI>1{JXfXSWNs$dupKmCrJ47@;e3GbC!&f-e?nOrA2 zcFm?uOo6`2lg>#f!du%UM3e5OE3RE@cWh%b%e8#2xzR7lm`fx`LQ#0=wa%M9O;fi# zB!#%@1ph6)fXrbqHur9CwYdxro?p!(fIl6J3|+j4!&*}aDBJl55VRCMCVwsgPgq3^X!)!~I_NhLx-ei2xl@RW zSWNKPO6BBYJOZYPt12Ich?X(z5HDc&(voZX#3e!fDkyEk%_f3mEmh?0fl`I#^Q2i{u_BYSEm*8r-Fu5u<^VjISR3m`uP>{{1>p318R(q5 z)FVUoQBtwjxD zv@rOdo+@_?hJPsotc0tl-0uzlnEd+;pMm)uL^+2&AYkZXEznG5UY6E5}7@<8svH6wks zp|gZ_KZ`*_Tq;6znUmoMdZj)%cWN*vF!qeo!f!)aQeaKQiWP4(hRrRs^oDaO?$S-1 zmH9Is)YcTo#G2=S0FR&cztRg*OLwgBPZ&TG&)!~_TVKvKtD{2sER1(V&U70IaQ!&l z+H$rEISUtE(y2ac(^~k_GIQ5W<)1hO*S3D0IcVun{dY{Jp}Q^h0FK8=M0>Bp<*7f|S~&t;-gJ#zR%jeJ zRvfTdz`06JzZQIcB>c>2J3*``w*5b%t}>|2t?SYvrKK%U+_l9WibL^I+}+)s-~rm= zQrx|Gkl;>%;t<^3Ex1E~FZaFe{brJx{7Gice)idGt-aSdVO1yj7#Unw&Xh%EwW~!u z*0eAiZBFFTGY6S=74V|g(Imhplvt^szz$7}wyogMo899}WQ5jSz0~#1ZO5zovOdYi z2&o2^gnTtUA~l$mWnpu_9b<9hUzZQdm*NQqR=;ynk+r(YVJDZJ~Ps( zhD3OQ4S064>bUYLwe3Q-mNdzpXv)_nt8R!(cX%>fKjxpM9sAHa{o-}~xdB2>nPhLt zogn#{Ftd=THri+(=UJ62+%ZZvu}7|W&UUIkcO_Im!KF7g8|*iw3w-<1`;M#f{?m|! 
zLt@rE^IRc2X{~S8fJE6@4RYKS?ZkA+S$ZSk{e7$ze1@RpqTPdkN8obhIJ`c??Kw;E zv;rq&`%qt)4V{Bk`QYWmI%iI{&rZFYgp8`#KkKCav(7`M31R6l-JYGd_!}!p6Aze` zE5QRoN?+}k1#N&kAo3{b3O)$yjA7FcPEMXh?jQe{kggleiVuiegbO{lKPfLrS5LW* zLkFVR-avV?nY-vEU2M&G$&jptIUc5Kzds1>-?rQJq~sZgTuGd(jI6Zh$p{kFEr`iR z^QA_mf^@xIK%U-Na|ETns`z{t*$xNGnj7=4?ez}IyWA#sYKiXN)JW*>A&!J(8Hw2} zXcklq9g~!Y$&jhl5Z4+N0{eC^kCheY#)Twdz`<~bK4>e9Ppi#XO$OF^HOznZi%q0sFWOcag z0w2&FflKf_no2IpIzOf{K_jXY>~7a=J0Y$)`3}MpLrPK`K^aOFKMhDut!dA}V%L5D z*g*H+{kuB&Z3x5+-)Hj4`bnN8kMIa#U!n5mNkxRc`dKcOrSY_ETeoHlFTPm%?ymhl zLU2RTYhGd_V!ko=PFs?P9lU~aFyP$&Qh>W=U_^K0_Y6sv7pe68bv4af;GtJtdDah5 zUD=^_^BDgk8rBH9e@`#x(zucmqOj<0K(^Wu zj3%&x~2Sbf*ap5wp!_}d0^^TYBrZ$@bLhg zEzAtPmwNtmCs$Dnd=n!)6L&_a>04HA?aKYnsb%Zv|FL^Fi_#B3{qZ??ITaF!dWCQD zJ$-t2@RCQXk9X4M;QE*wlJYx-Oh6%0g`YUb|6c6olZeDXR{^(E#8#%PU6@UdgeaP~6`P{pXTx=8b}aY9pCNk7pP$Lu@nWWePJzHtM7 z;HNS+o4ti54d1kD>Xb6)yDDKs7IW8Vh}B^f9<|@D6+gRjKw%lXip_#|(1`ckC~8}P zmBcK8ICAU}Vcx!s^Rb+Y@Dq>$_S9OXVS2oQ=RQv=^(erS3c`CDU;3vbUL^QG(40(2 za~*38xG03LQTSJ6{rr39I`Tmrc0aNfho?0}LY`5gQsuOumrSYmG3N{=CuqD33dByg zHk>;f4v?Q8rRM=p6H%YukN(ymOX?SZ%e&l4z%SO|P~M?esYH`rcj=S07j{dfZ59Bp z;50D<_z{s%b)`i9o12espz`&C&garnMW-Qs@QngL-=iz>DJ7wei2ykxZUMz6oNFh<+rc$h1=n#2eQ0>{Hls>|7-sj5!GAnZGP&wj6L%cjP<#(IaouLGzm^GFM$a= z98avyTjVb91C>{{XTz(BO|tLJaa4qQl)Erz zi<$Kz{8=s#NYPnZe>NDrvlzm`?;KuNXDTEv*0s4Kp+*NSm1ZjOLJ<2@N^133xH=Na& z!^RQO75RmurRx;hv$&bcEoqYI9k+-dDH)fRY{_}r!&Q!{xER^vtMLg#WIEuPIih*8 z@7`YRvaoqUbLoD%%8u$a!oc|!2df*TI%9v|y1?&TbuJ{WKWL8TCqw&bU2T6=Ui#$A zO^5~_--1EC_Z0|b_)ro;XQJdfEKU2+FM#dqjay|A8MHn4NmRK;9eXY zJzy%X(5SOeAaxQSJs92_C+6u~!h^!I5=!+`2U>Ig+OQiy?;ds_y|E)TpEc9JfJ;`- zr+5@_{Vd?62nNzWFJe{YCoa=Ka@2obpAiSaH}oWT`R?vlc(xPX9U29%n!|rz;SJsj z!3QGQdrQN#se&;o;e?OHVonza!d*Sx*W@+rYPt&J-wMfzOpY3#BQj^CB=W^t8K|Yi zr|88vxLRpDBLhebqXw5O8-k&GRO8ArJE=>jtCU6^K1f@#^$&UojqrKFf;sLQbDt>0 z%DNsq3hE$c=o>j&u*+Yqcz4dRMxt(-68_O`{nyfQ_8O!#4q2>wW`*U` zo-A%jyeTu%wQlzEDIUn+Kv>5UWOtbFI;9oASvgm-uA*UCT+hAeHpF`EFwa%ELPa+% zPLuCs21k>1NJRN>>M%@>N&iVHW!FzSZ%4WiUO{cU@q#jJ&za-H#pQ3&Cer(2xu`8Y zfeeKD+QH-J(Oc$<-8!k^FVTHpP&w!Y_9V^ZnJn46YH&YeBgtHpHH~BtTMji*_X zO(_;eR~EnD*Arc%)LTkNz$mv4lS*p;orT{|J0P+>`;2a8thJSNERorsRNw9h#%L-} zE5i$14Y%HnNoGwVxy6%Jxv~QKV!7y^H?Uq5+x)0E3{y?dJsuLBcp%I57PIpKRopXH zwUfZVTbK9zNWdgw^WkS&ou<2dv%tnwY#U5;E(<~(DBgY zv|vn8xJn_)9z_0eezTci__ESYoFq0^+hbwcw>0OvVchevg>d#>iK{&nhQAoS`%6Hs zjw0dTJx6}`>>c0B|3xuGY`~cjCgjhd_7E>F^WK{WgI9u!4~I8$S*OAq8<{3rOftrR zvRa_6<2ybqtD59qDwS`IZouhiW&Hb*-95!V@?c&UNO)qDX2N=H@ft=eD044)tB__|ja}T~3+qV@F+;JEYo++db94^ zwQcCrCcV^mt-#>;i2tJd@1nKIbK?Ge?mX0sfjYWM4NuR=XCTu@qK+k$c@$Lzgd_dk zj`wJHpyd04IHu!>re{F;Kky)b`|^TJ>ECj&{)3g&K)>)=VeuVZXyTVEjF9v& zyzG<|US0yBT(q492ao4(SU_7nLik8Fsg-79f+01o6oBpBcw@JVSM~w{B8aP3_sRp9 zG=v;xh7PB{NdfvFoXsP@G`<$Z7rE}y-X@k=gf2L5nN#jWOn#qsltZJeE&K9ZpWPF! 
z;1{=1wl<{XgR7sS5yAEAA&KSnyaeORvCFl$;X}nDVvHE>*dr0!a(?47&M2N8aqlq* zZIfC2!&a@9*6$syD+P7G>g9SL-ZyHhabJ*U{|xtOZ}%4OQUlO#D%(d&v%0X)@m zV1%P)ZQt=i=~OYaZ!XPt?zt zZ@UWsR zTqPR)4p{9UpnOMv`2F|q5KVmSvJfUf6}Mn1Nc&=YMvW2WatQvw zQ06$`r7LPQ3WbPnfR_3Br|FkeKe+Vd3^y^C7p(h@9*xyD2Cj@&Nzf9`xt8upjY&kr zKy56?sP0lH0m3cc9o?#(SPSwFQ`}~`&e2hi1%8z+Wh7-0^Au{zZe%=Y?%1ZBn*&2LK!P8DnRwEL^1mg!M@WZjm=dzC=g*??`CSbm z&WtVN(?9-z*7#?yqdl=d+3*gAh4#gD7Z?w&ZJuQ~iw$3@k0~f8>~4ZGe+;k365;}S zqeWKO{YvIf$~Eoal=s_l1uaEpa0{5dd++VMGfW((4fT)(W4t?jogYs_aGd-s1hi~N zC7yCsH}f^;T0gap(75bbQ6CzD;`S%QO}uOd^W&V0>GL0KAVoU0n)m+O_d=VJ0K6^M z-mYdsut{fGpMW0^JIW5#{B-0IO9`eO6pL&a6Kg4>gzF4dquy~Rwp!=@%l&dz)2|Ph zVivXplAJF7nN+Gf=5;+alH3|4Iz7V)7iGaXViq3BRNB?l3*fd5@2V<-9?nk_Wo{|e zX;cd{CdXZnzEUxWb_og`>8^xSJ6c{fFPghr0{w`?4W4i_D*&3$|FCRY5mB0dKRS&% zc06bx1(DHD)$r0cUzUz5o zUpf!19oKLCk)6#v3Wqh-&GgvRqIB7wAI}#+p-{n_5rJ3TGsP=>^{3YkrA68AyV_@!elr18RcOdcrP1>8^4d8=6*Z!?Z<>uWEgn7T(b+Ry=^ z26VxsVYyernSrE{FAsBv^@dK<=Qgs<%s**(Jc3Yu0jI?i*c)Aa>vWO}j~(>0%vTO9 z0XjNUZ%1wt-v`{d*HtZLgzjTI;MkRXBZ0 zwf}9sLKanqfOx_ z#O4YgB$DIQZ*9v*EXS3UuiMT=H#wSwCq$M#RaQyV0u{HOIYuK_ybvR8e9mhTQGZkT zmEsBZ(YnIpb%ul8){N&GXZ#Z2<>IZ@z$p>%38ho#Y9Z`l>_gq?-oT*vBl5)P-6WwE zTYo}QC@ef$4G#Z1L^`(LiGMNbbgsv=cuhy7u3Gt77hK+E2sFmgWS&jaUhB#xQV&G= zd4yyL#Eg}b-fG((7)pYP0M8Y8)Nr&tjmuTbx?1|~ko^sc)fN8KZ+f1_*uS-WoUv=& z$-nKH{|H2#aXyrvH9@?q#?9pnS<+2OJ>6i5rWobTYoZ-W8qPNprN`nu{sKYu#Qcdu z{sjp(DO})v!gFXXEzc8v@2zG{JD7lJq+IO!>cRi*jU+bJ@giNH4l7PM<5e})goNXu zkgDDdxhOv&qf3fPuwmcVB&ze|GrzKDO;P313}dHflCBQR!J1+(bUjl}J7Kz3`NJ_}Q8PBJU%FsAVhmdmfo+BAYaZjXb;4lP%+C9UMY09Iwtm>u-;>?Ac zy`VPWYo3@R#lKzvaH$@1rm>$ML};l|?wheAnJD;VeoPx3g^)`XHN8BL zdJ@&%oXqavZ$O9xEN2SEj65SbwiW9lTlv*De8*a%Evrs2g0MBJ5&O7$>jdWRX`S6E`zO z7nW3BF0bTw>!kwS|MvD_Jm15qF_Pd9-tz?he>SX1r)Rv_1v;*$wH?XDIkbnT=N`I- zrBi|^3m0}orW@EjUOhPOm{ZPUir3_01%y3>xPAY*|Cw}9kgfLZuPX(2`cQbgqr^t! zenBUua5f-?xsz+SsBK>Y9SPMV%Z{=aLV-*!tJ0JvGRlD-JbNJdeHM*wB9^*LUv zA6urC+kdRbk>an|-Nh3)S$b_)8Nt-vJIYs1PkgpsNm&#Y@g?J@^5Z%T*7t$zO&=jY z2E2I~y6zGs37O0pWnWCB_2LPGu#`F(c3VS%Fca}qzQ)Gb^UX}2Af1%gBw0{HRr_HZ z(uMor8T8Om0=t8rBZ+G&o|I`>q}l~?NWP;8B!C~6k{v)>Rg0Gf`PQDXa3x3Wux zTuM}51p-XgQu0r!eaqu|?fXYBw}C;wZcPw$X-p1ep&OB}esc_mB6W>0u{c-NOXL~b zWcJNfX9!xcekg-c6gb?G1g+FyJZ?y`O2{L?gL6VA{G961ku7TF(fc**if-t2zo)>h zR$nGUjfr8Z>cPmHxTkx1wX^g980S(--nV(E z_PhOL2d{GWSdK~Iarsu7ro?frk2Iq$hDhv-EVLLpn&=yx3*!Mtkz2F)A$e(N-Y}qi z%C)~L`cTPse@+E%ff`q}a2;T9LS99(&F=6znwze4v9~cQjPZubeBagd5+l}65lcnB2KUgxF)H}rwBsy3( z(HKiDoeu<1|C}v@K!?%(!uESf z8rWt}Y_w}Ee!r4xnxOMY0w}e>K(yg@G4*b7%zs--Xm17<)1(ef@kZw}m@~y(yf8$n zO%e>#^sY3p!5C|^Whzly7trDi7XWm(0 zp^t6?hmJs$1hi(5PAG1m#Udj-LqJ#`shP)z%!w)ZPHT8Si^BHqzIvh@j}W19HA~Yc zvGs55>=XN|m0~_Fckhvsls8Xc%CyD(?$J<*%%6_jklmEs69osP7k4=hy=WJQ{H6%Y z^a)yIMTd9XHG8;zKKekWxj*vb%HBQN1HBpC4IWu945{Z_gM_ zf6J$ot1K_CN3vE~L$#rbf=Q)e8H@^zP7tB-n{!I#TSoJ>z{j2Y)7A*=u0M4`ocKS+ zS_39So_TghO`roG#X!p@SupsUdF)m;D5Fb$42)#)f{;`U8ZnovVDY8wtK*7S54_WS zy398U_$kV}b@iYO7kWwyGA0&~RQKF9c?)-|u}SZO4%lUyY^=3SlHATFWV$}jx*yRB zhfr+cK*?>|x9ENvD+KDWGk+PX8G!DdyQcCKbh$w+$6lPRACeP5s>pJ#Q?UrYDGP^W*-87|H1%i5}2U!I0JcthvqmBFt~fx znB327l-n+u#GvuTUo)2Mb)zPQ(mXEDMyzcaGLfLB!ri>|WS37?4u7kr^H0Bwpg(dE z0rX2kg6$gsfi@mI!w94q={Io2(TA$}d@S?=m^VSSs@Jvk^wFapHfQ>Hy=d>ges-am zehrJsiGtFJvAV%e1e>6IoH3eUt0(L+MS8gbkTe*9%e}{y_q0v6e5P4qy_C8Ofxhh? 
z&zKmdMNEVkVttvd0-^LbJ#QS-`5VeenSl2QOwoQlhsrpijOD~EVNW}<*gj9D z4OKX#kqja~%W>?Y%OnSV$tbi(@C+tGx83qi3N5lv3SCwQq*uIhBQq>-+oMO%X3#P0 zjyxfymkH*=G^*QsIZcrXv`Kv`^_O+kn-=2)Z$WOXEj*r3-`h)Y)yadVP1SseiFS{x zH<>$$ytUzRuPQX(dTI-*QmnH6Fpe$ZF|_5uC3xNT;oo{1W%>X0w3D6j4$GfxaJIxX zmj@ReM?zjL2i)$UWitcjTrZHoe?0iUt1um}A8lzvp6R;^Q0VS!*IVP!UpjvL-jrq# z=1@rQ{2|WGl@|qO%6x`Lil~tY)5Aoxxys%n%gQ#J+X-#@hMyJS>u*Camj zh#|1mE$@V^D{_j8%U>p1IKhXFELBiAm1p35P#g{_@hsZ~wqUb7CYAlIGP9rlt1|2g zCS#~?W?%iHG0AGPmk%iW5XD5pSx_i%am83l${w6#d{@A~dgeKr<-@kUgR1`C{*d=B z#v$(_42z_9!Kd?ZSKHtXc_d|_d5sI4$L(C4_N8#WO2HafWn%fZef?|(cv5^b5a@&K-){!n!?Do>-0lpxTfBM>VrVqqR7H_r{3B z8F1RUW0M(Ewh^>d!}z4OdJRdCSmOE!F{IA5W9zzuy9VE0SSL$g#YZsraOw2W_ra|Nv_9KyauZ}D zIV3y&bl=NgdClTrJn5@J`E>Ep($BLshxsW&=?w0i5^Nk$zo?CJGRGE|Z)+JAmE44~ z1XMqD%ULM@yu&-U|ww+7fUMPeYs6GFo|*CL)JoJep9~iEZszh^N&h%{Lw@%@dt1j z6HV51Ns@PdYsh~x4L7I~1b|S@d8OlL_y(}@HuqJvs@>XXv`+)AWLZf0fhS1TPcVm| zJ}W)%>nJZikENtcnzv(>;9=_Akv{T-CxF_}>)P?sGNgtnf zL|ZJ5~mYE^rOx#N}DnNm>AIidz?{Y0Sl&fYRvehy0m5n$?mHR;EXoS{d&+L7Ab5Z zNM=U$X_IF5`sBqBjFA(;i#9NffmFP*q~9|l#&b_VsV`ko9NPLs3SgaVpU)l>=qL3?Csj% zSNbFK4;gs<-yDj^m=~8Bp(aJ%XNwt9#R4s?PE)~_x?FFN6>M?351$d#SP+D(BVzrb zP#bY{DW!)ryvAEKW=QjS^$*`6#o`uo%3(cA{k9+i4eDk5D?o)Y)$!n<>2SySPAE5w z2bQNvt=%7LS^NrMEX|scY?9Vadg;&^9ljF1aAjdX9nvR-t<^fHQqvvI9QGnvqq{CC zCYv}UBd9))Hu&>ub*Y>@cJ(<}m^<8<9YUJA3u77BgjQ2zWC=RoCvnqfY1ji%Q)*v_ zL(~C(civwDi27pJzd-Ch5+rC4TNPEROJ1uVv8~gmN32m612EFI)8R}N$W%&wVd?B$ zv{fdzyF6hTMqZKxB;Y^ZbQ)cR__c`8sVH`pRZ@1x^~c((A`!X;^b&sk zu8o)3lVWnqhqL~!dRu2~z!(>0?s?i!Qo>Z5ug6gP%Xa(BfW3}@DC4jy3Cu2&$Ki&m z3%5y+`S;T_<#T>dm(i=m?)*AR#C0zYTB$e4>N#2j=k6ob%D5PSiCauAnaYCEg?BCR zL-et;`VXzeRb`~%fT7c?nB7{9isGaVb;B`HH|~D{aQ$=eUs)#_s^b`rHtG;ti)WCH zYhYOy&EOp0PR=xt34stbV{W@wRg%vQzeEVgxQzAIFwO5cJkNGK1&@2i@t)iiq8{6E zb#IYjGRCMFc7)I-YKhmE$obG5oEFcH6i#uZMvN>;1R?$oHLsJx<>H`D`@RL*6Ne(U=+;vC8DZb=selEq z$LK%R72|9aNd7NsqP~ynyc0lQ2pMZ*j&;+p_*#=)lJki1-P_L92p`Kt8@` z6`HSZFyc2XZ1YM0d2ZbEJ_NaQ7SP)+>-7FRdQf&YV~?iK;gyg=SXHUNS%p)NmY=bc zT)fhDV+_b+I$=Np{3Rlgh3WM2dhrcD@o>jfM%=rNS732h8zq>=X>XBp{YFJ(wbK_s znWeP86vwyOn z+oJLfU23g8EPu}xTY@7_Fm^P@hR?cHZ#Hf0`$q-*;Y{-RMWdR=t`JK$8QZKQNk3{t zWF@jQDt&Bmc17ZTaV=UjdqF30&B1+dH#hZuC$uF+8XJZ={R16jnkY6Few1tufgwyS zHy7WyAkEAkw^XXihaoM~eegNBR804)l@223lC)o&;Q4AIC_yE^x+Q2PP7eo7-#jsY z*Eh!0?VPD$D+QkWmyv(W0~oLW(RbI*F?(&pv|x$LXbt$YH47T{bMEBXRyDgjS+Lda%gwwxZ#r&jiYA5FQ9T-?!~mxm|| z88J0Q8M200a)za+6GJbND`)V25du4V$cBI2d=mQLhGVE~K_Q9cvNrzAtz)9CISwcE zy}?!VZ2t}O8)F)8DE*-KI|w7K z^d_=@f-(31!N?JNawzN!{#;Xuhsm2ME0%fl^Mge3OR)9Ax~61zbgjeNmP*__ZI)mf zVHtb|n;`^j79JEi7Qc>=&JreyW6u|OhBpR@^s38ZMdiq9E8c+w85u8M$HX1m(Y!mS z{-PBShZn%@T!{>M#N<|K++)wecWpT+?U*9Lnjq3Zv}e(clvh9^!{!p5A++;lXJ=PZ zWq;TGK2K4V{kb3TWz|D>AB6dnx%)E#`-<+uGB+}P?>)e5wKvlTGT#qDuQR?~)p>Dp zABnhZkRm-V>r5v(VioFJ2}VD=?jUk8@{*#Hx1sAA)9Y@{$h5@-TNF6Rv^Gx==2?W( zB<~mWxf^HHvU`Fjh(#oRxO{LYh3U+eWY_s=8F*79&=gk%ZCY31nW#-CoLkO<#P!2Rt!l}x z4!+}Gm7W_(RTdBs6|97PHhEF)rS7L7^6^8cm0j))#av zG}$yCK?LWCv00fdcJ1$6Y}?SAacM5oe8sOiHJh{gbbq=S_>1RK*{&03uwVmVD z`kkHOM34!T>Wu$4d^~~w4RI>BtQfr%5ar`)|LMD#i?3&W#^!J@2Cw(MOrqzFJb(LL zCZR?B%@3<%crPA&;PADg1;f1ts2Wg)2V>exNr7qmQpvEo-_{Pm=aZ@rCsxD_Pl)!z ziDO-#H&q+e4s$Sh-tTXq#0|dJ2o{LG;Wo;sAO+f`XNa0-50Z2+e4p|tc0`|F60DDj z>^45Re?Vz|X12lR-boxm3LtzHHW|GlRA=vnHo|o8Ae-ui3(B7D9SOK>xV=+%5iJN)r z*=B|HS;PMfTID?pM^w4tU~x>Aiu~@RKY+Od&Ej+NALpMG>wl_BG|{NvmAp=STEDeA zcFWA9G{sKMWgs*z_ikvKo|0x}ney@U95 zs+YVXfFwg0OvN4BQKPhXBw+hnKJJ|*jr#zoTR+`Yi?>3A4;ZGGVScYFKIdQzZ>0Ec zE4MU!`7f75|H~zes@LJ_2o0a%aney%5*L6r^J?WXjeMl5Gq0U?LJqe->^8u^<{dlW)H8( z5NBALvGnB15dIQ)jOW)JM`6nsS1PlGzmSL6UFF&_$XP 
zs6Bei$n=1hX~lz%m^T-z|0%DK2>s1sP_)anj*AW9Jb#npxYf*Rjpq!(Hua?>Gne=D zM(B(7uPP%qLh`A0EB#i@WJt(d+-gP-`)0FeQwJg9fr+*K zDKtKf2v`otj=7*#l`*BJ>a)td?RY$iZuljQX)0|!S5pbQ%SzFKo^fkuU-ri6CgA>2 ze!P|7>7+KHA(N4TH1wDb$_q7ufU`b)5~-QX2T%5<_A~ZTKEImIybsr@W7D!;9_MAjp5`hZ&B%T2wDXUZPEUUNx$qwJ@&@h!R8kh zx(`uYG%ub-`h!VP#dLCHT3r$d9t}x3bW2oINmv$Xv zmYg-_);_p@TKu}pus zsbW^BM`_y3rLC_sdhk5HxZggJJD2`DzGO1Qc_~d`Pu!1_E3zJA{)E@te-2q8A>%)v>4McVC0$J7DLJrGSr@18#yrg`{`PqB-)q4-YhJ z&c%V)***ZJhOoE(LZIJxokWQF+gB`%+)<-CeN`MGjGL(nU#W`6Fz7FK0=XlN$ z?&j=b(mGqqqMv{MJb~#dh3VeCZa|XB`qmU_Z2cw0X4G@Xj?(5iFi3=AqO~|cFWOUe zV}2m+#Nz3ew)*Wera+EnDMvd_%c3FMUOBOu^+Do1_qD4V*7NwMU$AorQIRo4S9i{} z$RuF2=gtH7+1X%2g3PNAPC|aiBDN3vOuDB_Qa@4>SZeK;Gk+|V2F++9vR9w68J%g- zG7?73?_Y*|Yu>2yZq?H~boez{ZoDa3rUwE6!tTHoPK+KF>dk%WsjmwWPaB7rGZ!0B zYYog4A0+Ubt3!Mxn_I4C7wDmGlwc{ispZab!&n|3FyYc;WGijbeYAP5u!;Ey) z(daoAlnxF><;-aamqc8fx=@>pBWTsD@3pXK7e}Idj3vI3jI>}9&}%I zz@|!m#)U&dFN0pMuo!NXsV$==xy|w*XEc-l!|&Pt6Pw=bI3eSHJ{K4Ki#r#I0P%bcGWc!#{1YOE-AnR$ z0&zzLs=$S7s>IfSB6BZk1bVF}-hR)RV>|#-A z9;{w>Tod*CPqK&fLE$c!29g6c_aZDmCB`;i=CVABbq+72~uOrKG(p`=Is6p1>G&U?ZlN%UzgeFuksZ6 z|A(O&BteFl{N9G;-KFyJHj>nC{DGrv5^8BIT!BzKSAl;1Hn5L`X+|My<-_E~PQ z&Fl-63o`*t5zIASqT{tla3YeYbo1aGeQ*ecep@({G&e6WW2V^`DdacC9~_|TOxan9 zIK!NX>>3D_8E=Y1K@+>EvH0g^c2a7}Nw0%KMe@hk$xA$WEr|6B- zZ0=6xz{trR8aEX_G$gv&^$qv5eCN>BxF0tEoH4G=2NL7L6360w6Ac&okq*vxfC4XtKT=YnhoC+Ug3BJ%B>psAFAU6?IqP5K7~(zG8@9A57tv99Lm89JS|>BZ zTI!3yY)|}sO+>hQy)7mgMHSw`p=f@&P^DeT06!bY@x1E!^Q5}EkEZ74od=imL}HAt zsdNOaC_8!=Qa9%)>;@47VG5NnvL|%m)-4WG53JMla_JcpkJXWIR_CGDLk2g(Z}X@X z?*iu4Z#c_Kzg2Z98A-6nAo86+G|Hsr@Mb)`< zO`}+FcP9Y?1b3GNC%7)$-Q6X)ES%sLAh^4`I|O%U;qH3Y{@(2UpFu9TVvKb6GrMNZ zs%oL6d>YHAez`e9rD}O8{JFUxf!nq(5!F51$Xc0ZqgsV&=ycCnu8@Ozk$Cgp)BZY3 zB_OgybRhm#VE3HvR}mZne zyX>|@WzPx`8T`{6C6JTJCj(CXt<8;v477wHTNKd}k5fhpO1y-R!6FdJa4oP0?ZAbc z{zk9256-f{cO(cFfslbGl0`r*BnO{h@4Ai^_+$||_Op|a?VS!%d)#t2mm>q%BfGoo zaoKPBR(GrhLr$oy$1DZ_8nQ{YQiV-xy6&vrQ!XEIt-62s8*Sd2bRS<+U?Py?o_fh- zMeDo43Fj~di9l*?ai(tNnQfKzH-73(@2_9dYR51Mr0&`ouj8Q`Zl`Os^~Ne6ujf9D zJePjP-wKz+Or(^hEW>f0#+zK$&i$0sN}4EJ95+>qY()lxmH6B|RiSBviHAns zC_?gKwfwDX^MJ|7BQYwuSt)fS-zb-%m0GEZAayHx0(Ev&c*2CJ!?0waek#5G-Ag*9 z{mA0YS(PMZP{}EoAt?D>*ruO4tiLQbAKjNvJf zquM@aULE3MXbsQsplFBr{6%lEx#XoIbZW7|(4ep|I=aEL%=-w=+^SeQ@`xAQdSU=C zvvH26b!cLqSoq_C265WRNFYOZb~F)k-=^F3op{B@glL6kTQOxB<>$PgF)ApBr$_87 zerqkfr%oeFM+2S2=Lzi2F0gG=A&lMEcc*uMnRg7x|1`!9+85qvh~$O%j~m5C8?)HB zampFrGUOSfea2}!zt8}(+lMRw-+0C3%sP>Vv3Qm+PDSM|CEvp`I64?t1#hH@hnQVe zOwDlg2N04CmRW$Fyu{NKD&&m%cP>_Ux>XCF?|-(51g@p@21Y!sT_7ru$wK7QfCdSm zoYHbqEj0#-e$eD6Y;&Mja2@_SeUV97&v#Qb((iDQ-=q%BfBuXXKLlsP(-M4$8FWU{ z9H+OZy!(@-lWo*@kXrV|G&FVPG?K!zSCv-Y->8#&D}>@5;{6MPEAik>X@B(hvD|x zf?u@qT1MU>T?>wz=8r_)Gm@bs%Mksd}#>!XobNucNe!PagrG^ zq;Y!MXBSc~QA%x`lFq(`oWTO}G&aFe{~#-kTUwNw@(1~yYH+)Mc!cOj^$e3JJ?^*u zG!Qz13rh)C{0`H=RL+co;E6gii32?%VhH|`VRuDNXi7z1g(*b=NoSwhrp!R{p@iEn zlBpkG0Z6*qcNESIT>3pfotUle&^&dt8ys4A)28NY@JvLgzW~$d=Y3j;^Qbc-UV6H} z41V|Z30%um@3iFU&~C?sJEBumP>KGwm!P1alErN3+z>7*U%Q!_4hZ>xw-%l#uMJc< zuikWRWpO@GaBtT=S*nHGvR@>O`6dd?>6r;Ylt)@WAQ_#~!N>d3_gf!>Dma4Wvr44Q z8gy3d0AV5)pKPvXd_v|AbLz!mn-yT3oTC$k#gBqf=_W&q)J8vR%c@kx7>e}-aiK)- zE5~BkQ%#bGggxhfndZ3v-)0PKjj};RIv&~KHa%#O3qA7GnjG<*)bilrKN^0q;>2-K z{amTz>=ifDWI*k>3@`*sm5sRZc!*d79>(qSh7MiL^xraw>~DTvFAD$Kx~r8k%$a`% zoOhncP3&66TR#~dDyrB$!>`Pj^{ta+3om?Vq`GxyG2BH)CUbxaK^4Lc7VC}2O{2*u zhOz&!(&z4Egaf5hj?Zt6NFD7^c{gtM44kT}gv!yU*7banFt;e_Sqyh#0k!E3+>=+aUKks1H(@0k{;9i; z)nlg|4oj30zL|}u_d=0|jqKI{+V+l4zR>$>*VWe>499sMcE**#fFVSdAZqdFH2g0p-L+sN;rx^(0eH4R6PhqjoZwz#qK+8V zlU;-e{_Z&HsPJPrMS&NpzY5Ka+KwYMFnM&2p(o8XllcM=v%zOMbv26!m= 
z&o>AOVgK#DB0@k@HOI*yfBUC2uAgj>Q6nn z-Jcl2CSnu6=t6{$*Yv?MBLa;k^Jx-%PG?Q+uDbJmcEmK;ba$>osVVmGyf}LZtp)zj z@od51iuIL*nxUbgz9B_w!;^;@X3JT6WB1zaM-wqQUA)4jhMXKsT|Le#Bv^!gsoJFE z1m&PL5?S}xN4*?_cUNR04rnv_rZ2vwn)7r$M~l`L6=J^A`c13bWLhSo@e$+a-N)lS zL)Fw@o~dR4rbe%fZ4FnZC*_Zbg_#}#(R_!FO=|@6%VXzWJ4iT{U%%hX`WxiQ_Z45; z_C4FZd=Y$Ib(-92+KKLM24w-adP9xwPL{$CTB4ViHBT0+p;sDgdZS7Bs!T?4tydak z7aCts>A>P?JMb0QTcnxvraO^)4Fkg_j#P0p%Fhmu z!CTUc=f7viJ~~8$Ua(|Jo;u@U!`Rn>^xt$L;NL+$)B`o{PYCW#zuLu7OL7D`F=v^( zSgdRJh6&McwLd`Yg=;dj!ugr{=G3?mcK?JKASB%-7AWt+u=A@okhJ=cTwWr_pe574 z>HSQWX9*`8kW}lgv4f*@74;?^#(OSz2v|E^Z1eokP^KAjHi0W%|D&Lp*>jAZnB!6= zlGulwpETPb>$4;8>9!21twqT<14R$vvU&{e=b>@3Fz8!8jDwK}*r?d+Kd4PH#MU94qy1b~Z~ z(s}>xf|U5pE&^C5*a|T&>s7UNZMLKAw(0;^Ad?zk&>KRa!;Z4Iw@1UoG#)lckPkQa`5nHIVm>hYe@VY?0H0Ph3jkyy>n=f&h`cAtyXd1TFn=oA=jT)>c z@cY-=+4`*k;6tH?Xh|Zknc9hTv$K^$Q|{(pFF+{hLth;2%FHQ)Mr_EoSEYUJ+yXWS z&rRl9(RSJw&U+5c-tpPwo?X3Hwexfc0}m{ga_#~^U!3f|zOI)q0-kXoe%01u3B5l~ zUM?ENe4^;p9lLesPvLeTr;^JELt<(}hnV{FCtyFwKEI~l)X!3X$A^MJ-+{vY~Zx_ys9u^V3zLS!7&R!LR^ka(5 zDIAqbUf<9MWh*P=?D`rff;HJf2@df(tH?-FyKaMNpg!*#fw)~2}p;eeCaoyAo4+0oHq7`ntG zIB`iR_o!%b#R7&(;M=C`7GN9E#tM>QAUS>{sPK}#q2jh(Q6 z=#tvO=A^^0C7LU%6NRkJfeBal&1{VOHBFRcUl{+bhvWW~sP{4+U%!7!+cu;5KT~>2l7u&|Gv?@HgR)i!lby@qyS8FDO^xcix7wpNV3^u3&;#Wm>+B>j^zQ%@_TNjO z4`YIro)@L7DKHb@9LGkYcw(BV`@P)bt(!CNCc8#9K45?F$85;>f(TT%BM)bSq5hn-5-q zKc|sTS_q4ge76Z;H3rDojFdN+h3Qw2ZxlB}&gdA#mY}f6wYWAHGB&3D#UBd-JYTzf zy6y}Z&Huzkhql(usO6u)1aE0>w-U4Z;xDe4uw_I|b+&wm5fYQ?k8Ib_^?eMkaw94# zTPP;~{ShrPn|Fg2Y`H4SEo?R=y)>G*N~_juTzz336TAn7;L8g>QVKrqV)f5er}BHS z+O}Pax81LMxj$@$Ivf=j3eQ9yTROY#GzOPCn050yTVbegM1?c7UYFc@r}i{UDTKEN zFP~23oA3-gDsdLB(%3QL6svJcyiXvsQSk@%Q!lOjRYz_Z|0hSOkn7i|nCV+*^H6ur znST>5&iF-L(U*!$*XZd1DNj|HS)MsnL`qlviCXhYkB=`dK_|iegDW}6Khm~MS0y23 zMw{DhU4F~a%$}F5Jhz}*T>2%QKAH+~dTLoc*iXQLWZm};4@_vVh)-#QKUTqtmM2Ae zZD&4M6kgg-MVeQi5h|rh(3QW=#1BRq5*Vd8NK1VrJ`7pesm0-j{78H>U`6NWuTTX& znC*=c63vhMY{`{gy8SeGP32+*+sd~!I--0OZLA2WhZGT)aoSw!ZIOQqS3MU9=-+4w zB?w(QT%q=44U+#dt*mrG%w~W}MN-SWRe1c*nkLShg^up^5SV5750d#2+qK`~;?k=_ zB#*j(*;*?jTvfmNbmtjKGU5n=pJ^~G*HKwnF(gK|_K$#>h7smR|7^0~lUHYARH(`*Rj_!c@bHrxvBW&D{o^ zZ+o3a>&chZopa!en+b>ih|9AV-H{woiZ08KjWI)&4XlCO*`9a67@=3Ea&aPT5lH2@ zSrj$GD0{vjQ-AEjDO!1L&TojLFxxCCefM{oc*Tj37AL$`H`Mx|l7td(#Z|6T0A%B- z7~KnrPRWNoPUC31J~}nGEb0rZrrvo_uK2bTE48TgiZio7i%Z=%ZrrL)50yOHtDcjx z%Mb3KZ(X!l%okN88q3xiE@j(pw@K8iZqV>~-i-HNe550hvWce?h=5<*34P>K?kQ^Ned@Py&L(~VY~YIA&WLq&}c?WkwNQ2($Hp|`#JxM0x3V#aqw zUymmc<+{oZ+dXq|JaJ*LJq`M7?Lt!7@l;|QJORFkX>~mhKWn?Kx%`8c0O+0cPcVF} z%P{Nw@$1zgGf~UU-<>Cv`6MFpcyaEH88DqnK=w)bztKy12U+Yr;Xc$)#&LUOUSyn) z{R_XT{*`C-VMHP|vEukWj+Pn9U)sM91+d}`E4M(Dr`m`)?AD|^71Wp!KWmE0&1ub- z2qi0v8zx~e>wYN_9!fk=;P>249lA{(Xe?jLXn(u(i@I+4BvDyZG+1?(FnIYQ)B)TN z`7Yv{JsIP1?HgdHbITkO!!4LK6AP`@O{W?+!)%Pf!x3oXSTR*TdUYi4@g& zp;9#-P2olX?3g_rM<|O={i6>j<6f7$?b2J*5r8nX*NA9P}_tT4MO zgqmSN0idQ~GlHz_r43vI_Tjq^8wvNFA zm4gCDYozhLg;xxR8;4Z2>_o_HQMY(uUhm+`tKMHP*Mn!CFWJYx`dFgw8E(HS)G=^D z2=~fWYzF4RfdWcvo8Q{`=q(`y}lT9wmnPVfiaC+>C!95#^&4+Rj+KXL?8|0?X zk$ER<)vlvVB#`^8w7{XZLGwv*3gOH{AjWWCNA?$ zcDNrOm!q-y{?!89thJ~)d89ml~Bd7qNGwhqTB`Gsnx6QYq;*HYUM+_=bk2d zC1OO_j}I^yDx%o=ZkR!_;Gdh0*6U}A;j@Xc)Ks=sA{g)WN`O8n1LQH`G;uP~<9(G; zTrQ@gLz0`97gulBK1SV#;g5QVmj~)fHJ{1pG*gQ=L~obzkxd1^`Z1t8ywY=T=iAE3 zAg3Z@RVQ@_Y|F@m__5*pjN)~FCMGT(B8S^;FqXpdaM6qA(2K(L>-TRM2nE~X!Mt+c zu4^;#Us1J+Feb5nNhy>3?XQk>CY4WN=lRhSF-H6qagAZ+^C^Z7w^wX;SoNG(OKs(; z^%tkpyg*{cM|eO(2jZxFc!FeF6Tw|72Ij3XVEXbl zT68A`5B+ukS(5`BM4Vc>je#fAF0e1Ep-TsSAu?t$tDce7pu09m&e?(jQEvH->N&kp zpuF-T^?AkB`(fagHja&z6|&Y;AT%{uXcUjf2VQ&E*qe_ePFbV>kk8dvhLv^%Y1bjO zG1vZh0HEl8gua}Q`nA(wp+t16uADAQE`Kj` 
zaj%<;p{bjHz)`;Rf@5&gnK_-QJW?6FJ%1MGk9gnU+6HJO>{xJT@10uNFwRciP5v!q z+#CN&Z?@48p<4x+{Z1*^vYymD8&k&JX-Y`nL28sC_)@X5Rw8NX0jnt z`iaVTk zSzWz+J)_Zj$uPx_`Eae33ztbZvZ{(%e#OFy1GnyppuX%f8Tq9pghmj~a!glmTxTY# zUCcVQq}Mwm=}GYA^*I32>Sd4NZ`DGBx6zUQKUVV+ZkIyl{1jH^)WV$+DW<#X7T|o- z*dopseJShC#+3jamuO)iWrwTcQx#wvL$KWTqwZOL9yI$mP3)j;X$b4 z)S`X6c=5R5V7vjSaj@Zp8;E{Hdb&$5U>Z0K%eVqD!z%CISj?bvr`up@xrvGRpnU)C zh=I4UKBKJT0!y^O{{aG@?N(7#^s{jBsw4!jD+y%dXfW225JmYh{Au;BWm4$HkXlg* zKSi5ARm{~NZ_)mSZypw;zUvQm#^-uyQq6i=%jOmdvG1Skwkjpjw@O42vGr} z8ZjeR$}|ju{%so#I}bT_iU6&xOdzGf`6O8m^E(8U?kl-)uhDF_+C00}FrQnwd|X*} z&f)mS;%98ZyD9iOxx=W{olYnSFh>*e^5R`*kyl}E^92@-H1hogB-He{pRTC4L1S;W zGIHT~!pN`AI+bT~Y-6ypI79S4q=_HZbv!ev$Q13X{rb{}aX-6)tj5%+ zzprmgsl2jqrx%hcP|-aC9T5IMx)~i0{=eb^*N39Lrd0h}aIAR}%#Oj!~+Hy$%Tue9{ zNNk&YF`n)+2gY@qIV#?88#=SW8B@~^Hs}}xF>P3_ZLaY_QKfjlT8@xC(K--GW|%_G ziTVJ{4WU7KBA`#)Ep~ZjlUnV=n_BfnbqF!m!AB#8dnc;y6RF#U6&;8!95_Ql)zG8w zJ}5PJKKbBe3So*k7H7_ilvZrO#x08+$jo3UMQe)32#Q>*UPRKJy(bqdHYMKwMld0c z=hUw0Eo+j+ailjaOfJ6Pvh1u4CDEjiot8U&e}&y7k5+z8(<0M~OzBU^P;{g1T1mS- zL*Gyw#pNGqrs0OyS3!0(X$mxiM<=0C%}yr+)+F7SHx3#0iglNyFDNe8NH5QH+;sV8 zd&Cw^a3HS13deV0wX@ zA}etjpq-&2u22CaP>cuVL|RkU&_26~diJxH&P7L(VmUW_wYf5tq4M;kr6*9(Pz=I( z&jhw%@fl}XSco|E-U4I6-bnEo6A zy=~FE81uwMgx-PGLyWUJm{!L2f$gC;JN8Cn8`5kL87?W^O3S=aDLVU{-SEJ`O!G4& z#IxVqkeSw!IC(B6i{VvZ(^=M}!*rh)dh#p*oSUzINd!4E;j7t_=0iv#zJ4{|l+Pxz zwhPGdhS)n_W{QsC2}O@FGv=vG`bH z#6~&~b?13%K#v!_RXj{5FtP&8a{-oZ!L6g5z(d7K2K-w)z3AZc>(kApd~V`S0qAe2 z&I{!KtLX!*+JX>YJ*&1Ky{;QSm&olVnbJy)8Rr>w2bIa}nd%13S>hxYrfzo$K_=Q+ z(Q6)oEacE32~&bzVhJZ5(tlULqXBOt!pDUZVtx%*?ps?|RxoxKj=n{(1uwCdb_Z4r zl#OPKnVZ+ef^8QHyFp6y$^&<#*CoPHViN_NWa+bvA`U0^JGG>xw7|>WkYN3>fUz%V zy_RUxW3|og(v>Vl^mOK{a;hSmv*ZP;h~I%u&Z4pfm(axPw*>RP_A#^tb$_Ie0bJAV6pYkWNuHJ-8Q1hkP_#n5mdT@dXYaXwhR ztG9d8)5JM$Q}W2x3@$A?UQuF)sR=YgLP6)9o(T0Edk1X0mXw7z^VoadD|v=m6y?v7 zC#z3!1B--j$YFK~(LFCBf&n?2)+$dN+{aZOx5|rcM^fBGmL{*oYvNlYt%Bn3(~Uir zXbJ6THFtKx7SjmGV;OlRcXPhE20QZikZjCn5rqPV)JC{L5}&*FN*1#7ShIvLW^auk zDq9j*kd&0*JWobPN2~2N1jqR`F$eH4l~B4z++;L#^w-01!m=ucqY1KbG*Eu3@pE{)vkrQ1P=9=d)%>nt0ow zo`LF`WiF|>1NxAS4?ikV^6{_LFt-`uXx5RRom0y#(hyv5JKBjXI-fyWjs>s(1I@NS#Zyd{=rLRkmS$r!y;p9ol-(Z$I{^x?& zMuYvAc=7v~WHg`;K{7d@{-)L#CjR1M%Y3~0qUJ&Xz*ENofAu?bI`5ggQb>ZH9g}Bo*a|-9 z`>h6DbW+rhq2fNbD9^j>h``o`Z}F6oDeg?xcM3LMWd60Oasq4^7@0qORsbch_}qs2 zd44k9UZUdETgG>;y@;b%Y}8dxSwQ^(5(?Y0BwW&G3{n7hv~ z=FOLud#xmMOqQYWS9S0R@&A^SvTAhv2_Mlrp#14_{63XzG{Yv=wuvw@*EfGHcdgJb zy3Z_kHf;>PQ&Tu^0^ZQWoD|rvr0U#o1h1#4RGU_Ncn)huGGFG7UU=2E;>S9`d>Q{d zT?_iW9tk5VN7`m)F~W?=llqNVNK8(Tk7};>(E6*lBiCkRi;=%5H`w9-TH9Ced** zLhp2#wh`BQwyCYKsppPQc7||V+b=%I+hr%kn9(p()}7HFH>$V2pEA+X_VffJv3Nh& zuexpcnAvEp3@1<<&6jCp8F5jzVYfnaj9I)bcl%j{Wu?&A5@nnVPxdlCoc7jDI$rU z0XJs?Via69b&X|nxI8Yw3Z0^Il?SFYj;tYRM{J9)GpyXgy$U)y{f#EE;mvJ`eS#vD zUp1siYfx?H(Yg0J#@{m820mH6bJ_DAs|4i1{A${y5!7SF<4|2c`U>n10Wx`E_foAv zLx5Y8;E^Q|wbwh<@B37EVE>2?+FZsH?|UEH+7i&* z)YK0gIyL=~pI=hg%urp=*T>~4!}u@zuq36VE{Q=aC6{uukRn27w}F>lubl&jF{_vh z-di8P#XH?vYrXN!3Z*V>oSP$RF(r$T5C)&8dw^QWXsoZX0TP@bHQU1cQZtJ3()t(LTJ zwD#s$(ZVPStu@B<|uz#M?`O2X+V`%*C1IeGz$KBtz6nb=uTgs9b zvqO_IE`@eDme#@&mC5}97+YKBR<#ZrjznCxeRUQy%w`kBC(HHH6bW775fME=rVg4? 
z=2zBj?iy>8knEj(*Gn})&ka=Z1;S~WfcIB7#+y{Vy-oPvzBA80W8du_4gOBDoc_z^ zfBUGuiE0@vve}pc-Ht%CcxuDO)mHx+puB`dOu(gJq4JFD{gIAj2*t*i-o{_mLeHRF zBC~&wCpz5XbmxGA{ToAr&BmRe>uZZJuH8Yb6^r$OOUTN2gtf+TdZR7!I$}f9(9%Om z8#~&wTbZz=Ib}%9JCY|#N{DQBCqyHq!$NGR-Q4z+gOg&4AHK$O=jDU_tGB2~D+QDQ ztXyy9#w);7K@SjoJ9;MJA{JLTXNuPQxmHiOb)uKQx|dulrzBU>X=PcN`aUP81JQ;L zWxE579cu0A2jPYX7f6?8;A+NG3$YTG33vzqhBT^qERZDVX{67f(t5KRmUWWjo*7_G<{3vo&eM$&~#zp-REPE_q>B zfiyTRcwsS(OLQx)0K|EM3RWXlj5u+r**Pepl;bd~o!Z68KqX!L!H|wO?Cs@D7NbaE zcTM-XR%`wUF^-U&`kWa(P8sUrdY^04l|#QPjA#oUI4~r2t*By;{p_p5Fsf7AqHw7j zVKImLDxnP=x5^3~Jyqf3IQ0Eq?a)<3|+t88cjNFw{rEqXF%b!YlAKx zl@{ISI2Om8+J<53KbB&hRm?KL@Nd~u3f%NpT2S$n29H;J_(Hz?oYsq7@|0iC+V6EK zDcu|A1@wgi$%L+}JvkK>#Y@@;Ds=7D}4F4i5L+c)bx*xt?z@!i!fL})O3&MQkEO-bYn zQ03or9crI~z9uE_l5^$+(&C|@%miInXzy9jxI13qtc}dy6J$at6dp7}A8gs{TN+b7 zIl@f#^a!uIuGqL=k8|;8z=*4;V3T?te9~BXRt((?F+SoGeq;IbB*)r;Sl@U1ZjHqr zr_?z4g0uEM$Ls`KA1m%1M2Ge2`fndV^xwIK*7rBzo%qxFItFk1c9Yh5NRXjP=0fRE z-M9y%Uah;uy$mPFp!s-+gkl*dOhiMPA z_wR#ahw{Ca^;Wx9n9+;hHq|tM2O#vjJofK_OyBvp=7!O-|`(1e^jW2utR~5W2#?+~?FCz;wKvdlRqS zrdqFl!U9p$KbzmeKvFgwhxfEW+e>3M3fI=$VCNdH99RQJrADa*jHZ=ssTtil-|}77 z>tK#d$#WBo1#crgl+DU61t#&Wagka=LkuldusyknYi3t%xw1TuzR;^zea#g8q3!#) zCnJ$clSYixCg4e-om?=)7`Lm!dg|wjE%`AMql+X1q-#00^df1C}U+EIe*?$Ls~_J z{AaH2wY-fDkDA{V{LyiV@m;Oy8wsie3`7U>0Z@UXaMBwL2lmDQpG_~t@|6Gd05{qMGzY)qvyN0gVIqnw_pd;h_Y*6<=5CAk|xM44T#$$;j%N z7f;9t(j()N*J65G=W}v$x{VfWHunxVtT_RFKOufQlLn-XU}o@k022@tR23sH%IQ=-i*+`w2k+U(~(xFB0Z;WAg~^eX{H9Cy_aJ>iAURVxI;q6K^|j=e6A5 z9zl;%58?fW-9Gng)K*#qaeKoONUz7yZ!ThJeqvmHAH>Eln>M1>lHx>2{FCzRNet3F(m}=l8}GbI)V&sB%()vr31SJ0T}R2%U$Uu%j=`X+Rl^1w;kf z&%~U5Z%{R8zEfkAfT0Qdo-dZ)bu45kA4>dan0Hk)BP*d8cRh5VAA7G~(Cy6?y8_L_j`W3NnW$z;oSJ}pfxIfr zqNCl;*=zKw`G(tjzP$WSs~0l9Sh_j~Z^hB1hmORLIyLZ(ABu^y{~8wo|Isi|gTWzj z5V=s{IZQ^WQCsh_s(y4|y$)HiGZv$CHeT`c+TFMs%6%|G z-O9wpEJ!IKTqP(iOGkN1f60iJOAtm3CLfDT3fvRVSLl?=b;;u!H60eD8{1e>$g$rYJ|0ToG|aWK!ih8?CE3__B8};^ zL1CHQ)?kHiZFCZ1i%R6_r@Wv2dyoAXUkv)Y*x3eMziU|$p;`!v!)6_Ezl|#542{X^ zIMs{!-xG|brM9Npir;V~v=ZpW6~PgIURN{yCMnsp8p3k2_WarP1&qE7wD)x+rHPJs zP+d6+r<3YSBE(TgEGP8CGv?+#(|`vDESINovlU!i8~YD<@@X+WZ6Xf`w>(Xr>#bq7 ziMgCAQ9H8q^(+1O0;iw6FW;c6_{d}D=@QbNTple>p5|L+Z6zh)LuAup^1L_=4yD;J zur?qK;;M@ng@Rj-Siy|cKmW8DnFppyROx>Hoc+XfEU`=a+KojG*4b#sqoBCpyqp?* zHW@g|Go=VO>o%q$B`P?$siQkd*DE<8=)$HJgEG!m0LPI#w-`fTnB|@jkrITDCnnOE zFVilby6&udor`_{jtEX_aBk~Q<#D3Q^4KR(QB#A0@O91zNRX-;mV<}B)}Z!CQrgLm zV)D^+vF31OIhz3z54j_j7F>_s&uAh!J%Cf^!%X?>`Nsw~jr{kNAT~aH`Zu&GS{Uc= zLnwHH-*9-v`l)64DvC@y?)vVFjEa&YL8>F6#tWz~}YjiYf)lEYAOC8lV1 z%w@pCZB%|s9Lh@$2^3QsPfc#cBbsmj2%ModVueM{$d9AuQZ<57DT!(*Q?(l`Ma{?N z@sNbLiaGy5DJ5xv=~)k+B~__gr(A*naf=N^>w4}u5ppDPpb82{P5}KKnF|GDnme9a zy8zm=pYE4qtGn07QoyEl(dC2d2?r^OSL9%706qBPiw;R+qnY4?Fw^-QA0N+bKJ{_c z`+8ix#ux#tDGfYbY24XM))N;K;{mH*e)zo}>oskK5OooLkM%pjdvU$sJ6U|-)t=ye z6io+hKIds-?O#)5r;qd%W-okMnZWhD8%WGlOZPt3WTqNb*_$Z7sACP8j*F`?b5#QX zcILJ0`VVIc=MWygxw~^SHt%9iIwa;p@`)f4DgU`-b0V3I$#eUBV;7W!FaI;??Tk4w zFQmHJE~Se?GIpYe$?LCTsgqjG2oayBo*Km#r^SME^9!CK>k4Ox&^A#y=z)LF*p#s(Jb@4cJ0zZ+b<+Bb z?}mDLJ@*^T-~r3==jkJfc`%EMi{TM0%B)Z3%Zcc5kx2bs9I&ymf3>s_KHr^+C@Epm z($Ol|d&9{QFa8K}ZAZsQ7}L^zRPmk_E;v$YHFvS{CFceKOiG_lJiU&sMyt(MSmm{l zjU+8m#~u~sN*@P%@Rx&wHr;&$GL&|fO3TXno|6rYncuekDi!8IghYpNO&}iM+K)brC(*ICvUB zBraj*{Hvy%koXs4HvMSETr-2VF3}JYiIX=SY@EZ!PW=;&H~e0GbA?AD%>>(JOdb@Q zVxHm`d!@955INX4s605W>&Bwx|ME*nKEIs^^KIUePHvHQ)1KQqXaXM@pBV|$2uy2K zZ|^)i6g&h`i>nwzRdj5B<`&LlKWnfm`krMQC0F3teYkT|I;LvJ3qcj6o=sfexc&h= z=>AmhRS-M0bTUe>=6vdd5Ft;}zt1uGiJcs^e8tS(MnE=#htlmJ>P{zQdqEa>d0f!A z<=To4W>tlJANgseuRlp)YBS;qlP6RiKcnf_LY*!*^e4!_3H>96GT-QE2WeZe7!RRG 
zlJMc?=jX$tp!9=x`JWw^j1O9Cbpuauxa)jA3-{#4#m46h=|X2WGwbH{{9Gu1lmi7R zE?KrQSkX@EZ`|!=?Qlbc;{wgh_t285FxdMHzXgnkY#bf&gQ~z{O*k~tzTM#jgKC%t zMHTSzz-7>cDVCBniQI}mLJjftgwkUU;#0Jv(5lv+rPf@L=ZAAT)?Xjt7XO8tMrRBQ zTh&fyZ(sQSx0elb`tOAiM*RxW$xw(J_G3^?3Ki|umwI?7qYtz+mOz_aVIt*K|2t|X zfgCyK_GS-@#iRZHI?sUQbW52`?l#=%dgvc9L zX$c5&YQg`rO?{1)l@45MUqz8!IjEKU?oR>mL7tDp=;%E4SG#*9bw+APf1YK&8e3a= z*Irp?#eD0Wyn`5sGj+jI=Q&@V=qKHrh;2OL$WxGdfv4n`qvSbzersR&W z^;n7p%|#^K-jX(+oLGauR{fWU%MkKf(odg0J!}J+6tq{>WwmXKdlM56^)u-FbL_~RQp0IOIz2v6_>M-EU27nq!>ag224>@H{dK5+5L4>Esobj<#?G&^r~ zA*1bfj`%ZkxW9E$zPsZzTR47|c3j@dN~=@=`*rYzz=?DtsKRFutO+4T4Ta`RN6GX; zh091xVYk54^*ZI~8yvKnSUYv4;NrsLb35y6-0%hJw7Py0653GaIFJToi**kW$<6r& z8Mv?o&RO

a~a10r*i(wnsprjt1)>4_Z)RTB|(Xy+(!t(yF6Y$Iz*DtdQL-6g)zj zw?#vo94Z`ZPmi$;neW3VQtyjzU@(Hh#PVlmMxgRIl?W%vlf>%tex>4@FBi-XpBP#@ zU9Q7R7rkg_txUOpQUmFQWok6D9AcL3U-xu|5zO0Hl)slyhB1%n&4*b1`N;>)DtGMn z@_@wO5cFjPdh4^?(RX=b=03Uc3pC{*=qq)%;QVb0LU?jZl{dBv5 z2pqbLVd`xCjhNB6Bo@@u)~F=v1$HA1F*>Hy{rb4wbJed`4yY!gM zK8X3*4Fx?0>Hdi^UC={zuP+8hQ354#arteX&&{+%QAx=^BxTstlmc6xyQGZFPm*K} zLi8P1_ko2c{=-{>+bmhFY95-hw28tY$^C_)F=F@m{g-j~lo)!^tI(7s5_OM{4>^BR zF*b;nTmH^{piFQ*wUP95HH7k~4xb~6PuxizrI+Xm>a4n}Ra{L6wl{V=e>IewknbFp ziJ&_wuUN)d)RAV3lJ+MXbb4s+?CfCR;e{R@iFZ>|QW8r<`vOZw3}a+hI0aZ-u8gAQ zvLYZ8g>$G{CiiF2zuoqU6yUcuZv+i}iO-Hxuu3VU1vHb?s!w-}D%=oy^3Z-q0@oY5 z#r8dD6##Y*DP}adkz+Q!jNVBo7}W@6u*&KmE+YZgp};j;31b!ZKfDE7w+rtmp}jdH z9CUO@@6Vm@z<-b!xBJCMOP?Tay^@lWD)8j7OuHFVLqp?Wt`rZfdlPv5r)062BAZTT zMcp2AeY83kcRq6?u#RTVHZx_-CYy^dvS6L_HM`Di27H)+U~MgMkOQ6dR|jsmTQ9SQ z;8(B(mbD}(?-l>fT#I&0Mlump&5(#D?q1OIivoL&C>R-|ck(A2Y*wm#o?Ry|KhKd& zjf`W-j$7-E9zt(?1+&5-i@U72DKAd&kNgqpcqR+x*8=*oU(-iJO?@?|Z9UCs3B@b&?9b z-eA35#`x(E#eS-JYii==znz6z^?g2F>^L*g=0BENTIDY#S5D%n0qS^KLvE}c2Dy9ZxfT%uQ3BRo$q;0k>4FxxD%ga4p%3X4%l zan;Dtiuo6qj_di1n!*@6gA=#ACsBqu0mI{8BqHaME6U!CxfDS6N3Ifhdwbn6t%lz;z|xWcoJP^jd7wbn}E4o5TV;W+-BE_(df#8}{R; z`azl-B9DI*oEW5}XG^(vY2e@kutG;=M!nt==d9z|R++qz$;8A2?1SU0vY4S#-I&S| zaX8FR-d;RoQc$uNod7i=zYnecZ{qapfN)6C_0A6B(xx z?j`#|rw3RW`%O7UEx53yWv0*&OHeK#F_TpYf{3zgVPfPd_H@*INEl`rpMxK2v;q^mu1) zz>31dH}fm|N@qAG*<@BsE7^E{q9S22oFeY<(4ZR=(}ULgp%L^DLSzeWm=|kIfS|{W zjygHH!66JM7)*3Ls3@Z^KK-IfyRnU!7UG?++A2 zN=iBf>F#c%OOWnP>F(|>=`QK+?rx;JySw3TzQ=QZcjn&zUL8ipXYY4CYkg`F-OvU! zbnIyfI!CfUbheaUj9Rz(Cn;7+PE!xX&4y|k{OmwS>~EP3(^;r8aC$na2L*|aj;?qJ zq6NC{JoU$*yT(+y&kWzCzKe=(ape@PEEm3L&|i;2wu5o@TDbDlSolW`cD`Z9!3;*(7RmU(xq2vT6* zY+clb>n}wro+cH`mR6tu)icVLCe`wCyQJLWL@z5VJ5g;UZFleo9C+C==(IsnrAXfm zfRU}>uu04(PI7VvCdvkd8zo<+$cmJSS`}L>h59dfA!t>tj(s{827(fi6ysEF`PluX+EgbO2MYPHlMiu9y<5d<%rCJN5by)SO zhMw`;R&r2b*IGG05{q5RTdS5TCIND$9GW(8Xy~6+#)ju1E+qbv3s!Dg05W>DWwpte zb^&)eH6lZww?;q;c4$gzz1hK6CnTN~gFGW3?TQ?LX8M!? 
zaFT>=Pp6#k-d}@u{XwL-3Fk>6^iGp%eBLKgJ6yU_nI!oU@A~7*6NA>vFq@PGL!IP7 zGq0)4I5E2+a+VmatTr8@lmXwPGOW*t^iy^`%C52>XXIHMbNXuU_ ztR5_4@1M^7I*M7^C{)xYT1ZWt{BtIR1~5i?8jt-EJm=e)?!(a4El-CkU2L0^7pJBu zhj>Wn`2XQeWEmgpBP`(0`rh z5aMYCyai<7fLEQEIFn_JCoN|2LLfBo)8X^_Nk*nbN0BqM#aqgm8BSPk(VLj-}Ke?VCV|pf3?6ea%lNVSNkAG{QVZA|GOuB^MinoLC?aHnN-AnUU4=<;)B}Ns}d|j z2JcJnNM7}$bI^5Cxj#y%rSL3^Ci7PG4RuHfv+|epaM~_U0U%G@D7TMdOe&27pNRxyo|J>AYAlKhuu zyvEWvi~p=N*%udm4+Z8_iznc2WOlE!SfN1X0L_%kmzwunhiOoUt581Sk(j*}|1+Ol z2d!zJucez0(RMK$;`!^wA3G?uO0)0B{DhYPGxL3E9U6_;TeBa1!0pKrAvgCIF)=Yw zadCcQ(^H(2k_mE$=a&wDqYHc1#VT0e6KMTl>PSZ9;fm);C3iI6en4JyfTZ3o2?ymS%d zvv>rjhRl}*sBRm)?6W)g^=6jk!2(@+(S>08=)y>ThDXS|oMSDG(g?$-2x8M>fU z;q8MjHj)F|r^;@bpt)>Ey&oD345o`ZRvHZaM4{dlM#2${q$}Vg*Gc12rq^pCR?~Ph zZ25e9cvSOHAZM>-Sz*C?GdJix@OoZtRxi%35wyqi z(Khd2q=g^}eDkb8wpw-oxm@s09-8k28A#uSc%wb0#}vHOn8VEoB?xn!iL{J zF&u8`K#QN`8fRc;N!P^7T2Ub%W)`Gk6qu36_%9e|vx6so;m-f{zlyN=!S}zj4T`if z$i{}#_z*sIxkN6r|YCxpw# zObwGY9>7b02FB`UMoDkFK&C>qw92D?DO?24U7o~;xW}<)e)7tW;Tmb6uC{4kLGtm7 z#6dB#nur%_`7WStbf$M^af?OC$|-fq1O;5$-8(jgW+x7uq&}&gzL11GJh+6y zVI51B?cw3rtXPjs$Vf=pCbrGk8PEV=s(_dInG1C$jSCUEq|rnUIN3iE3jBG}?T_D= zjOAqQBxkV-@onPS((Zp}e`v(|U}R^Aw<9Bk*wAyO8u5d)o=_ zu-WpPX$M1WJ>+ga910;GOyC7WXZz`Q}}le1!hxOSoxSBZTMU zR7O-xOg5(Hdy>k9$k}ZEdqefz1C}MlHzoKWF6*d7fUzyR+)gHtZslDl+E6(d2ikXK}a_c z6vV5jh$604RA}RSvRx*jSWzx~k!@s2DZ_w88bxqUG_hxT>x)XBVHmGSuNFYwW{E^CeXX z`PdBMv5ONn>tmx-u1YY4xW1*b4L!PJTER3%@G4C3W{DsG3wH4vL>E_y{sXNjg5^I8Sq~?Fbp*m6$9R0@7e<#xEAXz z%T0y{2fb%b`Hb~j?zm@|@HKn$WRK-*T_3aB!>L79F6NnT9xQa!6s)oAnRjrP$U=Jw zCz^?%n*j``yyz3n*lg?QhdqFm4+T&#g|~$~t#*(1TL#ztTE=Xl3OvK(k-Y8kkTUL` z8kFs`tPdDIu`-T5NP=R%uB<(UYGZ8sb|!6n@T}G0j22eEam~3_&6Ud|>1PO4^hKn& zcxu(wLgroV|8x+Y|9N}C{h9Xfu$zf4 zVKEP$FL10I5vhpwW3jGl4tYe@XCl-WiphDSN|d8KbvbIF0!e*&QkhL(wg%zn&-siS z=LL)t1<%5fxZw$!F2Tt3R>1W~)293a)t)0#{W&e_({5pvkS%+j^vrMc8O1s*-Tdk4 zQc?#BN?rr9MI?6qewXl=53dVPDH7Qz@-{9N;65nLburY`Lst@1fD zyZHA8u2pHC@%NIJZr(vf_1hV>8Iw?C2|{>X1wod>L{(!@uPlK;-P7g9fsv7q$jbz? 
z00|C^27;QJ>G{+g6wnD8k3m+jI(D04F@8SJU~F#t*f{Lha6`jV;ErYs$`+~QBV9>d zRL4*D#8hb5i@CQVs`Zome{8t`FB@Dj_lpbTzCy=gaB9ni`w~X?!4{{!`{2*;_VVb& zMTF)%x6r004mOFQwe~?1BJ4oF|QBpR4T`fejzDamFo?%OO@09Ud z%lY#sD07F24ddItg9q4umHqQ-_YOJIvbhg-EpZVO*v|0|5se{)<^!hLq4CD`!Lb)aS0AM=MTWxXPUu$naum>oO??^WWe(`(6{&69C(Oovk3NJWbb`_)<7*5$Fmq1$?ew4R#q2`Q z>@O{>vE{%NDMWhb^jN|IxhdxY3Zm+W8?fcdx(;9#9RNnCPro~s77EmCzAs#S@16>U zrM_@%fPw^mj$~Mi#0g8@k)l_MShMmMt^VO5Cy%>$*417WgFGwKz$j*SiAuHA+sj=J z(4Ap@8+VWS^^4wem1`u072!Q-3IHl9JziW?;_^eql(9W@(aGdGDbHO4NSrUvn&qNUyPUX@);E5P{UO-vH*ql_Rfir#VPJ~`m*nHORKT*52e8e z-Fg+eOk1u1d+8pb739ercUUU4wWX`JkkGS%(G#S@Gz%PF4SzlGxcMtJatFf%ZBHq7 z;4f#KER?_i(-7b6D_oF-WOqk2D`Dv#TdzZL6ZzGJ5~k7NjeJPzO>G<*=LzcG=F*K{ zudeUMOo2Wpv$tpG64hF9V!H@DUR?Lb>rS8@2Pf5$G&BSjhG1r!Xtla9<806yFWG-| zq-l$b*Vfg|7azE;r7u{VKBA>Lx45AcixJ;G{_`8=U3CbH%_7M>ewFlI1@N+XZ`zk;k24VI(!2p z=zVrC#6R=r*D3r*l zZl$u-!%o6hS@S7>f5aDsm+Vvy`%l$IEqN}rmq*<-Q^DasO3*CmC;ai`?C!6;7TL@wV&Q5I2XPalAGT8({BS*qAb zRy5d~oUr4OK`;$T=`}Yyxweh`W1h z$@nLjvZ8$-O3Eg(T&BUj@m;is!FVQ@*z`w$*pi(WGwk177^D`m{ZqeHZ)+RlZ{Y6Of9Xo#T?`}2VA@5zZPX+b48O7o@-=8luN+kM4uT?avgG7j z%qvsb^Z>=14{!XRx&<3cQTp`ol?Vb;xLDu~g!Oz{T6@Ra6f!V@W3y=7K|5Y z59_=MFZvnUEdJC%qr&9R0sN{3UDb~18o4{sm@1`iDaJcKf2o(sF}y6~40T^=qojW~ zA&xQs*CF#M2VnMt+MV~aT4nt*3Iy?&+M?)Qhsu6-@EWARuRW>hN2-SD;(&ok+l;H; z^fA^O?vEBZ2i!CbU)N-rnVC@|xh|oAV6=kE%Rr+HceLSn>MNY3ldOiHx2C`Zd!?U8 zYS%=B9uiU&o#6@M`~VpRJf@J)NP(DXwa&vy9(p8!l%7!?Zq;s``hnrHOs7#~4!B-v z!VNhZmsw7Y`HCx)Rn-jt?naZ1alnC*NncN>w{J#Ag6`KykbgJvV<7g1F+FHdc(>-uB~-Lmb0Bs=Fpt@c$$x!r^Ny~R%Dp?(7oz_qt_D$xI$y*%2U zy53%4D1N-P2pvHTo-Q}htE5mKk=Ans)pk$OF==}Kvxy!3FP*0D_up{t6_<#p2AS%O z_ts`ABNI)2a{p>5Slb~e^h<5~6I5FEW=_F?sRxz&#jNr^tfZSg11nnJaaTYEGJ`8{ zlPy^^!(Hu-2X=4p(E&*7mv?!B%F=W&LbhYMgn`Oqh#hL25SxM(M*b)GEq&k~+dc80 zQZX`7htU%?MfQEwK$>iqAb_x!;9t5Q$kY6N57p@amnA1VKkzIRK`d&aeYGm*bP`I) z7A04fEd4~&9qs(^0gydmVb}OcWgwp{mCr%$2W5rT*O4s4a^g<9d2} z*7H&sRBE`(Js?X)64IqF{uU6ZNPe|DEeBJYWM^sc?O$1QTP!I%imm)eovUxl$Aaj7B z(%tOkCQLa~*fmjC^sD>t^r6H4eLSiE`SD@A5a`WgKMosux{cFjO+}Wfy;MED_>F02 zAaG|*sY{tes_ypuD)s1!h~mEoCSad;w*7qw^AU1zV0jO;lUGo1UWB+#3Xteu+iGmo zP*Mv2<5 z+eBd_Ds}njowDGynHzxPR~|2IIT?)N%g=BaV2ae6L`syqV@{SX7W+wQWhwIzg5g&m zjOJBx0GhQ5hl>?9=WUU&lowdl+S4%FSNGEXGQG7y4C|e#)dg`2oTVF7_?H%>-bP3n zjrd5G(k1PiLxm!vpDc2!sv!U-vzcPq)(J4lKh)J>#bUH8l|SR*MYg#=5il|$CnY5X zg#lp6!0>Q)IP0q4pG)zuj3=!gBIo*=IO!D;ziDL78^5u>U>n_>LL0eq>D6}UUcG_E z>M^})y93$l{YfzDZ$Lb3+w-Nw`^eh7 zE2MCe`i!K-e8P4_`ql1mho)(aKr|{~w6DmVWaZyBANIdCA0%F8pZs|)+V(#2A*kS4F6)b5#bAHNZ|?S^HkM^9N4LDF7>_0~BM z2!T&R0@3-d!~Y+g6BoDhQ$p&{M@23}aJ3L@U~nlEcn7SS=$5Y0YT%pUR^-(138@iV zIf)4fsX+?|M;mR4IO8x5t~ki}^ON+d=;sO*s-haY_&6@yd|L&e+p0Faa|<}Bo<9y?$z8%e$>iA6^2m;Rppwo3~RpCx-ZEURiTR5W= z5(1-HsRUEX2xF1q+U7C+iliXcf6bMBJ2>bY59;3Nm@QS)sZ@5Fx=sPAZOiSV>HTr_ z_zyKT)5UhZ{)l&zR$nAu>DHVU$yQITg( zN~RsMD0UftCAI&t!&Swyopbj>65(id=sa^NZ$cRH@@_xngJLnPJ(I7R}2 z_uXZ&M3J+rTUSO#rC_VDPge&hl%ZiYMEZ8(f@e}<`?Fc7W{Un8M1__c!)>sJk<9eJgZR4L2=io_iExVfPPX(LHfXchV z2o)-R4*PhNK)aPcj-IX2jM8#B#1H6GK{_`91hrQ$@`bj@)P(-J$SCTMY03DA!7o%K zPQX>t3y6x5qZI6$R-~Tq4rHvFu|PmDF)^LaTJNTRUH{tL)CEK;w#t>d;OaC=KXm*0 z`gFVepc_t`j(k*KsD}WhX<$FDtK_W1Thc3r=F;PFkUIo_{;G1DLUfZuk!Fti3yM}2 zH>rixd)wy*mp1{u{#MqR@0jvmOG50Cp_TC$!Sne9c4S*8{Hcvpx&@XAnObIY%}FzK zy%#1Rt!ZW%6Dgd+Nak8IZonBBCEq)J2IeM2ATQTxH_1pJ*n|gANBjgvvg#jQ9HoSX z^NFrcWBoLLiUq;Kc&kSEwuE;oAIrDSn#u*mNSfg0URa9p>pd`@DSsf^z4Px2^5khZ%3aiT4qG{GfNESp^>zMqIY4 zL}V15V3n5Z{}9?J#_euNu6jh8yEUf=1(^owHMhS%q^^$OD>|!7txk+zaEqRQ+;Gz3O^xI-RTT1VcmYv^Ei1o&x=l5EQ6~A z6mxOxD^X*fxaa#e`Be4;M0U_jV0OTY-?eI9dA~8=HFT0?PX&4I{ zblYFwTSS^Pcqk^-tSLpU$vr4|Tw{B=7Qq{@wI`6DE%)TMUw);u(SKnxBBeqDbSnKo 
zvJC^WA9wT*n8bty6eN(ur>`wT>UHe8&K38hB>YL zSk0G)3-@E)7USGiXHo;}%Dd+9xcn&3G`#(8Te;kuSzjQV+i!VDE7yA339*hL% zTkElh^ct``|EU%O{WWnS*(chKd40#BI9f*KhQDw8q9d@n>QkMw6teDVG@qrHzNZjKFpv zSZoXU&-B0R|4P;Bvs+r!2WSbXKDsy8e0#Gnr_WT&sleQ=pe(7eaFs}<5zEP#qxou= z>;z#XCPbaxbyA^%(a!?XDOcB8`h0V`CT?{^$(!9Y=1Q}a2iMng^oCIK^GN4+i;?ht zEQovp`etjpbWbL6!$Bq(W5>|D-(W)jq4W*A~y#^Yz%}rh!puG`rzoA88J|lgA z#&4BoI7xjH0S=jm7&&JD>cy=u_0Bz`K9!MCW}J20)#@V6w9@(J3kH)Ri6M=Pto>=Z zmgh&2`gU)LQ}ysy!Z9!e?Ke`-{yNqSKA)9$RXDICI5r_6r?L`pVPWCj>vcY?^@?O} zZfzonn}%B zE0{k4fir(=dHihU|Kn}0{S_5m!wyl`dyB0F5QaNF9^?a_0%CJ2FP|R4Fh>rZaNW?u z^`=&?$f{e1R5MGF!r|XZ7fC?-3fv+uB~atp*#sjQym*?HqR(%29t?SQe!0%|13WXh zi%8P4vnXjAsEH=lCX_M}pVRhX+w+vmWMH$A4e1VG&J{IaHU7$9G(QCY6&CS~L8#M* zLO%Vx7q#Cc=lHEzN~ufD0gA4Lzu211_{VlGg_2-$&@VL9;yaGha>^@sL^A)9l2j4{ zIM9#y4ice`u!`RTZ18VS%VRGRiIRK$w@Oc<)qak*>Wi}o5D3*i6W&W z39=gx9tQ`9Hbatj{nEyT)5Pj(grDC>qP$yyb{R%`<4vwyxa7=N-o6$m)JsPA+Ty+e z9#xkr#*%%M0EIfpy$6@AEw-j7>a16w;|}`yvR?-3(FgnV;$Y5Pn_MsDs`p}i-X zH%BjA&4}*GIu;xgbD`4y9IRhSNx^HcxApHN*oP$yM}%>nC#rP!Ta~QT)B4TEL(GbE z-2*(Mh5W^wqii|L@0GnmBxtUhb`*OdA90_h%lbK&vX6!T;XJPi_A&o@+T3xw2hM#& zBF|YPbJ|;%Z9S#Y4@Oqd(KBr4WD6dxqa)DWm-GB&gSREc#k&BxBzoocHXA&kB=eO9-wnP70@M(sNdm-1`0cryM`%sCX{h{3-jDs1>M3Cp zPO!0;+zhJN(h~h)HaO{f7l62a3sB4ps;Q;qj+)S|laiBT7}mqsPeOfX#sD#l@5RVR z)=QO=cjme;D}*+nN!YGZ07@>kXq4iK8XM+RJ+HlJlTHL0?)Ed1<# z0C#5dbi*B^9S{%}4^vZwm|C{;H_DZjvm1ewyC5Kk%r*K!^Qn)Dh;Id#*X?xz83tQz zMfv5Fhlm)!dK;Z(FNiS(>I8$IVaKpb5JG0O!{i+_d2Q)#IwkB)}v zuaZeWieZK)u7lz%Jg^{5J?fp$PuvzVliqC%E8%?{4;*tozi(ta?;TF$Fv5(f4j>~U zL0BKCFZaE2r8~H6R#*bnzH=QX(3@}yOZMP)0tsFqB(hGwMLTvmb5?g7)BqjCG(j#` z$LAu9O-nwZ|NMPT+e}R^?nl2hPCSi9$ouFoDj@;G`L1U@ZGZEap&I+ff^C@hyR)x~ z9_IT`|3?fJfM*;Z5e#1n*P!UKoh#F5xE!LiUNW!W0AzT8IVIeM+9gqvgzNQw3y5Aq zA-Y(KKpJt_Ay}{sK2o3j{AhA?)L9lfzbfK3#uno|pZJCy@=tHJ|0QbwpZ0oxi*Ehu zp<%eOEHWP_i`7`c-h~H-2~GFx%kcqZAfqrkMObM8Q`5u;TDAaRZ=+}s1n2d(&A(UkkI2l`P0AtSTbW%Ha-ixl~wZ%3%f2|Kmlg#k}U3$1bb`nfS zdjVrydMD5E2ZzZgX}H2cftovGI!Z?6{FvW`WCFxf$g(QX+S{3Su%`)O?!EY83UkX5 zMw(0hlfF}QWRzk=%sX93*?NU7VWJKJG@o%b+2CEp4`o!=XiBzvHpb5vOxt_?d0k;4 za@zJ~rr>&ASG&)}t~%*p-FOh-JuZ;o-7ci9UMFlnBQrwy>Pgb6Apf8{QXH*+?wEb+ zPQ+yGG_EQd+qr4@@woDoXn#~)bvxlzHmeHaMa?czIci~?vM|vbH_*q%h8hfIa36&n zSyawa7LT=&xW$G8piclfO#YsGp>98;pzVLA5ELJeR-)O8^>*LSi?Sq=qJp;fJ6Bu& z<9NTkh8l{jJWkTjN^L1ak|J(gHx;Eoipi#D4_+Yh$rq@zu+rQ&C;*a*ae)&}=X|L@ zlEes^!fFWtQp3no;)vT#i+xiq8iyBIor%1~Ae76VI2ra?XPiwly9|CNju-9k8Z{Tl z);kO2e~@@Y#3O43Xs;IKM}}2QMFxa#1+i>a3!aR8- zy~_OcF6@k6HA;()5+P~b`CMloX*46#*$f^ZCA@Z&P}~=!h#FAu)hq|Pj$n7Z&FZ&i zNLJof^pbD4ZsQ$`i&zwur}cZAP5o^?jP75jq1;ioopZ8GApT$oW6@|P39DtsBzQXd zes7^tY-DjxDbGQr<)vqozkQfkT%?Ev)J5UHB@#CCd0)su)&V=ge2X(9fPe%7ugJh) zt*W(-DpsoS!J1INoUt$iJi@xMDP>z>dEoF_LJcM?#Jiv#z|wsuG1}YLc$_bZ0P78$ z^m|Hxk=qD#|H{kZv> z{{mR69}NDcTwd#F_A(p7Yu+^9%KTyU1kER5e}ZVGY?gDS!|nKH`-P__qen_2j);*F z?H!-=-?nR1f6vN0Kl6N3vvpOCH?ZpL*=W!;1f@yklD%#DE__KyvF-TEjI9tstTxI( zVb4{Q(TS;CvHg(Z&2cf?$S<6C%=S5Xvu0ywO~c<{+&$<_qG)n*kSZazTDwm}McO!yFjNF=iOV_~RiblDn0s;;j567QFpwHl4?>{%wy#GVumxTe=kj zB1gp~PGPmWFIo1SE0P5Sdp_BW_eRAvS22 z{DjM42i1PxOWRe}IYP%q%Z!?X1HYrXiO)ocbqVW6@vt=z?SN6OqPy?W`Zr zGM%5)ujv9t1_0~L=rf+A2gD1@HpA5SPit@RAnP4l_9FDB=82igda(SZB0=1ojIZ2d zuR)0P*Nh`fV_malFT>G)3pc&!e-Ex@9Rd@idm)M10d?-`gIp4K;TOz@#0ydny}C*s zH#5#)gp>Rq7G`j3_#c2)(gV0}U<}IVLgBDk)`9vgC?Mc#k^vNG4~GkVl(F=%5frl3<))X1+0dZJPVy|d;$UG8KLi~{9fikGk zptcil>k$YrLqmP-f^KhVQx|{@d}CAM)z0gV(YYOvN8e-hj`_J$A=S+g%sf&L;w=)4MHTO1FYNcWa!VIKY=Ol z{K2iDyu81-ye))Utq#h)tnJYxH$UHZ&-uNFD^@J!7ZUP6yx$7g+uWrb>lfOK2pBYu zDvO^~b!U38ye!j=&L?FC>*xCF44+@LEx4~7Q&@=4We-joX*E9Ca_JMD$dbqV`sB&e 
zHz{{x?9fGYTj=R-I-uH&90=Mc>*tru@Eb$1t+3c37il*^Q%xwLj0ru*bgjL{t8&1y~3T zn$PY@t;H0P+d7D{;59W;d8T4S?$@9>HZ$h?H3Xy$Q*~B#hEOjYW+tsPdS9mIH{9^utXNbtCCn@=IEAuPdZ1JE}kgk*d zZxZbjKp5;W?9p;L37T4H*zq>LGuMM*|`(?`d%f8P?NV*2{s3DQss7{rx~(UIvYtOcB(eP^m*vc-WsM0|5sle=7=&pn)RfD!!6)WiK^IqCJC7=rs$LnlT zh@P*08x2nZ3mB{f_s;^%o#edO!mX;I%60j1T(*qLvkMU{G)c+cAlAFNo9xM($mr!M zpxspsJC`5S9Zo+lzqMy%7nHzzssiuiDu;%{s%l84>EU{3*QE0Ko~n~uYQ^mcD0l&T zEgo07+u06AxNt=YDB)NuuH4=#BuNExsgsbAc)LKm&fb1R_e1Ns>k6I9mnLh79L&@; zY~|vb?=3s#;ubtHKrG~anIrwirIw=@6y^RmHQ+lGGG-PkbU#)JVq?y-y=cbos`WY9 zL@&rxm@{plO8NXXiP8An#s3j77GMEz6fk>)*^wv!i8${;67McmzP+;rYG_iKc?KrQ z@qvTJ@eO^!36o^`lnwh^zYS#0!Dq>(^JHMaS zjIbZeF{W!kJ%R8A1yT{Z0p(FtMn*Y<56@Z4RfLrpLlPir4hAxPg%DXyMBz_0xFDzc z5abY(CCM0N`KVZPT0Xe|p~dyts8#fsDwtVY>qy(KiJ$c7|4egqMw$#tOg8@H5s_oV zl;Ks;@H(FUSmJI>qxo?D`StcuY8yecH+vyRJ}??76dvjZGd9T!n+7NB?J1XNqNiJ5 ztb_TkHBe5Zjqj%LY*OC!!wEI>`m-j6B+EQ9V{|MJ1}0cPq3eLWTl0hE02Y@@RH{vK zk+k-7jp?=02%Y!wM5emJTT5aIyp#Iq;#N@8u9g$>t%audWVMCw0@flt+a~PLsoe0j;4&X+h zc-H9`xs_9%VEAYzNXhlIkkY!5vv6gq-SIV^_f7Y*Nr(P#{F920AnE@`1VOw2$jc@* z%G-eB{!^h92?$Ie&L52JoP1Q$E+9II2stQ=#Qzd8asv5mLJv0XEfEb}r zm#m<#F|-I&d@KeznJ{%oI#^$9T1;PwJor8>*5i&FHy-7UmW9IW8JQ>K@S0`pq z55EO2W?Azmwhu?*i`nYnzB3bJup591OR9Y4z!&C_JHO8s@|@K0&(A?MOgk|I^)d*r zAj<8S$!!m}knQ`*q>!-R_Yu>%li+yik}JZa7GBS24N(RU8Yo5b+L(&6yS*{ouLEbN7M#GVJCKD9IJ^&u4VmhZI zEl(ZI4Q~bG$&s==Dj+2<_IEc+n z!cUY_l>N+S8HW%tApnyH`zzqd=QQlZ+y*4U{{36tQwJZ1#fd)+Gc?>*T~%D~4sTRI z)_3cF9o&@%HiMZ-4;~sA)JoSbD%KE0lk01T5F;S?7t1>hXF|n|j|MIJDujg)p}-aV zO2cFX1x-pa>(@GM`K*gcu2HUvT8^BD&jn^4>_B+hVuq3K!gx(JtAWQ8=-hsf{uSA@ zQd>-vUqnrBp3O*=2CF#Ys4sUrOz>iC3|4=%7}x16oCKzC?3Ml2vjK}RpVn_M=Iiwl z61TBAid=OGKUDyCJDu!x$){1WISYN2k-@RPN4flo-J=l;YHI4<_s|C*|5T8(*$>E) z{ATx|WAFA;n0@|cO(op#JhW8$rXL1YS)CK!ihYpiNB+F$5g46agGuY_qXXwElWTi? zW#bWC(gaj3E_t?5)ubP4jSl@8WQCs<2S8%~I?*Qs@&ElK;*5}>mh2s-{PZ?)g}=qB zcTCu83il-p!IDGb+=8FHCF~M2ST0Yp^KxgH-g>i#X1w=`3%FOn0=q$lNpJ$-M*8s1 z+90;BAVc*@2)QA07t)G+enER$qb#+W=$jLev1cWD6{Z7N8y(o>ZS|$ups{69Kf%`> zB)OdLa;ld@H2m#QPa%uwnsgvXJiOkQO=FUcO+y5+ZtdU#edCkLdfWQ&7=a-8t)k*ooXy|R1aSGC zKHxBPZujTPB7MMc5C*g(OstL;;m^NokvOZ0C&nIgutl$p^})uq3s|AImJ1V?qrE#d zYe#kmJl3uuFbmZM{E9=q9-F1%A#AsJaei;_SOW^qUu=I>c6pV9`c1BH+eXeJ^q@IT#;_Xa)8YnS+KXcvXDKgpY{vGQRD-HPK~P9V-t|pbjhf z4TTJUDR-nt3dYFG$PQMjlOQ55=dQ~wchiu$-f>P0r}AKwrU-WMtU>ay4D4eU8f$>D z6n;s{S%2nPY4FxF+K{}UshLfHvFGQv^r`{XTNlOlidn%{=!a^ZO4~D_Q?=?0_w39n zHI=b0p((@BqR$_q% zfvs4gr=XXCx_fZD-@<#t7#v7erb1J*e zAI8;-*KTgZi*ZY-(q`a$9LbF3GDo_dM3;!Pk(4?Ugu~1xBqv|YO}EY;lL-VK_W>)G z`v3fvxB%1{x_|;F2okVQzH1pP9nUny+!a-k(%tvbs8Tt z%{+$Y5+kF;hm8aT36a?48c@}@>kTHu;tFOklcPp%Ffi<>1hZmYuZ zg)x)>OPWAP-#dQ^ET%W<`%Z^35@J87v?Yn>HM=Osc7MQ2Z+ECbmu#L~d1zn{U8EF2 z7C(oPkkA`^?!0Jy-0bLQ6VvJCK{AD)XnZ)iX4u`iYk2=*?qa%74#35!0Le7c;7E}b zP9U(4G%(zj4F~W+2-?^%00OS4s3?%~IhYJ-t=;-c9-KO+bpa9fAxUb{yQQez$d`re z*0{34fzyN<2)ZLJ0U4osX~BP*&Ym>9(~RqXV&Tud2h3pJ>OmihKBfC?g7#6|H7k7OTWu{ zZm{QE?75=jPwJotQznJM9>PFS*kAHwUBSMfWaOwr6^U{Pr75ppV4Y0t6PiZa9T&_J zo1Vqr*q`Avhhkd|+cx_-4(eNLcmAo@`7R2I(aVY9ITIEkDvd^gE>` zAQ&p^%d|!hTolwI$e0Uu2NF z8Xi8cszDtK35VqeH2a^?LWK~qp@A#oT_eF(#+b9JTe)5}Oe65^8>nyuPB17KM5gE4 zD;?HM1}v}{Ea1-{SsYFW*mwB=uoTH^djL^~i&+?0tx<0OnWSNgQ>bmCi}F>xGBgwgqT!LF<;CGYvJy9d;olLS%zvOAqr;%` z_Kan+(B`ViNGN~N#qQDZ*M68_%W2*8dSy)-n{lDiazPrfHCVA+GaOLJVZu`fBZ67B zTtEPd3wR*oVY-saSBs|QV|()vmNK+nJbzY=`gcP;qUS>@^>AOEeUsOiF26yu!wDHp)OFQ{^c`C{(So7r{Z+ zpK4BG?&zDMKWdUV=LrjgleLAD#BgED?q5QMTe!n#WV zYlO9HP(*jwG!2f_0H^{6W7s_-AHFPu9;BE{gFSQD!yqT;b9i&2iw%-E!h-X-%X|LB z4Z^|djH^@a-xg>+FcE8{7;r*~O*#uZI`R2XuORwoZFa+N@RQdu$@{!K5}?kdx}G-5 zj9_%>R%!$IWGD-#Rn 
zOVbG}`-{&06yxCT%qNEIzKN^76Ghu!NF-cl4swfleuZy&1U4urXgPmuB&6};rUy$l z9+ZLuBZo_7DEIkxNHJnzd8hi>P9H#Flets!Ea>HV< zm#chAqWo+aJl}!snZ+&2Z@xRoki3Gfre8ohAhkOR*Odn+J_oGVi+Q9lYocltO^{(K~ ziYh5{9MpvcN&{o*a#|)4aUaA{8rn8mm?1*k8>0yA`&>$Uzr29$Ka7!CT10=KATZbr zNpK2ZCY^TKAi|Q!;ZX;088D#>+2`vv?h1G+(Z!N{qoYzwqddgDNl{Y~C8RXl{GbF$ z%5PAEHGI-(79s%n86<5jFItf@EQGsf>nqdDqX;<@W{-&RP(NaZR?MIuL^-;>x1@c> zcp$*U1F{L;E(LLNpan+Pa>I`W+~N^B-2uH!@u@%uCYb^(wm_#91`H{HlP3cB%7{sZ zXMw4mpt6~p38d2^I!L*vEaO2$d%|x@USN~Cu)cQxP3T*J`%g_FjxW3dt}R;fl7pOP zV!eXTF|*kuV0SpuJ#f@4ZD%3Q<{d}=G}+}sT>5UUUaU2hQC7ydvfZfx;6*3EudjzDV8w4aJq`Rea0Ed?DmJkG_lx`WirCYi?M;fF- zq`Rg2+q`zuAhE4{&#ow3ubth*ns+sP`#S6_m`SnI2c24J?}S>?S3-{b?&bEQ&$~ z5I+IhG{R+K|3}QU=`fIN29R_Exm7s@u(2*nE@QwQ@i8IqF@xpt$^a~RpovZpQjr?X z_MvO`?igfTiW&La60AER3mdaz-=_v=0+G!>{@dd+H?RLLz9ehV*eoY>>v_TA#u=xO zM(!T04I0^w2`;eECiw8nou0>T&L6O!fO&Cyw^0DZ9ZHewEo!;Dy^~&}Gl~N-=y>Fp zpQu?c25Dfrvbw)l5=4(yKrBZ%JGYrcV10s%gs8&b(7}P?>XUvC1QRJ^^_r-yW;`!b zuk}i{saEW$q2dzt^_H={f!z{?DGYK9w6KpG81bI2u3nd90&^rZE_%_V%N*3X!N!sl z5RmMyC~Cs(%AwZf7GXKPS;&gJ85>{xHUQ%5ZUzOmTp+Q|9i3=9Kxi4pXVa1FrU16aF9fZr5D?e{Yd zG9LbZ2cM0)nPdl&<~a*$)N5$?9@y>r9pnEB*I*H}fA=gJ#SMJtir9x=WOpwX-Rvwp z-8fu$Tg%hpCpI@WjPI_`+_o}<40&Yk$!cYn|GRg&Lk>RA+(;oo6Uf4=2g8hZaWQ4 z^XPn2RyN$Y#(^iaq1@NK_9AnWtWodTf>zF%8ET+THhmLUhB1zD3=EqldRP3k1 z*NbX@U>nK3e<%1&IcKME*Hpp`mh;-8)p-NAW_yP0>r@AvbKG-QE_2xN`1tcKhgUY8kS;Zm1;6{lG3eD0%Ot{cN0vN zi@L?h2W(DNaTAQL9fU>?G;5h8i?&)+@Kbb7`fiH)h8_0P?9knq4cHLOO}2b!l?l8e zS3hQXR;S4#2F{nHqdWwWDf;GBFC@S{fs1(^9oOH zd{o3CFM#$m`ofGD(b?I_kT)>hN*l0KPqA-AiHnQFPQwIMS69pSV+2vJwCd3U9CUz% zj45(=3c({F_-vM2=Lk;LIX1o4FOgk{(lNO}n>qNJaO;SB$BX|L-wOEm8d`gP!)0hvwY7&%0MWerZh+Iva0OYFp}MB%4xEhhTygayH` zLe^pjc%O|C;UyY}^c6Pa%_do|KxmsFA_;SjG)d{pl_F8RPbf4&6d8!2Bxb88B&&hg zsi7i54n^>nZJh1Dah8^G+bQ~ed3SwLqXO~Pb6NE&7<7Wn5et%KpC!ELH*%}RZ)>b> zTRW*XIe}Of3ev_(ykV;>87`P}F|{byrxL2^&M(npfX_AQFPfMp3Nus>&lhR!{Q&|= z$KV$xNd!2xj39}YwIxzONAZ3M#@(aP;eP8M3ecuOevFc{9HW|;n2>z`OX z!DYVZOyj_kt;3xzoTW($heX84i8;39fMe|#Vx@32oR|l|2xSX-wMsUQoJXD5OLwSC z4v+anLqb4)YbA7JECw((^X^@iPliY5si^l7&rqDrA*G)(UaK9AX*_~G5Ra4Y0V+g zh>Tf8-2B8O$_jy2@*Gd_tr$x+;=cS=RFgw5)3sRR(9+h3DRGIDQPI?zRAA+xe40OHgG5j}zbKT+@li!D#efqV1q?}3>%pEi|N z;3tZXiOHVNZvvXXN9j`-HnsE;K%bRycCH%TuyDiECOHOXk+a@{m7dp|QnD3aUCwOy z=5R!ye<1-S73qKQUVp#B`5$tPoNZZ}>-MQq`U*mk3j|ghqX6zzVcG|^Px}CnDBX4n z^J!cXQh5>Enw-7!aa9%~HT2`n~90VzJmbyRe$-~6=n=j8A`f=ps7 zpKdy?$sU(YK0h*y!GWQtOglcOTdI$SAC}hE&3qL_IBO@W~@EG3_nE%|EqYEAnNrw?-ON$C$P6_-x7Y+pa*xw?6-%a zF2xE~@>Zt7PjNqa#{)0&yn}%t+WPH}-r^|5oQ=H1BZCvuw==C5T*6Jk@UW7e6|zt@JML8r4_ka z!_{?Pk3+4vbLk{z9{KsnYf2yj8<}0m?O#|N>%S+0tyUxf9LMZ{Geei(uR{_NMSVo# zS@wT?>q>rfSOZ9`yu+doo{!ogXl-T#vD?9?T|@Q?s=nO#h=Ma{pHhv*yPcL({{jty ze5x2oDKkN$_nAqLvj{w_G<}$PxaC>2!J0Hhc_rRuwT9H?j_iontM&&CxS{)U$jg~A zjOGdC`rArJZqwYAq@pzK#GTI&+Ih}U5z!QAsGJ|Jf3C-Z-Av;_+tNurNHS)Up%^!T zcD}X9R2Q*WokK{y>c-T$uwQA289Yn!e)?CyD;t+e{>g7oK{WZB9mWcH{rCsq%=>KY zWbAW_xLALMo3ogSr)Cx=CfTO10I_-(KYSAa5#X;%8&78(~9@R)7~ z)Ifk!QGHJ7*iuJm^_?5{G2gAK8UMqjVSCQ`Dal`<+Sc~4sOR5~w=v|beg_O%mnl{AIreLo8tA6$y+pJFRP|(#iN7 zK#z>Y<5`Q0CmjOJ(XH&1atC5?`ZlG2kzN4KCXt3@X|@cwYuCMx@p zrq-`WcKJ!YM@-Hp@oa~!6GJxvB_~nxWMJduq&*(d&<4%8{}*nI70A8@ z9<0HW(cf&8(Nc(0e2zZiuzDf{=5=$}FEX)o3ZOag1!&LC%i{o&dWsQd8ha~$wCy)^ z`41NWL}MTpN5uA9AWh3GScC-L5)~XLqACZb1kf2Oub$86C%isEt2cGX*a`VLl=HTM z1fc`C%Nlh>UfE>ywvq@OJXC7Hw%#i(gUQ6jLF*is+CTk|^I_JH=Lz^$*6)66QZb-9GXMoOrHqY3L%ez&ri@y7DZ2{qadY{cB9^6pshlRpUFQ0pEr@f#$Oj zqNcxg3Bm~fmdW1{bzLYhg%;ufR*-dfkKB9m3a%ApE5QKLZ*(yzkM702;n5;8WlVU4 zgsa>68NEmpo|Lw#HZ?l5RB>j+C5;axx@=YjmNi*%96Oqt41$x!rz#DTgT<3!{nno$ zR5Mwxie**Ok(z~$+Ire6SY(GOH3$;ZkrkEU$e0mOzMr(;^B5}Ns^G#b)^bUG=k3Jc 
zDB~irp6{Z7oZ!Uz?lH^HhJpev9Z^scR~&8&0`kbMD{(>B7l%>( z{qhcE^b@%`IbX}mu@2iFMEVB?sP*vC5#i~S)5C4bS_lA=&Zh*utU`_qph7RZJ#d zwkCqZ{I=Gf2*p$qK0}Ne^#CXbtNl!6l0v{`@n{_Us80lZf{@REN}eZ0>33?afdMbP z<_H;52r`~`f0UV-QP>-IMLbshT46>vQ0l3crDm~aA@0gVf9iXTn)?Svc#K}FFgXg$ z*!Lg@@Vynjs!}RNA^i?N-c+^8mjtQ}De!(; z_0^--?R+y5dmuhMY6Wm^px!}5>mN|D9xzbis#hi!v5GOoHUBduIEe0{7^jesJsv1?<)6Q`NE}R zYis3y?E|F6zLSV&i10uxnd|->{@Ho8&`5Sx7TlxdJrGC~R$0jbSoLB8dSBHD+*iP# z`i*28hmApE0ArFZ_)z7&SFUc7Lr+}jtx)$Zv#i7S>e>a*Yxcih+V8(Yon&nU$w6PU zmvXESn-FR2Dy;_bkROct>h9(O)*r`2JKxva4_pO7Z{M}x0}R=~MGAm#X{OCbh7q!s zD(u4N-E^7dt1yb2x?=KtnRq~fiq$NOlca+oI-bN#51WMVmj;@r?s)En7(Rwr3ueI) zNL|0Z)VO;^iWD<-%@>XbFs{y_o7k;3HJ8-R@Th2y6%q^uH#ufaQ4bn8TT@7|w6(nhpg|xl{Cf^OUEb_qNU!9v*l7?AB8bOfCvJf(rH~gwfkPpL; znP3JTv5&84`n7^&`qRupd9r}KoHpBV+mvTOS(0jL7Z3z_=t78GrCEU}X3d7*y9zq( zx#s9FvsEzbdY|PRc?gj|9ymaW)&+Ru0niwmsj2B!+UtZz_-qZT@CA_Uv9{g5x%?2A zuw34y?W^aTWIe9G39RWG0d^!>mh#+37aU7*hRu+9g30Cybh-wZ|VY5O8 z%jM<9pHA3l;wF(8L#UM0CG&I;^Z1y!tos1u>?{ zAyfHyfdKtW7K!K3M!L8OLe!ua0qkdSo#B_cUqWSFUCFmlUU@AO3oaw%>k3IT^3%Gk zR5~oEykvWUB1Ht)X_tyL=EAIbOeQgTA2d3#n_Fz{wn%1RoaGEZp{LCyjMwP@#PNd+0}ZyDvS; z{%etGHq+nK zP4S;q>#%dNV^3yzG)?*wCJ<6l&tkanGccX}KDX?)K={G=l83vlZ1PLeXfayM0t4&K zZTmwiLh{OG^I&=&6bZ+jk`y~wv4$u@poi$UfE}|mqTi1}x5Jy!^K|vxCbsj>ZxN1Z z+6%lEUzEl?-|Az0ZOLwwlc?{bZxG5bzrxCo&kn6Hql(q+6@)lBTEY+kHs85^*kV*g zhwb4nIsknuV+U7@Lxd2iJ&w`+BJ#l;#>1?`R0psuD)&~wxyK`H9%}6Ua_}v>&H9P{*~E6jNA#diTV75 zti3pvzfE43jps*#rO2@YlLl+x34Ch*ZK~ThF?4#y6Owh--A|nMw@YqONQ|G~p7b7t z<{1$65wCT6B1v!h*LIzH=W84POGjVO#@H6_ zfsh0glADoL61~r0Q#Tv)MUbny%3Tna`E&oEN<1bcdlWC~#fF#!e6kREjp`S>s^$Jq z@Q#|^BIc$R)U&qnrfxedFOEo-91IV9pIn|5s3s<*JxkeJL;<9;BSI(W8-q!HX@Vc5 zxm=mUNw`tVTCW*_K$3t)4?vR>w5juNcM0%T(k0lp1ov z*K-+VrCdDV)HFSd!m}B~rOfIuFLOjTE;2P&k&Z|}n&H1WN>0%;dJ21=Qs?@FIp&!L zr{34o>Wyj04vC{ zH;qZ4eZB#VH5?XPrk8;-DNxz0Y2NLTYy0RX&XzJHu2<1O{4T_mY72FtP+{bAXw8QQ zyuUbH^S`tbq~#|bU*p4nIXJr{)F66CCjq=T|4EXR7#3Dm*fZXrzCh8E8To{bpB=jj z0x>cs+B@?Yqr+jCEyTE8o+9Q@#UHeuz)6obwd+k@E~H}Qeo9Yo4T;H0brK30#J2#x z{xSKJhL7T*M8;R*_Mq_V8w_p!;n@_O#E3Txv0q|KgQF#n-nbVMFby@vqRtd>+`S(q zmto-Jo@RKD5>r7U?IgeW+2QR}=$yOZ(osgcwK9RufPlbg$BC$QCPQjniI1u?=gvZ= z?IA)owkQQV9NNk6FPrZfZ;HcT5GFUCzypM+#Pp;P+H7f~_fC~GfvbFg%@`^`hHP28 z@DWN9a_6`QfE$G!5m$|`gE{x%KPb>n7d8H@lNauPja?q;yWTTIqZ!SsDOc$?k=!X& zm6bSXge-tuIzR;=lnsIvx)Abl(YwC>Jd{a#xf*0fe0xR68Ewg1h%A5QTCUUiM!|Et z#~(WZr=gHTqkazzkSiB|XxO1UUWvW6eP?N4EQ*Fl9p0?=@vt$yrLgw^mqpHf%isNa zz6s8#$FN)V`7ro=|#c(&A`#b=5Pl_HJ@0g`U;qgDq< zJy7?zc>WxinLYX<_*YU0=_`aPYgkb9l8O8cTimky7s8EUYoXj-vb7p$Wmjg}?RnyC z`ElasPu*}*UX~q4fEDb0lCt;}&_x#jwFV2YI$zs|KJbp1lT`I!l`ZMW42w zZaS=xim5AfNVZCldydN8+`>V4#i^8cJ48`cn^}1E_Ia_*nURH@?0W&o zH8rhsf#R0p^zAJXT-jY_qjAh^(}gO ze?}}9PML*Lj?SFc0s{U{jgz6vAt8>0$;;Om3X0t$GLhXgv3PQdfN9mER~O(s!GxWr zT+IxS6#**=@rLg|f+gYu?URaGN7H;H|Efg)A8D)V{LwFIEgd=B=AElU>1l7 zIK$%gGyBdK7JjesKWOvbbnSZ8?@gML5NV*t{hH=#0wF=(ODG-2fSo5Gt`@BMHi{Wd z&G|-$o4AO4dS^#W$%LuDIAzS}A#)Q4slen@3nKof(zCsx1EWb8_tCY)Dkcf1phLG! 
zKUhq(338Z{!zirYlRvDWfewTwPUG7b&n-c*1V+Pgdt{NT%u3zDW5Zv_9wx9(OuDyN zp52)A5BmoW3fYK5N`3IeAuLpWWo2fwsf4Cx4BqdG<(G&8^41#ru64qtb-CTxHZVyL za7i20S5L0WJ+GV|MSM$qELkTJvB2mLm;tE0xv0n%r@n@Xy}P95_`~g?(g_!C0sVa@ z*fQ5GISRhr@Zj=%R~i?)Ih;m%MpRl_>b4v}NaHdNdBfIjj@zH_DUy1A6HwV>+HO*p zhDkl}pIwjalj7epfPe|AxJQ-e=YLXXoTTjr*pu;TYQU`8Fq~ zpjRdB?HE==_)}k$$4xO`k(nd;dv6plZ2k)LBY>NyZ=Y$XwVelG!-fUS=_S?G@j*+z zHBZ!~2`fE$qRXA>#rXgLF7W-cAlW^O;AnUsW1&coU^p0=D6=fTuJVuAZqGxxy@AWBUasNfYh!3Wm{X@ zaP}xgzemAxCnDOuYPmL=`dCK}rC;Z5z{bU|kC-e8XvLakfq*{Mveg0Lr8DbPBR^Il zAYY`YQk1y*Z&tFpy;1)9If5T=+e-iGK>oW&jX)->jqI1%`Tbks*)M3~^AwYC#v^Wl z$*Ihn^wd}yhQoucFSgom*=U~E=?ZWF0tQK*vP-`oQVo>PH4kwMY*zt6i;W(i##r!b z+z_94VR9P@7d;m4pqiF!FjI`_PYVp?VRd6*j!AU((2A#*dBe8&(nG3ZP$ebfilCIT4Os zx80|p8lw}RCj{1+`cq_TS44m)9!`rWjIM8zc)hK(^WKj#2= z@4uu^9wMUtC}OVZZ$1;uX2J&Alcu&O6Cq0hr+2qD3S`;=&7BsIHKOl%EXB7r+rVB4 zOmBn$WYq!b{>w(OKcns}2Qt9{^W7)u!%zSA%>I9$5U`(EFu^J>262bvLkAQdw%oaO zZ-m(P*TqI4nNy<`Cj|`I`VMt=+L{`%S2)asvyo~iqLWMqKAD>noE~3ZUp{dz_jp#n zkQOG?_6(iK%?)F}nW8k6LO)#w&GYED(A=^N)21I1*&Ltf95DuYx&+#$1QH+Du|AiR z$iy*uH6qP>B)-_|dDR_ETc+J7SVzosY((a=?P@YO!^-l#e7{gEYzS-fgq&U1yEmmu zFZXTmEw_E{u2EB*fBZ&uMt$8Y zRjzI)35Bq?G_FnPC=4$#eBN3cV-R$f9J$@M%4AqD6KoZSgs~2%M#uUt~9V~)!Cqwpk5a}-}$iv2ROH^AgOof{UjNHHzCbo;MdT)lS=AXKc4m)0c z%a-_YUwoC#PBZYtL6de}I#iiAyKrcH-H|vjfc*uov;_wG zl-hf>eZLWP>X0NZF6@gt+n0mAm#NM>BDfrNA>?Dd2>Qz(swTQI$tRnU*6%S=&p&+q zyemdL{VDsNC-z$J$S;Uc2;=6hV|^jMO$sC6`BXxD^5%zQ1@&|41&qW6)$a-qvsdWV z4g9xfcZ|)g#i}cC&!o}J^=}ZMoC0;Nrf#`m3ARd10|yAF0r@$4EH74%o8=;G*JXP{ z`NB9y$qLtEVTE#FDaEhCOdpuf!@OGnPl7FqY_i?QEAxRk0gYZq$Nw6rK3B(oyoc>R z@{}>)mwx=zP1GVV(9l3Vdt7!v0rF@KPZXioR^tpPQBkvSL9SZG0IGA{7Mq#&7wrzN zdh)l|M$Vc>?rKHG{5ms16Klsq+8gYVl!|f< zYyvla?1>#n(|b0sZDk7a`99Ww`^D$)db4*USL&uQ{1$$kF-x~`-`A}_GaQYo_?j^W zN0j>(`=c8MjGqwWN~fZ0I{0}j-Goybv_9};DYtRlt?+xb&#-ShFZ`0~81KVCWFS!N#RIzz(nUF{}#JxhGE}|DQ z>ykBjU#AwCF5AWqAW}By(v6m9TEf6F-^M4+eVEOtd^sYc>u(-z(yd_0NGkBQ zwWJ8zEU{^3)v@pbyb=TlpFV;5~1teJm)j&L_jCw z!=hJCM*xvk@MQKZ1&l8S?EOv*-V^-!97mnHF!k@~asA(dH|12Hb#}Le0h3&BF=rEOyF&ntjjALRaG(R zH*%5DF?Beyv@4-*d2!kG0f#k!yEB;&RJ$gm6%*8+%z+Re`+|-C#?Ru}QoFAO94p(> zlcP>&BKVbFOR0mse3hDgIWB^hSK)f^i=GWklXrBX1-*DRR>I@eUA=MDkp!BW&o!dz z9V{m@b1;nU_>`et`2^H+&trSSy;2h?pSmPa*RbAh(pLBMrdUGT5}ui7a2VX3gN_tP zH$TeSfsp;_35S=M{Y9!+N=nz;Ceq{IwH|6*FE?fT$O5⪚-3TJ}!u6($ZTen_ww_9ZmC0}l-POKoC8sFI~jUg=>c zd)!&9uYK)0gt?A!3K{?xx+0(?1QHIktE=R(!oZ*zqheM3v;{fkWa<8Y%sEp2d0{B8 z@mgL7hiW?hoT0G_F`J)@jKlzfo26gUBic6~+P@1*K|>jG^n+GhW_4t~vU$>dVtvU^ zZa`kr+ceK~boI&F&C{AgeF}CMG`$-zvMF&t(ZptYo8>!dDrjadW)Jtep$DNh+YC-S z<@ec#jYZc+WTVwNFi-*-5>eW8?z3q_RT}QzEx~uzwf6WeKHR({Z+3n8+xa=Wf#3D9 zKA!@5`P6$ark~&1LG?qaVfTbO$7T}3=Lxk-lIntd5vAHK-sFoJ*078GxqwElAndM6 z=I;Y+CJv3nvvPDfg5lp~xjK!ue-?3e7>lLMNAd6mxv~a9T=Zin-5s)eBZTSa9h>J$ zn@DZXzse=LZnwzp^5Tds2@`n9O`7JbQ1CQCJn)K7_ZW3%<10|WR&AHEVAZrgZ8&N2 zP;A#p_3ui-JL?D{Gnqqy640P!_c3Mi?JG3HN}81k%_zXB25xg1mc1M5IHK;J$RQ+W z7g&DuF!Cz}22)MFddN6vkf}3Ws9b~3FoZ`WWlNk{a(JbQ{Nj@-KLL+F?W`uxmAM4e zjCcBgcagTw9G(;>M(lgV43Tl97$4anHaxrGQ2<-K*}#<7VlwfN;@y4X_CZl=o&^`R z!fpD+F89=Xmq`fsf-rT%;)z;=?|B3^m(W2FlhewgnS%AnsDln@1{AI+S@E!id`?S| z25&T9>y@o~8XR8&<#F0BxJhM!ozIsw0I=^qhgg&zQWs#@zrBPKvHG(VRn3++7{`j zeKY3^RMRyt>K1dfs^`4S@h$>D^o%=HufW-a&qQV5a3h!X$Hd zxIi>{4k1V1bw|!cBhooaz8*deD()00WbuQyQGf~ui9*Zpfh~Tr`Nu#i?7jU%%{t-A zaBh73JV$frXmuB@T>U{#%u&_d4SUjU*5D&K@u%y&omQo(99=dDRHD~J01Q|&8ofq& zWAVq56e`au=yToZ|Mcisod1G}RWY)zovAY+h8CS%)rBxa18NCRvwye%*#o60eb?6> z^@AM#Z$^7cYY;(qP=Blht;!M>iaLG1Y483yF8h&4Makn)1=vxOW~2#~F^kYG)Zbl% zg21iiCA+(ll8JG=;ZP?UG%2-lQ0Yb0xOZa6wxY{tq|l#uwE5jQQJqEO6?@Q+jt)1X 
zCQelB5hkfm+C1+01g^X4txR66q0=1A{3M#=4OFeV>;68h<(s+sIe5QETYs(RI?hb1 ztr8u?h8>bw=tYL~R%UEMB+^6}ck6kiX=*mj01lCf_)Tad#9`awP@OhEKA^aLzkTxyytU7Jc5w%B*rXfMvn1nuVlx{ zS6rpW&A%oCh(5f?P}PsTeb1?*{AH92GbUJN<6?EMh_QL<7X~GJ+$B7wkT39S1QEkT zR>L`$E^e?<13(~(OGLX}QdZF*3OrOwO)m~zHty%O z*i2zef)Q9B;R7#5>*+H^bPCA4=#2(1qBPazO5Dkk%YbY`bO3ubH8eKOigzT@l3Zff z?1g0L_%7Fyi&0NATuGwl*LW$qRm)wpOpBMFTchRB!@_;x3_ zBkCIA!J3JC2A(&Jeq~lLp!#b7mG|F^f@7Ys;{uoXd^Tw|VsxJ%Kf%t@@_WH_>zb0A z925&3F~BQhk&jTJsDZ)eRRzVx0Mhl5pWTq(bz|7Y;+~- zZ(iUbMmIPqLmYZuT6;Jh2|9X;h}XtAiQVD5qqNVG3fD|S?wl%_zi0;F6F%S}8C+?i zyBw;UG#__RdH}fzyVI{ERoU(y?!%eGv-FG$fPlnTz}dtYSsN9EH8e7;QNH&28v9_6SA(dKj3~ zZ2OcZ{b_F!pM0<4*`=%^O`UCdm0W7+#d4g!1tbk|hK`SoJpmyjzS3d2TeErk) z^)sv}VhZMOAG;MPUM-l^n~ZyVZfhQr(&Qt2{k7YJFD0dApz`%wo7MRef4`YCoY>jz z+30VUn$_Y5tDtTieA=^UtP~Ct0w@G89fwFLU8rQ{2wsg#m$F-ALL~6r-8JRh4QEr3 za3y8ns-`~{+SqYWmQu3sM9YFeRrn8uVytY8pnZLq`UC#PsgyY09QHrn zs=JC~W#DivGx)uR>S=kOPcLB(cU1JsOp-StcGchTA|0P8zv^C@6S|qU?Q~N@ml44i z-#^huX#8OiG>L;kq>UwY!H=y-kgBD0^ixpS?I>V1aNGJ}qw1n_cLd3|TV(S4@#Yum zv^_Ab??EB^0A#*+#KUyuV9$_|R*BIeamqzNsZ>sO{F)#s#~d0{MnAB^O^U?KNj-GK zTkK%W!IjpnQtRsSftqZ5>V>|lPUsswo_y`pLS1=ayW?Ghslo1)zRAr()%4R~5r7B< zM9cXDJWVq$AXyU$0wVP>w}0 zFhWS1ue}i~cdqu5KtfYi!GnS2ExM04Ev2K4qkK`ie;UV^M0R+vd^{S*xTI6R>kX~p zSbs`|=UKuF$IljSLZR;-?y+23MZcW$!}(h3lfk~B8aY_G#(#fa9JahgPe1Cadg-q) zOux9*U6w9t|9!1_FYrNwmi}uhh1r1Q)NvCtCxuJWdjjq7-eQ%YJ$clgCc%ocUnGFm76H0R2GHw1e^Z_z+%Ve+K8E@;v8r~2fvRVth zsj;ac4`;Zv2xmo+R>GNaHfafF1I^riUt}oKYvsIwd_&=L>j*O$gJ@|5&&}xo5ii2= zj?6aQWv>U(Y5tjkR{cwJt%Bk@aShOu$w!5t& zpLE>G)QcC~aoNU!?8$GkLgnwH!wVGUQeN^Vq;eSa5}C4??>9$ixg0DwG~HRZ#f(vR_+XsM4f9IvEqW1Ys;9N_ZG z=2vP$pIoilL8iq=HR&rS`6ip&;RSs7lNvGMH&RBYe68q(m=BB9eRG!?kUkmNOY&8Y?q`Zx8QIqb(!;x(92cDCsHukB7~U7L7$= z5&3RhKq4jBNx&}zze*sGtP6zK+k-xQe}7-SDs(mhacHU$9WX<=ywBfRa<1ssNfql) zS$6C#Uyl&EJq-Yd2O4*ZptOr;VCFydL;h~%Ju+r0@SVExjK5ehuk>lEB7Hxl<7c(v zVfzSrbrRLD)xmEIiSjTaDfaAARsVnJ^9vulSb zBQvFv*=aye$uZ;c2(uk2qbq*R&xcLxe+1~=wz40EKq{J=`63peYds@1X?T8u#Nn(> zYxMBEZ?ykAv=ih0fi4mQ*bLVY&b>O#a_)$#TZp43M@NO;R19l92a;Ax?dj;~8~`R6 zsu^!OFA~%4Z3wec>qj&Umw=-*yZssG!TLa*ih+QtlM~eg@Ucak#4J{5&uC`|*hil~ zbc6Y6O!o_pCQ8FEiLrR5S>H4{e&XE~CZt~1@T)JQu#d-#?8LUK=n7HnPA&TBz4N4y z@ROOi*Z(yg75Ry+^}H|OWxv8ho3tb5Xq`3A9aHv8p*`_mj=aqhA%57;I3z@O4GMXsIhCC(Xf#<_T~-LaL!qjw0T zDySj(^|4jGwM;&g-P2ZA%?t%oMo%UcZk3dj;MK~k%c*_NFc!Yau;N^TI8nLa{}}@t z=e_=GUuCR86}535zck^J$KI)^lC#10JgHTG$P`OtLtIPVR25LyG+CY9#AMte!2eX* z4&-2Mt#S)y71|Nz!(VSR77~wMWad*Ni0FqGT560jwO2a z*k4R7Z5*w4opqwWn4!#CNv70lV5gfWrQWL5OcAGH?L38}<$JgA;rRR;MRu`*pfb{1 z*inRmk8jhrx53xHeftvMbp1q^P{<zbrFw!(h3ez+7^= z0nv=aLG}CO=&F>-VY6wz0Y_^Y+NSQIaYhBPjeMMBAxbCx!%y?I7;4r`JMkLg&!C&{ zY_;&_+;6X&+e?6^vzxe!lx{Y48xnJF zrKqUE3`Sp}534qNO`Ap@#7ACYe1`ZB|2LZUzf&bRHaC2j_>1te8~VnX(=lo)?EUNB z&$5|~3lkImy}i;hb{BiIet>;_(R?M%a#Kdhr;)Vlz{`1=UdR2Gq56khpnfemb#ri+ zLjeyB_>~xFVyI9=j@J12NH*{75EvK_u5n5A39MF)R6l(-xu^ItDN)wcD!HW3DluI{ zFJ;_w2rP0kzKgo_)A^ti8uJZLdYTijqmEo8xDLbX^w3d;ZuN2mhCxbW(UoLjl1wX0;DW{Tx&b;9HlitW$}|J5;K0vtyXSQ56WnZ&vr?bL(hMtH z*lGRS(l~3X=rhbZn&s2qPBmZ}oQ@Hn`;*wRu5d#Rc#YRr;+Rp#Z(J_(23E=r_U`f9 z1FJ~>M$sn{{M$Xs&sgpVM4Fgo_D;Mt4J!;8Kiw@wQAeH!1hR(xi7_!T^}q(kjUq^{iGnsNq zPc*d}K~n1qlXl|>O2UeyG5UeBbjEKG6>0M`2lx2vdh?lsRZ*A{d>I7lX>()rMdYv zmx}m;&xw-->_geLyLoz78MTqY?Q3p{{YH9vX^G^d+t@qn&KcNM>Xt=sqN#_8TwQ|? 
zmrsg|pHVqUqbbqb^}tgL4VQik3=2B}bhTe=YPi>JCU7G{iFXdHTD?pkns|AAOv-WV zng5d1qxt{g>Y3APfSr)DW;C7Hm)3r$SQ#WXQbuJB~42w*5jNUh_+s( z@DC?20!8W7YfX;gZPLgk18--n2v|>4WQ=kf32E6ikLhe2hk^9_7-9Gj1B0SL8nZJ0Q75+~dfn55R5zUU4q^*sFzP-@vNzHlqx5@ zSr+^P>%($Zg@90;50`2u5y0|d0Yrk70!TL!j7BeI8{Y0>Su4--#7v)J6s4ox8FBVB~>Q{xaKfv%~s zxgsJ8=gHrRY_PQAg^E`nzH*$n$}Xcplox+aLsNjGjvedKmxL$5!Ifm*0?t)csjbw#tK%YOus?(7`tD`R$6bic29B&I&vJm`MhuZiqM1}!YCG=96PNb+|jy)R}UWJ9ks#y5DFFIgmb03CX7LQ&DB(6OE zDgY$~h|(k?Cr4B;*Ku#rfBn7|Q*ov6$>y#^Khz%r_oM_FaXGz+*SH^c7ipQj#foPn7zoDxKjL7_3wdeL&Y3`l<^)Yao+zFS&ZVb>0roI{H9kyh_r zbObNDvGf0XDgLdknT+n}3l_<$2W@o(^LIa(9ZDCuJmZZyI=de)XVx{IAq?wb$R%w`PbF#PPNm*iD?)_OG`)Ay8?<^{o_tp@%1~8-0&9XJ&~1LXM08v z1#H}`lqv4LoMR~Y_3T+$8n2tQDt1UD?%r1+?{lEM>PcOXt?fOJ*Ua~()3(2!E)h~( znfDS)jMAJQH4oJh3Dil-rRJ~k9^7%Qqrj@&+{&*X?u@O{7#|X_FB2LR$tDBy+kD?B zDdh%}+Dz9SL|Yfl#O_a`o;$v&tqU=lJ}>E)A7mPB&0*)_@_*;KN}2}~!!$t$D%`8W(J39Z{w866||4k6kPuHpJHQs@Trv%eL9DSHf!5@!@?$s}U9vS0x zw-D6eAea&>2$82^qi69*xvGz@i?>XNOz(B>mpd9==Iw3~IpFUhW!4*3G-VPkRK6+shKZ#PauN_v_a}BZoq%f6N90B{Z z*fJ5AA>@vb`F&5Tj&qpE`(#t&=Yt}evEx3Oo7YwSZ_bB+n0P&q7%LerQ9z1@eV|Up zYb<+LyV%zKspARQ2bzTv1hd8{Jgulc$+0FP zyUjGZ6zeH~+JRuf(fRped)5%VUF)?K!se>PhTCx}O`o@aCj$)u|27~2-TCY0y2jW2 zr{_6E7QZ-7+_q0wxb!#9OzD;@#{gEoMp#R$_ulcv%8(dgFYT+3RXkYj+%+{{VtCt? z<$5WiVC>{!h3n*U;_1v?W{zFL3*77I`_H?#(1IdnQ<=(wVzOVnPO9W_JU!*jzhYKK zCQibV%y)cSw~=RL2PPVLVeUX;>;U)PYDgz`b8_WM_`-xQr)5ar#|H%aNOx4D%p&Ph z$U<$8$c`Pd{EI$nrBq@??R!8Ecl}BuMV8xev#H!lGmUao-_<+;B(KVF)Hczx z%e;MqgYGN4-7zfnU4Zyl}@gB)ma2Ha@Nse!u4w#BR@xMM}O+#_e{36(nf!LAhf* ztBK^FTA63`(|@N#BzMebH>Np#_o1kMI=KiCS)W;833lpIQl)Q0gJkn86Vsu#9%atn zt~w33hnLx(MOt4brWX`ZFQa?#Q!E-Rgm=gx%GGcNr1G=VNZ?XSJS~R|7CM!ubH%!8 z>Z2AIzU|Pp-0pu$4v{B6)qx@eME6kOEHBwH*O!|F7G6qE_ja(0{U+=%zzuj`FX7&n zv+e$Cg0w>@DBvfpaPf}jGFa#j0i)Y`ravRnc1@A*F7MMpys7~XUNNwom_Yw1|LCaL z+Al6W;1t5&IaN8q)`*$>n$XL?m3ut39bc?LX+bsrERKwFOSOOJ5pv#r)WA=EDyQmm zL7o)PHxUsdyu6>&NN@Z>wJrLwiKW;;Q5U8>umW;)!k4a!G(q?X2xMq|wOC4sja_6n zy24y`Il(@Yj=%xNpAOq~xsA?tmdy_w_UferP|p$a_9gRXU9F@FenC$ESv z*krFbUpJ82{}V&CdZTyBU(e9C=+Rh(qm`-XW^bnk{PzbhJ;N>|NUmDgfj21pkN@tA zAtTl3S?swU#fNZK{KQuZykMvJa?I$vj|RqvHj-eMa!VRgKl zH1UKtsiZI2#M;DvDgI>PH&d(l=URxlR^KJ5ws4|$n?4(R$UHek2P#Dz4nw4e-kqGB z@byg)Ju!wk@Wq<2Bbf}Wi?zdKf2`N+IiOluTlxzqEZ-<5G5GuT65N_!IEI=<4M#lC z+(%(bmmQ>SwEWMgsS)XIr;M-ZW)3RcdW!HOXa5wM+<6)c>ScBjURdgc$V$XW#`$wK z^ZW%`+Pwd3vFze_{bWGCa@fv z$4n)w0*Y;wmOkv`O(P*%tVtqBx%!nWBJgbb8(C+uxVY4Gl}7!y751MP%*9BHuQR;a zXIMalwXx*T^`zwV8tgwBp*ccz__X3wDeWc3q6PbT>RbGWvUj$};7 zXi@9)w6dfl-2v5e(*E7#A5-BNl4{*Vt!1r?Pfm|hKk$UFhlqxH#vGr3FEZn{Ecqgn z<*gwjA|lbS=bRzJ3oGBriHU6z*79lj_gGGDT-Jh7rhGR)>Usjp&Y$w~kRPH^-oIwr z$oBdj1z`N%m4eAGmVc@4usTYboQ%vel`QO1kXw_g9az|##9RJ=1`Mg@bUs!W3dK(E|1$j z^*SNPlFsg8*dI|eacTV9)872cPaf(l7gt!$N2!0p!^1Yv*9dma0gYMQV^DE+cW9N5 z2{|bs4OV+Nb`GN#4@(nF4Kuj5OV>H*Bbpul%zv$*<@`vt7XxXp$J5OEWxr?1aKALI z&)pCxFLUWSle@kSbC#yKI3&U^L1bGr?r6Y0&xX`Y$wPnw4Yi_Oh)2|pgQWA}>OE{W zA9+)$h#fuotm+_cgYhlyF>@a6I`YR-d&N@O^JPX`rTKUA{0^M|_gwDc_?LMD{Cc&W z^uB=kvsF*M{xhvXne%mZ{2uKHHS(FWi;Mi`W=bG)ZgY3{wV$8p`Tl$iAblt*D@W&>0=BmZ7ju8AAh1DP<9B1z zH;|oe9iCn?&-3UDKNaB}oF7~Xd#9lU$=|+osn>2NnjuJve3hHHWZ2jUQigYHk{?97 zFjl9q5@l_4W%NDs0;5fMEegf#!YeWAo*zHZAALabmyk@EJxFBfM>^McEFe_U_ zy0>yNzM+ML*sA{ALu3*Y56!^I`T>_i;m`q`gi~tx64Zp~K}G^@P=dtu*Ss8T3?Xc; zuIP7#aw+;PnU+#?j5~4$|32(d|2^!jH5+u+`_){7ddj6n@K){a>he4JipTBTw|LK| zK{5`&an03!>sq2$j{~fN%E-w@g&4{_Y@!C;`%7PSbv*&v%P4>{fD?4PLW93t#nF?m zD)xIOy5G*$S?$#a8~xcyCl~44!5r#XXV=;qQ{inAePcgLU1AIDmujADS<9fL(Q%3@ zP+SsNzc`sN5nIj&1B_vtUXW>PpsexUb&s43$FmjQ7To(5duQ^V-t)i@ed$Mq?)^mG zb1L6Y_(2y(BG77%JG*bJohzzhEv>}m`$CpP+~QZJ`p#REOP)7lLhqS)l+2mO=<-_> 
z({wGoid@DxH#(9JaBsC0zrfqd3BZybb`oVr4rNA zT299ll<(AQWtVK&Bvb2Ht3=LjkUx2RB9|tFsF#!)hFwFdRaJrH(BC)0B25AqA|R0{ z8Yj_UzVow{1MbY2Dk-Nvvs@e}{}+));$GO&E2bpy{=IIWor7;SR)*}Y#JYm*}QNozU;CmqNA#fp1`s5 zXb>mQW};1!s3UN{x^KgE3A$}N3i=a71d&>3A_E4l{ACG^6@fJr>2X6cv^o2wbP)$F zHNQWN-lvAwjcNRy;b#8NG6e+seuTMWr6#0`6ss0*@EdNEG!9b>lozZ~>5eW@_o(!r z=g*5XrQG;SJdko4R^mm0j54ZNHc=vC;w3wy8;Y`~Z8rb=liVJFqKAx>d<3fV@>~wT zIsv!A)Ppy};&Hkft9)X(x}im>h}ps050=<{x=_W$T-XIXbLMsL+>_I)5f))$R@)za ztJ|z}qx{9mM=k@r63-QbiqYSN;fi=-m74~3>b{T*UN8#vuxkqs85}k?S|RDmOxwFr z9>t@eA?x1Y`r3^cT{mwc`rLPvgBRF(2Rf%F=h-d(3fL6NZv~?jHi|5QOZa~%mwuEC z^PR@1Z9CajZmUZ@B-+vLQQgsrR_P3vkrOnAsqk&_kp_bo9PPiL9kgH6H0>oZKn>NB zCeo>6MU`WJ)$u}m(K=FLUdBWog_99_0o!NR3(aPrLEcRr;Q(K@-!1d^3Aq1-ID#R2H`mdkI z{O?x`Kj&w+>ng5p@#+0ZR?(1b?~2RET@9zKI%!9^e-3dkkJH<+>uFz}#5EtZ zQ5xMYla5#60-v^|Kum)y%H`XfBW>(*hJwP5LaTX<-1e^JI#($>Hs54MKAz02nncLv zFDsWvi>q|@jPU7{8pN3p^7L!dXz)BtJ3X1tCcw;KlX_%7Kr4m&%{L>c|Ga$1=g_3j zR}I$l?JFUj!^VHUmqvCAybAp4fW2_FAPurGM0UK?eo8XFu9HI;2AI*jEwzLU=NA&J z2fR^>?358<>Hg2$`bb&ekCV*##7oueKgOCG&}w)xlcGB_h&^@4tV?g#hi+pPv(GRH zyUHu-SZ`r*pk@es&zzedABjY?4u6&5lIxhZus~c-;{l!=JP*8{qP_mo(nyzs(dwe2 zj|WE|f3D6(hae-JDk&Q4fs-qCj+cBnFi7dgvwAMWBGk`z?aYPjMr|2KPMu#d{VUTd z2p@Ub{5@=9KhoCbHpIs8pPJ$jh!09rD2yf-U~{k6F9pTmV7#gQ^a%;D7t%_MEYq6UfG4*F=#M#s_!RcOrF;on7`(o{iClpRCBGDWSlu_<#YM(x}`_AyvG#X|B z{rItR_Y3=rdS8LxGdBwEXViEz`;sETPPJ5By=N$Z6_5zd%^_v-;%K4rEq{F4J_eOF zdG}7-9q5-ZGT);QbkKm`3rl08lE`vNPjM6T{ z^H!y@jISzNA%|<-6zpwkFlpbev|eOk#1XHR`{y~?DAOm2{2HRL-(z;&$}-j>66ZFU zN_?{Us(@REp@{(ls6rKtI08e%VB|cA!yANbXyfpeOd{Rw;M>-wfaPN$EBSTnl>h%+ z;gAV$7LV^8Bj%&M(19vztf~9eIhGs2PqsQp#0;HZe+4o}BLIX0UJNOBj+9a3t2B!T z(B%UI8;O=G!74@k%rHn2B3n~SW08(#m2)A`Gp!3bb)K1dxT*({Qwg+3T=P9Hj+LJ$yt%#>fh};norkO4pHsHWXuZjAVjtc^ z0MGSM4-eom9#0oEba3c4*C%W&E-u=YaTE-N@z;)5jMt6Q@|Vs?daa`ChDVH7y_^f1 zMNR9lxMnp?sI1fY9RxdqX>v=GEglRwkp5*1l>JL*;ZbPB(R(AbvK@$2(70!_tu|5? 
[GIT binary patch data for deleted vendor/github.com/Jeffail/gabs/gabs_logo.png omitted]
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 01919a9696..1cec217cc1 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -61,9 +61,6 @@ github.com/AzureAD/microsoft-authentication-library-for-go/apps/public
 # github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab
 ## explicit
 github.com/JeffAshton/win_pdh
-# github.com/Jeffail/gabs v1.4.0
-## explicit
-github.com/Jeffail/gabs
 # github.com/Jeffail/gabs/v2 v2.6.1
 ## explicit
 github.com/Jeffail/gabs/v2

From e8d86e3bd27845508588458fc5150ebba39bc02a Mon Sep 17 00:00:00 2001
From: Simon Pasquier
Date: Wed, 17 Sep 2025 14:30:05 +0200
Subject: [PATCH 2/6] chore: bump github.com/openshift/api

Signed-off-by: Simon Pasquier
---
 go.mod | 28 +-
 go.sum | 54 +-
 .../google/go-cmp/cmp/cmpopts/sort.go | 64 +-
 .../go-cmp/cmp/internal/function/func.go | 7 +
 .../github.com/google/go-cmp/cmp/options.go | 10 +-
 vendor/github.com/google/gofuzz/.travis.yml | 10 -
 .../github.com/google/gofuzz/CONTRIBUTING.md | 67 -
 vendor/github.com/google/gofuzz/doc.go | 18 -
 vendor/github.com/google/gofuzz/fuzz.go | 605 --------
 .../openshift/api/.ci-operator.yaml | 2 +-
 .../github.com/openshift/api/.golangci.yaml | 98 +-
 .../github.com/openshift/api/Dockerfile.ocp | 6 +-
 vendor/github.com/openshift/api/Makefile | 2 +-
 vendor/github.com/openshift/api/OWNERS | 2 +-
 vendor/github.com/openshift/api/README.md | 2 +
 .../api/apiserver/v1/zz_generated.deepcopy.go | 2 +-
 .../openshift/api/apps/v1/generated.proto | 9 +
 .../github.com/openshift/api/apps/v1/types.go | 9 +
 .../api/apps/v1/zz_generated.deepcopy.go | 2 +-
 .../api/authorization/v1/generated.proto | 2 +
 .../openshift/api/authorization/v1/types.go | 2 +
 .../authorization/v1/zz_generated.deepcopy.go | 2 +-
 .../openshift/api/build/v1/generated.proto | 15 +
 .../openshift/api/build/v1/types.go | 15 +
 .../api/build/v1/zz_generated.deepcopy.go | 2 +-
 .../cloudnetwork/v1/zz_generated.deepcopy.go | 2 +-
 .../openshift/api/config/install.go | 3 +-
 .../openshift/api/config/v1/register.go | 4 +
 .../api/config/v1/types_apiserver.go | 36 +-
 .../api/config/v1/types_authentication.go | 455 +++++-
 .../config/v1/types_cluster_image_policy.go | 87 ++
 .../api/config/v1/types_cluster_operator.go | 23 +-
 .../api/config/v1/types_cluster_version.go | 7 +-
 .../openshift/api/config/v1/types_console.go | 1 +
 .../openshift/api/config/v1/types_feature.go | 1 +
 .../api/config/v1/types_image_policy.go | 322 +++++
 .../api/config/v1/types_infrastructure.go | 66 +-
 .../openshift/api/config/v1/types_ingress.go | 2 +-
 .../api/config/v1/types_kmsencryption.go | 55 +
 .../openshift/api/config/v1/types_network.go | 5 +
 .../openshift/api/config/v1/types_node.go | 4 +-
 .../api/config/v1/types_operatorhub.go | 1 +
 .../api/config/v1/zz_generated.deepcopy.go | 529 ++++++-
 ..._generated.featuregated-crd-manifests.yaml | 64 +-
 .../v1/zz_generated.swagger_doc_generated.go | 313 +++-
 .../api/config/v1alpha1/types_backup.go | 4 +-
 .../v1alpha1/types_cluster_monitoring.go | 371 ++++-
 .../api/config/v1alpha1/types_image_policy.go | 5 +-
 .../config/v1alpha1/zz_generated.deepcopy.go | 163 ++-
 .../zz_generated.swagger_doc_generated.go | 71 +-
 .../openshift/api/config/v1alpha2/Makefile | 3 +
 .../openshift/api/config/v1alpha2/doc.go | 7 +
 .../openshift/api/config/v1alpha2/register.go | 38 +
 .../api/config/v1alpha2/types_insights.go | 220 +++
 .../config/v1alpha2/zz_generated.deepcopy.go | 243 ++++
 ..._generated.featuregated-crd-manifests.yaml | 23 +
 .../zz_generated.swagger_doc_generated.go | 111 ++
 .../api/console/v1/types_console_plugin.go | 4 +-
 .../api/console/v1/zz_generated.deepcopy.go | 2 +-
 .../openshift/api/envtest-releases.yaml | 13 +
 vendor/github.com/openshift/api/features.md | 102 +-
 .../openshift/api/features/features.go | 482 ++++---
 .../api/features/legacyfeaturegates.go | 12 -
 .../api/helm/v1beta1/zz_generated.deepcopy.go | 2 +-
 .../image/docker10/zz_generated.deepcopy.go | 2 +-
 .../dockerpre012/zz_generated.deepcopy.go | 2 +-
 .../openshift/api/image/v1/generated.proto | 12 +-
 .../openshift/api/image/v1/types.go | 12 +-
 .../api/image/v1/zz_generated.deepcopy.go | 2 +-
 .../v1/zz_generated.swagger_doc_generated.go | 4 +-
 .../openshift/api/imageregistry/v1/types.go | 5 +-
 .../imageregistry/v1/zz_generated.deepcopy.go | 2 +-
 ..._generated.featuregated-crd-manifests.yaml | 3 +-
 .../v1/zz_generated.deepcopy.go | 2 +-
 .../openshift/api/legacyconfig/v1/types.go | 2 +-
 .../legacyconfig/v1/zz_generated.deepcopy.go | 2 +-
 .../v1/zz_generated.swagger_doc_generated.go | 2 +-
 .../v1/types_controlplanemachineset.go | 5 +-
 .../api/machine/v1/types_nutanixprovider.go | 4 +-
 .../api/machine/v1/zz_generated.deepcopy.go | 2 +-
 .../v1/zz_generated.swagger_doc_generated.go | 2 +-
 .../machine/v1alpha1/zz_generated.deepcopy.go | 2 +-
 .../api/machine/v1beta1/types_awsprovider.go | 4 +-
 .../machine/v1beta1/types_azureprovider.go | 8 +-
 .../api/machine/v1beta1/types_machine.go | 1 +
 .../v1beta1/types_machinehealthcheck.go | 4 +-
 .../api/machine/v1beta1/types_machineset.go | 2 +
 .../api/machine/v1beta1/types_provider.go | 2 +-
 .../machine/v1beta1/zz_generated.deepcopy.go | 6 +-
 .../zz_generated.swagger_doc_generated.go | 4 +-
 .../monitoring/v1/zz_generated.deepcopy.go | 2 +-
 .../api/network/v1/zz_generated.deepcopy.go | 2 +-
 .../network/v1alpha1/zz_generated.deepcopy.go | 2 +-
 .../api/networkoperator/v1/generated.proto | 2 -
 .../networkoperator/v1/types_egressrouter.go | 4 +-
 .../v1/zz_generated.deepcopy.go | 2 +-
 .../api/oauth/v1/zz_generated.deepcopy.go | 2 +-
 .../api/openshiftcontrolplane/v1/types.go | 5 +-
 .../v1/zz_generated.deepcopy.go | 3 +-
 .../openshift/api/operator/v1/types.go | 3 +-
 .../api/operator/v1/types_authentication.go | 2 +-
 .../api/operator/v1/types_console.go | 4 +-
 .../operator/v1/types_csi_cluster_driver.go | 4 -
 .../openshift/api/operator/v1/types_etcd.go | 4 +-
 .../api/operator/v1/types_ingress.go | 8 +-
 .../api/operator/v1/types_kubeapiserver.go | 1 -
 .../operator/v1/types_machineconfiguration.go | 46 +-
 .../api/operator/v1/types_network.go | 26 +-
 .../api/operator/v1/zz_generated.deepcopy.go | 24 +-
 ..._generated.featuregated-crd-manifests.yaml | 5 +-
 .../v1/zz_generated.swagger_doc_generated.go | 30 +-
 .../api/operator/v1alpha1/types_etcdbackup.go | 1 -
 .../v1alpha1/zz_generated.deepcopy.go | 2 +-
 .../v1alpha1/zz_generated.deepcopy.go | 2 +-
 .../api/osin/v1/zz_generated.deepcopy.go | 2 +-
 .../api/project/v1/zz_generated.deepcopy.go | 2 +-
 .../api/quota/v1/zz_generated.deepcopy.go | 2 +-
 .../openshift/api/route/v1/generated.proto | 1 +
 .../openshift/api/route/v1/types.go | 1 +
 .../api/route/v1/zz_generated.deepcopy.go | 2 +-
 .../openshift/api/samples/v1/generated.proto | 7 +
 .../openshift/api/samples/v1/types_config.go | 7 +
 .../api/samples/v1/zz_generated.deepcopy.go | 2 +-
 .../openshift/api/security/v1/generated.proto | 4 +
 .../openshift/api/security/v1/types.go | 4 +
 .../api/security/v1/zz_generated.deepcopy.go | 2 +-
 .../v1alpha1/zz_generated.deepcopy.go | 2 +-
 .../v1alpha1/types_shared_configmap.go | 2 +-
 .../v1alpha1/types_shared_secret.go | 2 +-
 .../v1alpha1/zz_generated.deepcopy.go | 2 +-
 .../openshift/api/template/v1/generated.proto | 2 +
 .../openshift/api/template/v1/types.go | 2 +
 .../api/template/v1/zz_generated.deepcopy.go | 2 +-
 .../api/user/v1/zz_generated.deepcopy.go | 2 +-
 .../testify/assert/assertion_compare.go | 35 +-
 .../testify/assert/assertion_format.go | 34 +-
 .../testify/assert/assertion_forward.go | 68 +-
 .../testify/assert/assertion_order.go | 10 +-
 .../stretchr/testify/assert/assertions.go | 157 ++-
 .../testify/assert/yaml/yaml_custom.go | 25 +
 .../testify/assert/yaml/yaml_default.go | 37 +
 .../stretchr/testify/assert/yaml/yaml_fail.go | 18 +
 .../stretchr/testify/require/require.go | 432 +++---
 .../stretchr/testify/require/require.go.tmpl | 2 +-
 .../testify/require/require_forward.go | 68 +-
 .../stretchr/testify/require/requirements.go | 2 +-
 vendor/golang.org/x/crypto/ssh/handshake.go | 61 +-
 vendor/golang.org/x/crypto/ssh/messages.go | 2 +
 vendor/golang.org/x/crypto/ssh/server.go | 50 +-
 vendor/golang.org/x/crypto/ssh/tcpip.go | 2 +-
 vendor/golang.org/x/net/context/context.go | 112 +-
vendor/golang.org/x/net/context/go17.go | 72 - vendor/golang.org/x/net/context/go19.go | 20 - vendor/golang.org/x/net/context/pre_go17.go | 300 ---- vendor/golang.org/x/net/context/pre_go19.go | 109 -- vendor/golang.org/x/net/html/atom/table.go | 1256 +++++++++-------- vendor/golang.org/x/net/html/parse.go | 4 +- vendor/golang.org/x/net/html/token.go | 18 +- .../golang.org/x/net/http/httpproxy/proxy.go | 10 +- vendor/golang.org/x/net/http2/frame.go | 11 + vendor/golang.org/x/net/http2/http2.go | 39 +- vendor/golang.org/x/net/http2/server.go | 130 +- vendor/golang.org/x/net/http2/transport.go | 334 +---- vendor/golang.org/x/net/http2/write.go | 3 +- .../x/net/internal/httpcommon/ascii.go | 53 + .../httpcommon}/headermap.go | 24 +- .../x/net/internal/httpcommon/request.go | 467 ++++++ vendor/golang.org/x/net/proxy/per_host.go | 8 +- .../golang.org/x/net/websocket/websocket.go | 5 +- vendor/golang.org/x/sync/errgroup/errgroup.go | 3 +- vendor/golang.org/x/sync/errgroup/go120.go | 13 - .../golang.org/x/sync/errgroup/pre_go120.go | 14 - vendor/golang.org/x/sys/cpu/cpu.go | 3 + vendor/golang.org/x/sys/cpu/cpu_x86.go | 21 +- vendor/golang.org/x/sys/unix/auxv.go | 36 + .../golang.org/x/sys/unix/auxv_unsupported.go | 13 + .../golang.org/x/sys/unix/syscall_solaris.go | 87 ++ vendor/golang.org/x/sys/unix/zerrors_linux.go | 20 +- .../x/sys/unix/zerrors_linux_386.go | 3 + .../x/sys/unix/zerrors_linux_amd64.go | 3 + .../x/sys/unix/zerrors_linux_arm.go | 3 + .../x/sys/unix/zerrors_linux_arm64.go | 4 + .../x/sys/unix/zerrors_linux_loong64.go | 3 + .../x/sys/unix/zerrors_linux_mips.go | 3 + .../x/sys/unix/zerrors_linux_mips64.go | 3 + .../x/sys/unix/zerrors_linux_mips64le.go | 3 + .../x/sys/unix/zerrors_linux_mipsle.go | 3 + .../x/sys/unix/zerrors_linux_ppc.go | 3 + .../x/sys/unix/zerrors_linux_ppc64.go | 3 + .../x/sys/unix/zerrors_linux_ppc64le.go | 3 + .../x/sys/unix/zerrors_linux_riscv64.go | 3 + .../x/sys/unix/zerrors_linux_s390x.go | 3 + .../x/sys/unix/zerrors_linux_sparc64.go | 3 + .../x/sys/unix/zsyscall_solaris_amd64.go | 114 ++ .../x/sys/unix/zsysnum_linux_386.go | 4 + .../x/sys/unix/zsysnum_linux_amd64.go | 4 + .../x/sys/unix/zsysnum_linux_arm.go | 4 + .../x/sys/unix/zsysnum_linux_arm64.go | 4 + .../x/sys/unix/zsysnum_linux_loong64.go | 4 + .../x/sys/unix/zsysnum_linux_mips.go | 4 + .../x/sys/unix/zsysnum_linux_mips64.go | 4 + .../x/sys/unix/zsysnum_linux_mips64le.go | 4 + .../x/sys/unix/zsysnum_linux_mipsle.go | 4 + .../x/sys/unix/zsysnum_linux_ppc.go | 4 + .../x/sys/unix/zsysnum_linux_ppc64.go | 4 + .../x/sys/unix/zsysnum_linux_ppc64le.go | 4 + .../x/sys/unix/zsysnum_linux_riscv64.go | 4 + .../x/sys/unix/zsysnum_linux_s390x.go | 4 + .../x/sys/unix/zsysnum_linux_sparc64.go | 4 + vendor/golang.org/x/sys/unix/ztypes_linux.go | 6 +- .../x/text/internal/number/format.go | 2 - vendor/golang.org/x/text/language/parse.go | 2 +- .../protobuf/encoding/protojson/decode.go | 5 - .../protobuf/encoding/prototext/decode.go | 5 - .../protobuf/internal/encoding/tag/tag.go | 8 +- .../protobuf/internal/filedesc/desc.go | 9 +- .../protobuf/internal/filedesc/desc_lazy.go | 9 - .../protobuf/internal/filetype/build.go | 2 +- .../protobuf/internal/flags/flags.go | 7 +- .../protobuf/internal/genid/goname.go | 5 - .../protobuf/internal/impl/codec_field.go | 75 - .../protobuf/internal/impl/codec_message.go | 3 - .../internal/impl/codec_message_opaque.go | 3 - .../protobuf/internal/impl/lazy.go | 2 +- .../protobuf/internal/impl/legacy_message.go | 5 +- .../protobuf/internal/impl/message.go | 13 - 
.../protobuf/internal/impl/message_opaque.go | 5 - .../protobuf/internal/impl/message_reflect.go | 5 - .../internal/impl/message_reflect_field.go | 76 - .../protobuf/internal/impl/pointer_unsafe.go | 1 - .../protobuf/internal/impl/validate.go | 24 +- .../protobuf/internal/impl/weak.go | 74 - .../protobuf/internal/version/version.go | 2 +- .../protobuf/proto/decode.go | 5 - .../protobuf/reflect/protodesc/desc.go | 8 +- .../protobuf/reflect/protodesc/desc_init.go | 1 - .../reflect/protodesc/desc_resolve.go | 26 +- .../reflect/protodesc/desc_validate.go | 12 - .../protobuf/reflect/protodesc/editions.go | 25 +- .../protobuf/reflect/protodesc/proto.go | 3 - .../protobuf/reflect/protoreflect/type.go | 12 +- .../types/descriptorpb/descriptor.pb.go | 12 +- .../types/gofeaturespb/go_features.pb.go | 12 +- .../protobuf/types/known/anypb/any.pb.go | 12 +- .../types/known/durationpb/duration.pb.go | 12 +- .../protobuf/types/known/emptypb/empty.pb.go | 12 +- .../types/known/fieldmaskpb/field_mask.pb.go | 12 +- .../types/known/structpb/struct.pb.go | 12 +- .../types/known/timestamppb/timestamp.pb.go | 12 +- .../types/known/wrapperspb/wrappers.pb.go | 12 +- .../k8s.io/apimachinery/pkg/api/errors/doc.go | 2 +- .../k8s.io/apimachinery/pkg/api/meta/doc.go | 2 +- .../k8s.io/apimachinery/pkg/api/meta/help.go | 3 + .../pkg/api/operation/operation.go | 56 + .../apimachinery/pkg/api/validation/doc.go | 2 +- .../pkg/api/validation/generic.go | 2 +- .../pkg/apis/meta/internalversion/doc.go | 2 +- .../apis/meta/internalversion/scheme/doc.go | 2 +- .../pkg/apis/meta/internalversion/types.go | 2 - .../apimachinery/pkg/apis/meta/v1/doc.go | 2 +- .../pkg/apis/meta/v1/micro_time_fuzz.go | 13 +- .../pkg/apis/meta/v1/time_fuzz.go | 13 +- .../pkg/apis/meta/v1/unstructured/helpers.go | 31 +- .../apis/meta/v1/unstructured/unstructured.go | 4 +- .../pkg/apis/meta/v1/validation/validation.go | 2 +- .../apimachinery/pkg/apis/meta/v1beta1/doc.go | 2 +- .../k8s.io/apimachinery/pkg/conversion/doc.go | 2 +- .../pkg/conversion/queryparams/doc.go | 2 +- vendor/k8s.io/apimachinery/pkg/fields/doc.go | 2 +- vendor/k8s.io/apimachinery/pkg/labels/doc.go | 2 +- vendor/k8s.io/apimachinery/pkg/runtime/doc.go | 2 +- .../apimachinery/pkg/runtime/interfaces.go | 1 + .../k8s.io/apimachinery/pkg/runtime/scheme.go | 39 + .../serializer/cbor/internal/modes/custom.go | 4 +- .../pkg/runtime/serializer/codec_factory.go | 23 +- .../runtime/serializer/json/collections.go | 230 +++ .../pkg/runtime/serializer/json/json.go | 16 +- .../serializer/protobuf/collections.go | 174 +++ .../pkg/runtime/serializer/protobuf/doc.go | 2 +- .../runtime/serializer/protobuf/protobuf.go | 87 +- .../apimachinery/pkg/runtime/types_proto.go | 127 +- vendor/k8s.io/apimachinery/pkg/types/doc.go | 2 +- .../k8s.io/apimachinery/pkg/util/diff/diff.go | 2 +- .../apimachinery/pkg/util/errors/doc.go | 2 +- .../apimachinery/pkg/util/framer/framer.go | 6 +- .../apimachinery/pkg/util/httpstream/doc.go | 2 +- .../pkg/util/httpstream/wsstream/doc.go | 2 +- .../pkg/util/intstr/instr_fuzz.go | 14 +- .../k8s.io/apimachinery/pkg/util/proxy/doc.go | 2 +- .../apimachinery/pkg/util/runtime/runtime.go | 46 +- .../k8s.io/apimachinery/pkg/util/sets/doc.go | 2 +- .../util/validation/field/error_matcher.go | 212 +++ .../pkg/util/validation/field/errors.go | 132 +- .../apimachinery/pkg/util/validation/ip.go | 278 ++++ .../pkg/util/validation/validation.go | 40 - .../apimachinery/pkg/util/version/doc.go | 2 +- .../apimachinery/pkg/util/version/version.go | 18 +- 
.../apimachinery/pkg/util/wait/backoff.go | 50 +- .../k8s.io/apimachinery/pkg/util/wait/doc.go | 2 +- .../k8s.io/apimachinery/pkg/util/wait/loop.go | 4 +- .../k8s.io/apimachinery/pkg/util/wait/wait.go | 9 +- .../apimachinery/pkg/util/waitgroup/doc.go | 2 +- .../apimachinery/pkg/util/yaml/decoder.go | 170 ++- .../pkg/util/yaml/stream_reader.go | 130 ++ vendor/k8s.io/apimachinery/pkg/version/doc.go | 4 +- .../k8s.io/apimachinery/pkg/version/types.go | 28 +- vendor/k8s.io/apimachinery/pkg/watch/doc.go | 2 +- .../apimachinery/pkg/watch/streamwatcher.go | 15 +- vendor/k8s.io/apimachinery/pkg/watch/watch.go | 35 +- vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go | 146 +- vendor/modules.txt | 56 +- vendor/sigs.k8s.io/randfill/CONTRIBUTING.md | 43 + .../gofuzz => sigs.k8s.io/randfill}/LICENSE | 6 +- vendor/sigs.k8s.io/randfill/NOTICE | 24 + vendor/sigs.k8s.io/randfill/OWNERS | 8 + vendor/sigs.k8s.io/randfill/OWNERS_ALIASES | 14 + .../gofuzz => sigs.k8s.io/randfill}/README.md | 45 +- vendor/sigs.k8s.io/randfill/SECURITY_CONTACTS | 16 + .../randfill}/bytesource/bytesource.go | 0 .../sigs.k8s.io/randfill/code-of-conduct.md | 3 + vendor/sigs.k8s.io/randfill/randfill.go | 682 +++++++++ 321 files changed, 9239 insertions(+), 4053 deletions(-) delete mode 100644 vendor/github.com/google/gofuzz/.travis.yml delete mode 100644 vendor/github.com/google/gofuzz/CONTRIBUTING.md delete mode 100644 vendor/github.com/google/gofuzz/doc.go delete mode 100644 vendor/github.com/google/gofuzz/fuzz.go create mode 100644 vendor/github.com/openshift/api/config/v1/types_cluster_image_policy.go create mode 100644 vendor/github.com/openshift/api/config/v1/types_image_policy.go create mode 100644 vendor/github.com/openshift/api/config/v1/types_kmsencryption.go create mode 100644 vendor/github.com/openshift/api/config/v1alpha2/Makefile create mode 100644 vendor/github.com/openshift/api/config/v1alpha2/doc.go create mode 100644 vendor/github.com/openshift/api/config/v1alpha2/register.go create mode 100644 vendor/github.com/openshift/api/config/v1alpha2/types_insights.go create mode 100644 vendor/github.com/openshift/api/config/v1alpha2/zz_generated.deepcopy.go create mode 100644 vendor/github.com/openshift/api/config/v1alpha2/zz_generated.featuregated-crd-manifests.yaml create mode 100644 vendor/github.com/openshift/api/config/v1alpha2/zz_generated.swagger_doc_generated.go create mode 100644 vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go create mode 100644 vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go create mode 100644 vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go delete mode 100644 vendor/golang.org/x/net/context/go17.go delete mode 100644 vendor/golang.org/x/net/context/go19.go delete mode 100644 vendor/golang.org/x/net/context/pre_go17.go delete mode 100644 vendor/golang.org/x/net/context/pre_go19.go create mode 100644 vendor/golang.org/x/net/internal/httpcommon/ascii.go rename vendor/golang.org/x/net/{http2 => internal/httpcommon}/headermap.go (74%) create mode 100644 vendor/golang.org/x/net/internal/httpcommon/request.go delete mode 100644 vendor/golang.org/x/sync/errgroup/go120.go delete mode 100644 vendor/golang.org/x/sync/errgroup/pre_go120.go create mode 100644 vendor/golang.org/x/sys/unix/auxv.go create mode 100644 vendor/golang.org/x/sys/unix/auxv_unsupported.go delete mode 100644 vendor/google.golang.org/protobuf/internal/impl/weak.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/operation/operation.go create mode 100644 
vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/collections.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/collections.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/validation/field/error_matcher.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/validation/ip.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/yaml/stream_reader.go create mode 100644 vendor/sigs.k8s.io/randfill/CONTRIBUTING.md rename vendor/{github.com/google/gofuzz => sigs.k8s.io/randfill}/LICENSE (99%) create mode 100644 vendor/sigs.k8s.io/randfill/NOTICE create mode 100644 vendor/sigs.k8s.io/randfill/OWNERS create mode 100644 vendor/sigs.k8s.io/randfill/OWNERS_ALIASES rename vendor/{github.com/google/gofuzz => sigs.k8s.io/randfill}/README.md (53%) create mode 100644 vendor/sigs.k8s.io/randfill/SECURITY_CONTACTS rename vendor/{github.com/google/gofuzz => sigs.k8s.io/randfill}/bytesource/bytesource.go (100%) create mode 100644 vendor/sigs.k8s.io/randfill/code-of-conduct.md create mode 100644 vendor/sigs.k8s.io/randfill/randfill.go diff --git a/go.mod b/go.mod index d549017d30..dc9ec5f883 100644 --- a/go.mod +++ b/go.mod @@ -15,7 +15,7 @@ require ( github.com/onsi/ginkgo/v2 v2.22.0 github.com/onsi/gomega v1.36.1 github.com/openshift-eng/openshift-tests-extension v0.0.0-20250702172817-97309544869d - github.com/openshift/api v0.0.0-20250320170726-75d64d71980b + github.com/openshift/api v0.0.0-20250916150132-83b017b06367 github.com/openshift/client-go v0.0.0-20250125113824-8e1f0b8fa9a7 github.com/openshift/library-go v0.0.0-20250402180609-ce2ba53fb2a4 github.com/pborman/uuid v1.2.1 @@ -26,16 +26,16 @@ require ( github.com/prometheus/common v0.60.0 github.com/prometheus/prometheus v0.54.1 github.com/spf13/cobra v1.8.1 - github.com/stretchr/testify v1.9.0 + github.com/stretchr/testify v1.10.0 github.com/tidwall/gjson v1.18.0 - golang.org/x/net v0.34.0 - golang.org/x/sync v0.10.0 - golang.org/x/text v0.21.0 + golang.org/x/net v0.38.0 + golang.org/x/sync v0.12.0 + golang.org/x/text v0.23.0 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 - k8s.io/api v0.32.2 + k8s.io/api v0.33.2 k8s.io/apiextensions-apiserver v0.32.2 - k8s.io/apimachinery v0.32.2 + k8s.io/apimachinery v0.33.2 k8s.io/apiserver v0.32.2 k8s.io/autoscaler/vertical-pod-autoscaler v1.0.0 k8s.io/client-go v0.32.2 @@ -101,8 +101,7 @@ require ( github.com/google/cadvisor v0.51.0 // indirect github.com/google/cel-go v0.22.0 // indirect github.com/google/gnostic-models v0.6.9 // indirect - github.com/google/go-cmp v0.6.0 // indirect - github.com/google/gofuzz v1.2.0 // indirect + github.com/google/go-cmp v0.7.0 // indirect github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect @@ -161,11 +160,11 @@ require ( go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.32.0 // indirect + golang.org/x/crypto v0.36.0 // indirect golang.org/x/exp v0.0.0-20241004190924-225e2abe05e6 // indirect golang.org/x/oauth2 v0.25.0 // indirect - golang.org/x/sys v0.29.0 // indirect - golang.org/x/term v0.28.0 // indirect + golang.org/x/sys v0.31.0 // indirect + golang.org/x/term v0.30.0 // indirect golang.org/x/time v0.9.0 // indirect golang.org/x/tools v0.26.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect @@ -173,7 +172,7 @@ require ( google.golang.org/genproto/googleapis/api 
v0.0.0-20240826202546-f6391c0de4c7 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 // indirect google.golang.org/grpc v1.65.0 // indirect - google.golang.org/protobuf v1.36.3 // indirect + google.golang.org/protobuf v1.36.5 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect @@ -185,7 +184,7 @@ require ( k8s.io/csi-translation-lib v0.0.0 // indirect k8s.io/dynamic-resource-allocation v0.0.0 // indirect k8s.io/kms v0.32.2 // indirect - k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 // indirect + k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect k8s.io/kube-scheduler v0.0.0 // indirect k8s.io/kubectl v0.0.0 // indirect k8s.io/kubelet v0.0.0 // indirect @@ -193,6 +192,7 @@ require ( k8s.io/pod-security-admission v0.0.0 // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect ) diff --git a/go.sum b/go.sum index f963f8e12f..6affc10cb2 100644 --- a/go.sum +++ b/go.sum @@ -259,8 +259,8 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -428,8 +428,8 @@ github.com/opencontainers/selinux v1.11.1 h1:nHFvthhM0qY8/m+vfhJylliSshm8G1jJ2jD github.com/opencontainers/selinux v1.11.1/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec= github.com/openshift-eng/openshift-tests-extension v0.0.0-20250702172817-97309544869d h1:nlVcuw7cyXIYMtQn97y/CMq1yoovvLnc2AFqi15DgXA= github.com/openshift-eng/openshift-tests-extension v0.0.0-20250702172817-97309544869d/go.mod h1:6gkP5f2HL0meusT0Aim8icAspcD1cG055xxBZ9yC68M= -github.com/openshift/api v0.0.0-20250320170726-75d64d71980b h1:GGuFSHESP0BSOu70AqV4u9IVrjYdaeu4Id+HXRIOvkw= -github.com/openshift/api v0.0.0-20250320170726-75d64d71980b/go.mod h1:yk60tHAmHhtVpJQo3TwVYq2zpuP70iJIFDCmeKMIzPw= +github.com/openshift/api v0.0.0-20250916150132-83b017b06367 h1:bG88JdzHVMY/o3b0iGlxjoAieRv2gQlAGKIK7CBGDAw= +github.com/openshift/api v0.0.0-20250916150132-83b017b06367/go.mod h1:SPLf21TYPipzCO67BURkCfK6dcIIxx0oNRVWaOyRcXM= github.com/openshift/client-go v0.0.0-20250125113824-8e1f0b8fa9a7 h1:4iliLcvr1P9EUMZgIaSNEKNQQzBn+L6PSequlFOuB6Q= github.com/openshift/client-go v0.0.0-20250125113824-8e1f0b8fa9a7/go.mod h1:2tcufBE4Cu6RNgDCxcUJepa530kGo5GFVfR9BSnndhI= github.com/openshift/kubernetes v1.30.1-0.20250704150419-38c60a516ecb h1:S4q4g0HsMcn37eyooQJaYOBpdcghr64OiCEBEWHUx8U= @@ -512,8 +512,8 @@ 
github.com/prometheus/prometheus v0.54.1/go.mod h1:xlLByHhk2g3ycakQGrMaU8K7OySZx github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ= github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.29 h1:BkTk4gynLjguayxrYxZoMZjBnAOh7ntQvUkOFmkMqPU= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.29/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= @@ -544,8 +544,9 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= @@ -619,8 +620,8 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= -golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= +golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= +golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -686,8 +687,8 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= 
-golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= +golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= +golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -706,8 +707,8 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= -golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= +golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -746,19 +747,19 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= -golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= -golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= +golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= +golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= -golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text 
v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -896,8 +897,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= -google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= +google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -933,8 +934,8 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/apiextensions-apiserver v0.32.2 h1:2YMk285jWMk2188V2AERy5yDwBYrjgWYggscghPCvV4= k8s.io/apiextensions-apiserver v0.32.2/go.mod h1:GPwf8sph7YlJT3H6aKUWtd0E+oyShk/YHWQHf/OOgCA= -k8s.io/apimachinery v0.32.2 h1:yoQBR9ZGkA6Rgmhbp/yuT9/g+4lxtsGYwW6dR6BDPLQ= -k8s.io/apimachinery v0.32.2/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= +k8s.io/apimachinery v0.33.2 h1:IHFVhqg59mb8PJWTLi8m1mAoepkUNYmptHsV+Z1m5jY= +k8s.io/apimachinery v0.33.2/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= k8s.io/apiserver v0.32.2 h1:WzyxAu4mvLkQxwD9hGa4ZfExo3yZZaYzoYvvVDlM6vw= k8s.io/apiserver v0.32.2/go.mod h1:PEwREHiHNU2oFdte7BjzA1ZyjWjuckORLIK/wLV5goM= k8s.io/autoscaler/vertical-pod-autoscaler v1.0.0 h1:y0TgWoHaeYEv3L1MfLC+D2WVxyN1fGr6axURHXq+wHE= @@ -951,8 +952,8 @@ k8s.io/kms v0.32.2 h1:7Ff23ht7W40gTcDwUC8G5WjX5W/nxD8WxbNhIYYNZCI= k8s.io/kms v0.32.2/go.mod h1:Bk2evz/Yvk0oVrvm4MvZbgq8BD34Ksxs2SRHn4/UiOM= k8s.io/kube-aggregator v0.32.1 h1:cztPyIHbo6tgrhYHDqmdmvxUufJKuxgAC/vog7yeWek= k8s.io/kube-aggregator v0.32.1/go.mod h1:sXjL5T8FO/rlBzTbBhahw9V5Nnr1UtzZHKTj9WxQCOU= -k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 h1:hcha5B1kVACrLujCKLbr8XWMxCxzQx42DY8QKYJrDLg= -k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7/go.mod h1:GewRfANuJ70iYzvn+i4lezLDAFzvjxZYK1gn1lWcfas= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= k8s.io/metrics v0.31.1 h1:h4I4dakgh/zKflWYAOQhwf0EXaqy8LxAIyE/GBvxqRc= k8s.io/metrics v0.31.1/go.mod h1:JuH1S9tJiH9q1VCY0yzSCawi7kzNLsDzlWDJN4xR+iA= k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0= @@ -968,8 +969,9 @@ sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7np sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod 
h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
 sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96 h1:PFWFSkpArPNJxFX4ZKWAk9NSeRoZaXschn+ULa4xVek=
 sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96/go.mod h1:EOBQyBowOUsd7U4CJnMHNE0ri+zCXyouGdLwC/jZU+I=
-sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016 h1:kXv6kKdoEtedwuqMmkqhbkgvYKeycVbC8+iPCP9j5kQ=
 sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
+sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
+sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
 sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc=
 sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
 sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
diff --git a/vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go b/vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go
index c6d09dae40..720f3cdf57 100644
--- a/vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go
+++ b/vendor/github.com/google/go-cmp/cmp/cmpopts/sort.go
@@ -14,22 +14,29 @@ import (
 )
 
 // SortSlices returns a [cmp.Transformer] option that sorts all []V.
-// The less function must be of the form "func(T, T) bool" which is used to
-// sort any slice with element type V that is assignable to T.
+// The lessOrCompareFunc function must be either
+// a less function of the form "func(T, T) bool" or
+// a compare function of the form "func(T, T) int"
+// which is used to sort any slice with element type V that is assignable to T.
 //
-// The less function must be:
+// A less function must be:
 //   - Deterministic: less(x, y) == less(x, y)
 //   - Irreflexive: !less(x, x)
 //   - Transitive: if !less(x, y) and !less(y, z), then !less(x, z)
 //
-// The less function does not have to be "total". That is, if !less(x, y) and
-// !less(y, x) for two elements x and y, their relative order is maintained.
+// A compare function must be:
+//   - Deterministic: compare(x, y) == compare(x, y)
+//   - Irreflexive: compare(x, x) == 0
+//   - Transitive: if compare(x, y) < 0 and compare(y, z) < 0, then compare(x, z) < 0
+//
+// The function does not have to be "total". That is, if x != y, but
+// less or compare report equality, their relative order is maintained.
 //
 // SortSlices can be used in conjunction with [EquateEmpty].
-func SortSlices(lessFunc interface{}) cmp.Option {
-	vf := reflect.ValueOf(lessFunc)
-	if !function.IsType(vf.Type(), function.Less) || vf.IsNil() {
-		panic(fmt.Sprintf("invalid less function: %T", lessFunc))
+func SortSlices(lessOrCompareFunc interface{}) cmp.Option {
+	vf := reflect.ValueOf(lessOrCompareFunc)
+	if (!function.IsType(vf.Type(), function.Less) && !function.IsType(vf.Type(), function.Compare)) || vf.IsNil() {
+		panic(fmt.Sprintf("invalid less or compare function: %T", lessOrCompareFunc))
 	}
 	ss := sliceSorter{vf.Type().In(0), vf}
 	return cmp.FilterValues(ss.filter, cmp.Transformer("cmpopts.SortSlices", ss.sort))
@@ -79,28 +86,40 @@ func (ss sliceSorter) checkSort(v reflect.Value) {
 }
 
 func (ss sliceSorter) less(v reflect.Value, i, j int) bool {
 	vx, vy := v.Index(i), v.Index(j)
-	return ss.fnc.Call([]reflect.Value{vx, vy})[0].Bool()
+	vo := ss.fnc.Call([]reflect.Value{vx, vy})[0]
+	if vo.Kind() == reflect.Bool {
+		return vo.Bool()
+	} else {
+		return vo.Int() < 0
+	}
 }
 
-// SortMaps returns a [cmp.Transformer] option that flattens map[K]V types to be a
-// sorted []struct{K, V}. The less function must be of the form
-// "func(T, T) bool" which is used to sort any map with key K that is
-// assignable to T.
+// SortMaps returns a [cmp.Transformer] option that flattens map[K]V types to be
+// a sorted []struct{K, V}. The lessOrCompareFunc function must be either
+// a less function of the form "func(T, T) bool" or
+// a compare function of the form "func(T, T) int"
+// which is used to sort any map with key K that is assignable to T.
 //
 // Flattening the map into a slice has the property that [cmp.Equal] is able to
 // use [cmp.Comparer] options on K or the K.Equal method if it exists.
 //
-// The less function must be:
+// A less function must be:
 //   - Deterministic: less(x, y) == less(x, y)
 //   - Irreflexive: !less(x, x)
 //   - Transitive: if !less(x, y) and !less(y, z), then !less(x, z)
 //   - Total: if x != y, then either less(x, y) or less(y, x)
 //
+// A compare function must be:
+//   - Deterministic: compare(x, y) == compare(x, y)
+//   - Irreflexive: compare(x, x) == 0
+//   - Transitive: if compare(x, y) < 0 and compare(y, z) < 0, then compare(x, z) < 0
+//   - Total: if x != y, then compare(x, y) != 0
+//
 // SortMaps can be used in conjunction with [EquateEmpty].
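For orientation while reading this hunk: with the change above, SortSlices (and SortMaps below) also accept a compare function. A minimal usage sketch — not part of this patch; it assumes the go-cmp v0.7.0 behaviour shown here, uses the standard library's cmp.Compare, and the sample slices are invented:

	package main

	import (
		"cmp"
		"fmt"

		gocmp "github.com/google/go-cmp/cmp"
		"github.com/google/go-cmp/cmp/cmpopts"
	)

	func main() {
		x := []int{3, 1, 2}
		y := []int{2, 3, 1}
		// cmp.Compare[int] has the form func(int, int) int, so it now
		// qualifies as a compare function for cmpopts.SortSlices.
		opt := cmpopts.SortSlices(cmp.Compare[int])
		fmt.Println(gocmp.Equal(x, y, opt)) // true: both sides sort to [1 2 3]
	}

Passing a classic less function such as func(a, b int) bool { return a < b } keeps working unchanged.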
-func SortMaps(lessFunc interface{}) cmp.Option { - vf := reflect.ValueOf(lessFunc) - if !function.IsType(vf.Type(), function.Less) || vf.IsNil() { - panic(fmt.Sprintf("invalid less function: %T", lessFunc)) +func SortMaps(lessOrCompareFunc interface{}) cmp.Option { + vf := reflect.ValueOf(lessOrCompareFunc) + if (!function.IsType(vf.Type(), function.Less) && !function.IsType(vf.Type(), function.Compare)) || vf.IsNil() { + panic(fmt.Sprintf("invalid less or compare function: %T", lessOrCompareFunc)) } ms := mapSorter{vf.Type().In(0), vf} return cmp.FilterValues(ms.filter, cmp.Transformer("cmpopts.SortMaps", ms.sort)) @@ -143,5 +162,10 @@ func (ms mapSorter) checkSort(v reflect.Value) { } func (ms mapSorter) less(v reflect.Value, i, j int) bool { vx, vy := v.Index(i).Field(0), v.Index(j).Field(0) - return ms.fnc.Call([]reflect.Value{vx, vy})[0].Bool() + vo := ms.fnc.Call([]reflect.Value{vx, vy})[0] + if vo.Kind() == reflect.Bool { + return vo.Bool() + } else { + return vo.Int() < 0 + } } diff --git a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go index d127d43623..def01a6be3 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go @@ -19,6 +19,7 @@ const ( tbFunc // func(T) bool ttbFunc // func(T, T) bool + ttiFunc // func(T, T) int trbFunc // func(T, R) bool tibFunc // func(T, I) bool trFunc // func(T) R @@ -28,11 +29,13 @@ const ( Transformer = trFunc // func(T) R ValueFilter = ttbFunc // func(T, T) bool Less = ttbFunc // func(T, T) bool + Compare = ttiFunc // func(T, T) int ValuePredicate = tbFunc // func(T) bool KeyValuePredicate = trbFunc // func(T, R) bool ) var boolType = reflect.TypeOf(true) +var intType = reflect.TypeOf(0) // IsType reports whether the reflect.Type is of the specified function type. func IsType(t reflect.Type, ft funcType) bool { @@ -49,6 +52,10 @@ func IsType(t reflect.Type, ft funcType) bool { if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType { return true } + case ttiFunc: // func(T, T) int + if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == intType { + return true + } case trbFunc: // func(T, R) bool if ni == 2 && no == 1 && t.Out(0) == boolType { return true diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go index 754496f3b3..ba3fce81ff 100644 --- a/vendor/github.com/google/go-cmp/cmp/options.go +++ b/vendor/github.com/google/go-cmp/cmp/options.go @@ -232,7 +232,15 @@ func (validator) apply(s *state, vx, vy reflect.Value) { if t := s.curPath.Index(-2).Type(); t.Name() != "" { // Named type with unexported fields. 
name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType - if _, ok := reflect.New(t).Interface().(error); ok { + isProtoMessage := func(t reflect.Type) bool { + m, ok := reflect.PointerTo(t).MethodByName("ProtoReflect") + return ok && m.Type.NumIn() == 1 && m.Type.NumOut() == 1 && + m.Type.Out(0).PkgPath() == "google.golang.org/protobuf/reflect/protoreflect" && + m.Type.Out(0).Name() == "Message" + } + if isProtoMessage(t) { + help = `consider using "google.golang.org/protobuf/testing/protocmp".Transform to compare proto.Message types` + } else if _, ok := reflect.New(t).Interface().(error); ok { help = "consider using cmpopts.EquateErrors to compare error values" } else if t.Comparable() { help = "consider using cmpopts.EquateComparable to compare comparable Go types" diff --git a/vendor/github.com/google/gofuzz/.travis.yml b/vendor/github.com/google/gofuzz/.travis.yml deleted file mode 100644 index 061d72ae07..0000000000 --- a/vendor/github.com/google/gofuzz/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go - -go: - - 1.11.x - - 1.12.x - - 1.13.x - - master - -script: - - go test -cover diff --git a/vendor/github.com/google/gofuzz/CONTRIBUTING.md b/vendor/github.com/google/gofuzz/CONTRIBUTING.md deleted file mode 100644 index 97c1b34fd5..0000000000 --- a/vendor/github.com/google/gofuzz/CONTRIBUTING.md +++ /dev/null @@ -1,67 +0,0 @@ -# How to contribute # - -We'd love to accept your patches and contributions to this project. There are -just a few small guidelines you need to follow. - - -## Contributor License Agreement ## - -Contributions to any Google project must be accompanied by a Contributor -License Agreement. This is not a copyright **assignment**, it simply gives -Google permission to use and redistribute your contributions as part of the -project. - - * If you are an individual writing original source code and you're sure you - own the intellectual property, then you'll need to sign an [individual - CLA][]. - - * If you work for a company that wants to allow you to contribute your work, - then you'll need to sign a [corporate CLA][]. - -You generally only need to submit a CLA once, so if you've already submitted -one (even if it was for a different project), you probably don't need to do it -again. - -[individual CLA]: https://developers.google.com/open-source/cla/individual -[corporate CLA]: https://developers.google.com/open-source/cla/corporate - - -## Submitting a patch ## - - 1. It's generally best to start by opening a new issue describing the bug or - feature you're intending to fix. Even if you think it's relatively minor, - it's helpful to know what people are working on. Mention in the initial - issue that you are planning to work on that bug or feature so that it can - be assigned to you. - - 1. Follow the normal process of [forking][] the project, and setup a new - branch to work in. It's important that each group of changes be done in - separate branches in order to ensure that a pull request only includes the - commits related to that bug or feature. - - 1. Go makes it very simple to ensure properly formatted code, so always run - `go fmt` on your code before committing it. You should also run - [golint][] over your code. As noted in the [golint readme][], it's not - strictly necessary that your code be completely "lint-free", but this will - help you find common style issues. - - 1. Any significant changes should almost always be accompanied by tests. 
The - project already has good test coverage, so look at some of the existing - tests if you're unsure how to go about it. [gocov][] and [gocov-html][] - are invaluable tools for seeing which parts of your code aren't being - exercised by your tests. - - 1. Do your best to have [well-formed commit messages][] for each change. - This provides consistency throughout the project, and ensures that commit - messages are able to be formatted properly by various git tools. - - 1. Finally, push the commits to your fork and submit a [pull request][]. - -[forking]: https://help.github.com/articles/fork-a-repo -[golint]: https://github.com/golang/lint -[golint readme]: https://github.com/golang/lint/blob/master/README -[gocov]: https://github.com/axw/gocov -[gocov-html]: https://github.com/matm/gocov-html -[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html -[squash]: http://git-scm.com/book/en/Git-Tools-Rewriting-History#Squashing-Commits -[pull request]: https://help.github.com/articles/creating-a-pull-request diff --git a/vendor/github.com/google/gofuzz/doc.go b/vendor/github.com/google/gofuzz/doc.go deleted file mode 100644 index 9f9956d4a6..0000000000 --- a/vendor/github.com/google/gofuzz/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -/* -Copyright 2014 Google Inc. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package fuzz is a library for populating go objects with random values. -package fuzz diff --git a/vendor/github.com/google/gofuzz/fuzz.go b/vendor/github.com/google/gofuzz/fuzz.go deleted file mode 100644 index 761520a8ce..0000000000 --- a/vendor/github.com/google/gofuzz/fuzz.go +++ /dev/null @@ -1,605 +0,0 @@ -/* -Copyright 2014 Google Inc. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fuzz - -import ( - "fmt" - "math/rand" - "reflect" - "regexp" - "time" - - "github.com/google/gofuzz/bytesource" - "strings" -) - -// fuzzFuncMap is a map from a type to a fuzzFunc that handles that type. -type fuzzFuncMap map[reflect.Type]reflect.Value - -// Fuzzer knows how to fill any object with random fields. -type Fuzzer struct { - fuzzFuncs fuzzFuncMap - defaultFuzzFuncs fuzzFuncMap - r *rand.Rand - nilChance float64 - minElements int - maxElements int - maxDepth int - skipFieldPatterns []*regexp.Regexp -} - -// New returns a new Fuzzer. Customize your Fuzzer further by calling Funcs, -// RandSource, NilChance, or NumElements in any order. 
-func New() *Fuzzer { - return NewWithSeed(time.Now().UnixNano()) -} - -func NewWithSeed(seed int64) *Fuzzer { - f := &Fuzzer{ - defaultFuzzFuncs: fuzzFuncMap{ - reflect.TypeOf(&time.Time{}): reflect.ValueOf(fuzzTime), - }, - - fuzzFuncs: fuzzFuncMap{}, - r: rand.New(rand.NewSource(seed)), - nilChance: .2, - minElements: 1, - maxElements: 10, - maxDepth: 100, - } - return f -} - -// NewFromGoFuzz is a helper function that enables using gofuzz (this -// project) with go-fuzz (https://github.com/dvyukov/go-fuzz) for continuous -// fuzzing. Essentially, it enables translating the fuzzing bytes from -// go-fuzz to any Go object using this library. -// -// This implementation promises a constant translation from a given slice of -// bytes to the fuzzed objects. This promise will remain over future -// versions of Go and of this library. -// -// Note: the returned Fuzzer should not be shared between multiple goroutines, -// as its deterministic output will no longer be available. -// -// Example: use go-fuzz to test the function `MyFunc(int)` in the package -// `mypackage`. Add the file: "mypacakge_fuzz.go" with the content: -// -// // +build gofuzz -// package mypacakge -// import fuzz "github.com/google/gofuzz" -// func Fuzz(data []byte) int { -// var i int -// fuzz.NewFromGoFuzz(data).Fuzz(&i) -// MyFunc(i) -// return 0 -// } -func NewFromGoFuzz(data []byte) *Fuzzer { - return New().RandSource(bytesource.New(data)) -} - -// Funcs adds each entry in fuzzFuncs as a custom fuzzing function. -// -// Each entry in fuzzFuncs must be a function taking two parameters. -// The first parameter must be a pointer or map. It is the variable that -// function will fill with random data. The second parameter must be a -// fuzz.Continue, which will provide a source of randomness and a way -// to automatically continue fuzzing smaller pieces of the first parameter. -// -// These functions are called sensibly, e.g., if you wanted custom string -// fuzzing, the function `func(s *string, c fuzz.Continue)` would get -// called and passed the address of strings. Maps and pointers will always -// be made/new'd for you, ignoring the NilChange option. For slices, it -// doesn't make much sense to pre-create them--Fuzzer doesn't know how -// long you want your slice--so take a pointer to a slice, and make it -// yourself. (If you don't want your map/pointer type pre-made, take a -// pointer to it, and make it yourself.) See the examples for a range of -// custom functions. -func (f *Fuzzer) Funcs(fuzzFuncs ...interface{}) *Fuzzer { - for i := range fuzzFuncs { - v := reflect.ValueOf(fuzzFuncs[i]) - if v.Kind() != reflect.Func { - panic("Need only funcs!") - } - t := v.Type() - if t.NumIn() != 2 || t.NumOut() != 0 { - panic("Need 2 in and 0 out params!") - } - argT := t.In(0) - switch argT.Kind() { - case reflect.Ptr, reflect.Map: - default: - panic("fuzzFunc must take pointer or map type") - } - if t.In(1) != reflect.TypeOf(Continue{}) { - panic("fuzzFunc's second parameter must be type fuzz.Continue") - } - f.fuzzFuncs[argT] = v - } - return f -} - -// RandSource causes f to get values from the given source of randomness. -// Use if you want deterministic fuzzing. -func (f *Fuzzer) RandSource(s rand.Source) *Fuzzer { - f.r = rand.New(s) - return f -} - -// NilChance sets the probability of creating a nil pointer, map, or slice to -// 'p'. 'p' should be between 0 (no nils) and 1 (all nils), inclusive. 
-func (f *Fuzzer) NilChance(p float64) *Fuzzer { - if p < 0 || p > 1 { - panic("p should be between 0 and 1, inclusive.") - } - f.nilChance = p - return f -} - -// NumElements sets the minimum and maximum number of elements that will be -// added to a non-nil map or slice. -func (f *Fuzzer) NumElements(atLeast, atMost int) *Fuzzer { - if atLeast > atMost { - panic("atLeast must be <= atMost") - } - if atLeast < 0 { - panic("atLeast must be >= 0") - } - f.minElements = atLeast - f.maxElements = atMost - return f -} - -func (f *Fuzzer) genElementCount() int { - if f.minElements == f.maxElements { - return f.minElements - } - return f.minElements + f.r.Intn(f.maxElements-f.minElements+1) -} - -func (f *Fuzzer) genShouldFill() bool { - return f.r.Float64() >= f.nilChance -} - -// MaxDepth sets the maximum number of recursive fuzz calls that will be made -// before stopping. This includes struct members, pointers, and map and slice -// elements. -func (f *Fuzzer) MaxDepth(d int) *Fuzzer { - f.maxDepth = d - return f -} - -// Skip fields which match the supplied pattern. Call this multiple times if needed -// This is useful to skip XXX_ fields generated by protobuf -func (f *Fuzzer) SkipFieldsWithPattern(pattern *regexp.Regexp) *Fuzzer { - f.skipFieldPatterns = append(f.skipFieldPatterns, pattern) - return f -} - -// Fuzz recursively fills all of obj's fields with something random. First -// this tries to find a custom fuzz function (see Funcs). If there is no -// custom function this tests whether the object implements fuzz.Interface and, -// if so, calls Fuzz on it to fuzz itself. If that fails, this will see if -// there is a default fuzz function provided by this package. If all of that -// fails, this will generate random values for all primitive fields and then -// recurse for all non-primitives. -// -// This is safe for cyclic or tree-like structs, up to a limit. Use the -// MaxDepth method to adjust how deep you need it to recurse. -// -// obj must be a pointer. Only exported (public) fields can be set (thanks, -// golang :/ ) Intended for tests, so will panic on bad input or unimplemented -// fields. -func (f *Fuzzer) Fuzz(obj interface{}) { - v := reflect.ValueOf(obj) - if v.Kind() != reflect.Ptr { - panic("needed ptr!") - } - v = v.Elem() - f.fuzzWithContext(v, 0) -} - -// FuzzNoCustom is just like Fuzz, except that any custom fuzz function for -// obj's type will not be called and obj will not be tested for fuzz.Interface -// conformance. This applies only to obj and not other instances of obj's -// type. -// Not safe for cyclic or tree-like structs! -// obj must be a pointer. Only exported (public) fields can be set (thanks, golang :/ ) -// Intended for tests, so will panic on bad input or unimplemented fields. -func (f *Fuzzer) FuzzNoCustom(obj interface{}) { - v := reflect.ValueOf(obj) - if v.Kind() != reflect.Ptr { - panic("needed ptr!") - } - v = v.Elem() - f.fuzzWithContext(v, flagNoCustomFuzz) -} - -const ( - // Do not try to find a custom fuzz function. Does not apply recursively. - flagNoCustomFuzz uint64 = 1 << iota -) - -func (f *Fuzzer) fuzzWithContext(v reflect.Value, flags uint64) { - fc := &fuzzerContext{fuzzer: f} - fc.doFuzz(v, flags) -} - -// fuzzerContext carries context about a single fuzzing run, which lets Fuzzer -// be thread-safe. 
-type fuzzerContext struct { - fuzzer *Fuzzer - curDepth int -} - -func (fc *fuzzerContext) doFuzz(v reflect.Value, flags uint64) { - if fc.curDepth >= fc.fuzzer.maxDepth { - return - } - fc.curDepth++ - defer func() { fc.curDepth-- }() - - if !v.CanSet() { - return - } - - if flags&flagNoCustomFuzz == 0 { - // Check for both pointer and non-pointer custom functions. - if v.CanAddr() && fc.tryCustom(v.Addr()) { - return - } - if fc.tryCustom(v) { - return - } - } - - if fn, ok := fillFuncMap[v.Kind()]; ok { - fn(v, fc.fuzzer.r) - return - } - - switch v.Kind() { - case reflect.Map: - if fc.fuzzer.genShouldFill() { - v.Set(reflect.MakeMap(v.Type())) - n := fc.fuzzer.genElementCount() - for i := 0; i < n; i++ { - key := reflect.New(v.Type().Key()).Elem() - fc.doFuzz(key, 0) - val := reflect.New(v.Type().Elem()).Elem() - fc.doFuzz(val, 0) - v.SetMapIndex(key, val) - } - return - } - v.Set(reflect.Zero(v.Type())) - case reflect.Ptr: - if fc.fuzzer.genShouldFill() { - v.Set(reflect.New(v.Type().Elem())) - fc.doFuzz(v.Elem(), 0) - return - } - v.Set(reflect.Zero(v.Type())) - case reflect.Slice: - if fc.fuzzer.genShouldFill() { - n := fc.fuzzer.genElementCount() - v.Set(reflect.MakeSlice(v.Type(), n, n)) - for i := 0; i < n; i++ { - fc.doFuzz(v.Index(i), 0) - } - return - } - v.Set(reflect.Zero(v.Type())) - case reflect.Array: - if fc.fuzzer.genShouldFill() { - n := v.Len() - for i := 0; i < n; i++ { - fc.doFuzz(v.Index(i), 0) - } - return - } - v.Set(reflect.Zero(v.Type())) - case reflect.Struct: - for i := 0; i < v.NumField(); i++ { - skipField := false - fieldName := v.Type().Field(i).Name - for _, pattern := range fc.fuzzer.skipFieldPatterns { - if pattern.MatchString(fieldName) { - skipField = true - break - } - } - if !skipField { - fc.doFuzz(v.Field(i), 0) - } - } - case reflect.Chan: - fallthrough - case reflect.Func: - fallthrough - case reflect.Interface: - fallthrough - default: - panic(fmt.Sprintf("Can't handle %#v", v.Interface())) - } -} - -// tryCustom searches for custom handlers, and returns true iff it finds a match -// and successfully randomizes v. -func (fc *fuzzerContext) tryCustom(v reflect.Value) bool { - // First: see if we have a fuzz function for it. - doCustom, ok := fc.fuzzer.fuzzFuncs[v.Type()] - if !ok { - // Second: see if it can fuzz itself. - if v.CanInterface() { - intf := v.Interface() - if fuzzable, ok := intf.(Interface); ok { - fuzzable.Fuzz(Continue{fc: fc, Rand: fc.fuzzer.r}) - return true - } - } - // Finally: see if there is a default fuzz function. - doCustom, ok = fc.fuzzer.defaultFuzzFuncs[v.Type()] - if !ok { - return false - } - } - - switch v.Kind() { - case reflect.Ptr: - if v.IsNil() { - if !v.CanSet() { - return false - } - v.Set(reflect.New(v.Type().Elem())) - } - case reflect.Map: - if v.IsNil() { - if !v.CanSet() { - return false - } - v.Set(reflect.MakeMap(v.Type())) - } - default: - return false - } - - doCustom.Call([]reflect.Value{v, reflect.ValueOf(Continue{ - fc: fc, - Rand: fc.fuzzer.r, - })}) - return true -} - -// Interface represents an object that knows how to fuzz itself. Any time we -// find a type that implements this interface we will delegate the act of -// fuzzing itself. -type Interface interface { - Fuzz(c Continue) -} - -// Continue can be passed to custom fuzzing functions to allow them to use -// the correct source of randomness and to continue fuzzing their members. -type Continue struct { - fc *fuzzerContext - - // For convenience, Continue implements rand.Rand via embedding. 
- // Use this for generating any randomness if you want your fuzzing - // to be repeatable for a given seed. - *rand.Rand -} - -// Fuzz continues fuzzing obj. obj must be a pointer. -func (c Continue) Fuzz(obj interface{}) { - v := reflect.ValueOf(obj) - if v.Kind() != reflect.Ptr { - panic("needed ptr!") - } - v = v.Elem() - c.fc.doFuzz(v, 0) -} - -// FuzzNoCustom continues fuzzing obj, except that any custom fuzz function for -// obj's type will not be called and obj will not be tested for fuzz.Interface -// conformance. This applies only to obj and not other instances of obj's -// type. -func (c Continue) FuzzNoCustom(obj interface{}) { - v := reflect.ValueOf(obj) - if v.Kind() != reflect.Ptr { - panic("needed ptr!") - } - v = v.Elem() - c.fc.doFuzz(v, flagNoCustomFuzz) -} - -// RandString makes a random string up to 20 characters long. The returned string -// may include a variety of (valid) UTF-8 encodings. -func (c Continue) RandString() string { - return randString(c.Rand) -} - -// RandUint64 makes random 64 bit numbers. -// Weirdly, rand doesn't have a function that gives you 64 random bits. -func (c Continue) RandUint64() uint64 { - return randUint64(c.Rand) -} - -// RandBool returns true or false randomly. -func (c Continue) RandBool() bool { - return randBool(c.Rand) -} - -func fuzzInt(v reflect.Value, r *rand.Rand) { - v.SetInt(int64(randUint64(r))) -} - -func fuzzUint(v reflect.Value, r *rand.Rand) { - v.SetUint(randUint64(r)) -} - -func fuzzTime(t *time.Time, c Continue) { - var sec, nsec int64 - // Allow for about 1000 years of random time values, which keeps things - // like JSON parsing reasonably happy. - sec = c.Rand.Int63n(1000 * 365 * 24 * 60 * 60) - c.Fuzz(&nsec) - *t = time.Unix(sec, nsec) -} - -var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){ - reflect.Bool: func(v reflect.Value, r *rand.Rand) { - v.SetBool(randBool(r)) - }, - reflect.Int: fuzzInt, - reflect.Int8: fuzzInt, - reflect.Int16: fuzzInt, - reflect.Int32: fuzzInt, - reflect.Int64: fuzzInt, - reflect.Uint: fuzzUint, - reflect.Uint8: fuzzUint, - reflect.Uint16: fuzzUint, - reflect.Uint32: fuzzUint, - reflect.Uint64: fuzzUint, - reflect.Uintptr: fuzzUint, - reflect.Float32: func(v reflect.Value, r *rand.Rand) { - v.SetFloat(float64(r.Float32())) - }, - reflect.Float64: func(v reflect.Value, r *rand.Rand) { - v.SetFloat(r.Float64()) - }, - reflect.Complex64: func(v reflect.Value, r *rand.Rand) { - v.SetComplex(complex128(complex(r.Float32(), r.Float32()))) - }, - reflect.Complex128: func(v reflect.Value, r *rand.Rand) { - v.SetComplex(complex(r.Float64(), r.Float64())) - }, - reflect.String: func(v reflect.Value, r *rand.Rand) { - v.SetString(randString(r)) - }, - reflect.UnsafePointer: func(v reflect.Value, r *rand.Rand) { - panic("unimplemented") - }, -} - -// randBool returns true or false randomly. -func randBool(r *rand.Rand) bool { - return r.Int31()&(1<<30) == 0 -} - -type int63nPicker interface { - Int63n(int64) int64 -} - -// UnicodeRange describes a sequential range of unicode characters. -// Last must be numerically greater than First. -type UnicodeRange struct { - First, Last rune -} - -// UnicodeRanges describes an arbitrary number of sequential ranges of unicode characters. -// To be useful, each range must have at least one character (First <= Last) and -// there must be at least one range. -type UnicodeRanges []UnicodeRange - -// choose returns a random unicode character from the given range, using the -// given randomness source. 
-func (ur UnicodeRange) choose(r int63nPicker) rune { - count := int64(ur.Last - ur.First + 1) - return ur.First + rune(r.Int63n(count)) -} - -// CustomStringFuzzFunc constructs a FuzzFunc which produces random strings. -// Each character is selected from the range ur. If there are no characters -// in the range (cr.Last < cr.First), this will panic. -func (ur UnicodeRange) CustomStringFuzzFunc() func(s *string, c Continue) { - ur.check() - return func(s *string, c Continue) { - *s = ur.randString(c.Rand) - } -} - -// check is a function that used to check whether the first of ur(UnicodeRange) -// is greater than the last one. -func (ur UnicodeRange) check() { - if ur.Last < ur.First { - panic("The last encoding must be greater than the first one.") - } -} - -// randString of UnicodeRange makes a random string up to 20 characters long. -// Each character is selected form ur(UnicodeRange). -func (ur UnicodeRange) randString(r *rand.Rand) string { - n := r.Intn(20) - sb := strings.Builder{} - sb.Grow(n) - for i := 0; i < n; i++ { - sb.WriteRune(ur.choose(r)) - } - return sb.String() -} - -// defaultUnicodeRanges sets a default unicode range when user do not set -// CustomStringFuzzFunc() but wants fuzz string. -var defaultUnicodeRanges = UnicodeRanges{ - {' ', '~'}, // ASCII characters - {'\u00a0', '\u02af'}, // Multi-byte encoded characters - {'\u4e00', '\u9fff'}, // Common CJK (even longer encodings) -} - -// CustomStringFuzzFunc constructs a FuzzFunc which produces random strings. -// Each character is selected from one of the ranges of ur(UnicodeRanges). -// Each range has an equal probability of being chosen. If there are no ranges, -// or a selected range has no characters (.Last < .First), this will panic. -// Do not modify any of the ranges in ur after calling this function. -func (ur UnicodeRanges) CustomStringFuzzFunc() func(s *string, c Continue) { - // Check unicode ranges slice is empty. - if len(ur) == 0 { - panic("UnicodeRanges is empty.") - } - // if not empty, each range should be checked. - for i := range ur { - ur[i].check() - } - return func(s *string, c Continue) { - *s = ur.randString(c.Rand) - } -} - -// randString of UnicodeRanges makes a random string up to 20 characters long. -// Each character is selected form one of the ranges of ur(UnicodeRanges), -// and each range has an equal probability of being chosen. -func (ur UnicodeRanges) randString(r *rand.Rand) string { - n := r.Intn(20) - sb := strings.Builder{} - sb.Grow(n) - for i := 0; i < n; i++ { - sb.WriteRune(ur[r.Intn(len(ur))].choose(r)) - } - return sb.String() -} - -// randString makes a random string up to 20 characters long. The returned string -// may include a variety of (valid) UTF-8 encodings. -func randString(r *rand.Rand) string { - return defaultUnicodeRanges.randString(r) -} - -// randUint64 makes random 64 bit numbers. -// Weirdly, rand doesn't have a function that gives you 64 random bits. 
-func randUint64(r *rand.Rand) uint64 { - return uint64(r.Uint32())<<32 | uint64(r.Uint32()) -} diff --git a/vendor/github.com/openshift/api/.ci-operator.yaml b/vendor/github.com/openshift/api/.ci-operator.yaml index 7c15f83e3e..461415cbc5 100644 --- a/vendor/github.com/openshift/api/.ci-operator.yaml +++ b/vendor/github.com/openshift/api/.ci-operator.yaml @@ -1,4 +1,4 @@ build_root_image: name: release namespace: openshift - tag: rhel-9-release-golang-1.23-openshift-4.19 + tag: rhel-9-release-golang-1.24-openshift-4.20 diff --git a/vendor/github.com/openshift/api/.golangci.yaml b/vendor/github.com/openshift/api/.golangci.yaml index 848960e946..6a22f61576 100644 --- a/vendor/github.com/openshift/api/.golangci.yaml +++ b/vendor/github.com/openshift/api/.golangci.yaml @@ -1,25 +1,87 @@ -linters-settings: - custom: - kal: - type: "module" - description: KAL is the Kube-API-Linter and lints Kube like APIs based on API conventions and best practices. - settings: - linters: - enable: - - "maxlength" - - "nobools" - - "statussubresource" - lintersConfig: - conditions: - isFirstField: Warn - useProtobuf: Ignore - usePatchStrategy: Ignore +version: "2" linters: - disable-all: true + default: none enable: - - kal + - kubeapilinter + settings: + custom: + kubeapilinter: + path: tools/_output/bin/kube-api-linter.so + description: kubeapilinter is the Kube-API-Linter and lints Kube like APIs based on API conventions and best practices. + settings: + linters: + enable: + - maxlength + - nobools + - nomaps + - statussubresource + lintersConfig: + conditions: + isFirstField: Warn + usePatchStrategy: Ignore + useProtobuf: Ignore + optionalfields: + pointers: + preference: WhenRequired + policy: SuggestFix + omitEmpty: + # This will force omitempty on optional fields. + # This is in line with upstream guidance where optional fields should be omitted + # from the serialized output unless they are non-zero. + policy: SuggestFix + omitzero: + # This will force omitzero on optional struct fields. + # This means they can be omitted correctly and prevents the need for pointers to structs. + policy: SuggestFix + requiredFields: + pointers: + # This will force pointers when the field is required, but only when the zero + # value is a valid user choice, and has a semantic difference to being omitted (e.g. replicas allows 0). + policy: SuggestFix + omitempty: + # This will force omitempty on required fields. + # We do this so that the behaviour of not setting a value for the field is the same between + # both structured and unstructured clients. + policy: SuggestFix + omitzero: + # This will force omitzero on required struct fields. + # This means they can be omitted correctly and prevents the need for pointers to structs. + policy: SuggestFix + uniqueMarkers: + customMarkers: + - identifier: "openshift:validation:FeatureGateAwareEnum" + attributes: + - featureGate + - requiredFeatureGate + - identifier: "openshift:validation:FeatureGateMaxItems" + attributes: + - featureGate + - requiredFeatureGate + - identifier: "openshift:validation:FeatureGateAwareXValidation" + attributes: + - featureGate + - requiredFeatureGate + - rule + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - third_party$ + - builtin$ + - examples$ issues: # We have a lot of existing issues. # Want to make sure that those adding new fields have an # opportunity to fix them when running the linter locally. 
max-issues-per-linter: 1000 +formatters: + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/vendor/github.com/openshift/api/Dockerfile.ocp b/vendor/github.com/openshift/api/Dockerfile.ocp index 8d50096ed0..0a4c98c488 100644 --- a/vendor/github.com/openshift/api/Dockerfile.ocp +++ b/vendor/github.com/openshift/api/Dockerfile.ocp @@ -1,10 +1,10 @@ -FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.23-openshift-4.19 AS builder +FROM registry.ci.openshift.org/ocp/builder:rhel-9-golang-1.24-openshift-4.20 AS builder WORKDIR /go/src/github.com/openshift/api COPY . . ENV GO_PACKAGE github.com/openshift/api RUN make build --warn-undefined-variables -FROM registry.ci.openshift.org/ocp/4.19:base-rhel9 +FROM registry.ci.openshift.org/ocp/4.20:base-rhel9 # copy the built binaries to /usr/bin COPY --from=builder /go/src/github.com/openshift/api/render /usr/bin/ @@ -15,7 +15,7 @@ RUN mkdir -p /usr/share/bootkube/manifests/manifests COPY payload-manifests/crds/* /usr/share/bootkube/manifests/manifests # these are applied by the CVO -COPY manifests /manifests +RUN mkdir -p /manifests COPY payload-manifests/crds/* /manifests COPY payload-manifests/featuregates/* /manifests COPY payload-command/empty-resources /manifests diff --git a/vendor/github.com/openshift/api/Makefile b/vendor/github.com/openshift/api/Makefile index 6982e4efe7..123efe1029 100644 --- a/vendor/github.com/openshift/api/Makefile +++ b/vendor/github.com/openshift/api/Makefile @@ -4,7 +4,7 @@ all: build update: update-codegen-crds RUNTIME ?= podman -RUNTIME_IMAGE_NAME ?= registry.ci.openshift.org/openshift/release:rhel-9-release-golang-1.23-openshift-4.19 +RUNTIME_IMAGE_NAME ?= registry.ci.openshift.org/openshift/release:rhel-9-release-golang-1.24-openshift-4.20 EXCLUDE_DIRS := _output/ dependencymagnet/ hack/ third_party/ tls/ tools/ vendor/ tests/ GO_PACKAGES :=$(addsuffix ...,$(addprefix ./,$(filter-out $(EXCLUDE_DIRS), $(wildcard */)))) diff --git a/vendor/github.com/openshift/api/OWNERS b/vendor/github.com/openshift/api/OWNERS index ff904b63a3..ebd9a2f459 100644 --- a/vendor/github.com/openshift/api/OWNERS +++ b/vendor/github.com/openshift/api/OWNERS @@ -1,7 +1,7 @@ reviewers: - - deads2k - JoelSpeed - everettraven approvers: - deads2k - JoelSpeed + - everettraven diff --git a/vendor/github.com/openshift/api/README.md b/vendor/github.com/openshift/api/README.md index 934bcd3299..071da98c07 100644 --- a/vendor/github.com/openshift/api/README.md +++ b/vendor/github.com/openshift/api/README.md @@ -99,6 +99,8 @@ and then enforces the following rules. 3. Every test must be run on every TechPreview platform we have jobs for. (Ask for an exception if your feature doesn't support a variant.) 4. Every test must run at least 14 times on every platform/variant. 5. Every test must pass at least 95% of the time on every platform/variant. +6. Test results are taken from the last 7 days if the test was run at least 14 times during that period. Otherwise, data from the last 14 days is used. +7. Test flakes (even if the test eventually passes on a retry) are considered failures and negatively impact the pass rate. If your FeatureGate lacks automated testing, there is an exception process that allows QE to sign off on the promotion by commenting on the PR. 
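Note on the linter settings above: the optionalfields/requiredFields policies in the .golangci.yaml hunk are what drive the `+optional` markers and json-tag changes applied throughout the vendored openshift/api types below. The following is a minimal sketch of the field shapes those policies suggest; the ExampleSpec and ExampleTemplate types are hypothetical and not part of this patch.

package example

// ExampleSpec illustrates the kube-api-linter policies configured above.
type ExampleSpec struct {
	// replicas is required, but 0 is a valid user choice that differs from
	// "unset" (the case called out in the requiredFields comment above), so
	// the policy suggests a pointer plus omitempty.
	// +required
	Replicas *int32 `json:"replicas,omitempty"`

	// logLevel is optional and its zero value ("") simply means unset, so
	// the optionalfields preference (pointers: WhenRequired) keeps it a
	// plain value with omitempty.
	// +optional
	LogLevel string `json:"logLevel,omitempty"`

	// template is an optional struct field; the omitzero policy lets it be
	// omitted from serialized output without resorting to a pointer.
	// +optional
	Template ExampleTemplate `json:"template,omitzero"`
}

// ExampleTemplate exists only to make the sketch self-contained.
type ExampleTemplate struct {
	// +optional
	Name string `json:"name,omitempty"`
}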
diff --git a/vendor/github.com/openshift/api/apiserver/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/apiserver/v1/zz_generated.deepcopy.go index 79be371535..35c9d2d988 100644 --- a/vendor/github.com/openshift/api/apiserver/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/apiserver/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ //go:build !ignore_autogenerated // +build !ignore_autogenerated -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by codegen. DO NOT EDIT. package v1 diff --git a/vendor/github.com/openshift/api/apps/v1/generated.proto b/vendor/github.com/openshift/api/apps/v1/generated.proto index 6f50fcaf95..b0a1a1c083 100644 --- a/vendor/github.com/openshift/api/apps/v1/generated.proto +++ b/vendor/github.com/openshift/api/apps/v1/generated.proto @@ -186,34 +186,43 @@ message DeploymentConfigSpec { message DeploymentConfigStatus { // latestVersion is used to determine whether the current deployment associated with a deployment // config is out of sync. + // +optional optional int64 latestVersion = 1; // observedGeneration is the most recent generation observed by the deployment config controller. + // +optional optional int64 observedGeneration = 2; // replicas is the total number of pods targeted by this deployment config. + // +optional optional int32 replicas = 3; // updatedReplicas is the total number of non-terminated pods targeted by this deployment config // that have the desired template spec. + // +optional optional int32 updatedReplicas = 4; // availableReplicas is the total number of available pods targeted by this deployment config. + // +optional optional int32 availableReplicas = 5; // unavailableReplicas is the total number of unavailable pods targeted by this deployment config. + // +optional optional int32 unavailableReplicas = 6; // details are the reasons for the update to this deployment config. // This could be based on a change made by the user or caused by an automatic trigger + // +optional optional DeploymentDetails details = 7; // conditions represents the latest available observations of a deployment config's current state. // +patchMergeKey=type // +patchStrategy=merge + // +optional repeated DeploymentCondition conditions = 8; // Total number of ready pods targeted by this deployment. + // +optional optional int32 readyReplicas = 9; } diff --git a/vendor/github.com/openshift/api/apps/v1/types.go b/vendor/github.com/openshift/api/apps/v1/types.go index 619c30e828..a66ce09ea5 100644 --- a/vendor/github.com/openshift/api/apps/v1/types.go +++ b/vendor/github.com/openshift/api/apps/v1/types.go @@ -304,26 +304,35 @@ type DeploymentTriggerImageChangeParams struct { type DeploymentConfigStatus struct { // latestVersion is used to determine whether the current deployment associated with a deployment // config is out of sync. + // +optional LatestVersion int64 `json:"latestVersion" protobuf:"varint,1,opt,name=latestVersion"` // observedGeneration is the most recent generation observed by the deployment config controller. + // +optional ObservedGeneration int64 `json:"observedGeneration" protobuf:"varint,2,opt,name=observedGeneration"` // replicas is the total number of pods targeted by this deployment config. + // +optional Replicas int32 `json:"replicas" protobuf:"varint,3,opt,name=replicas"` // updatedReplicas is the total number of non-terminated pods targeted by this deployment config // that have the desired template spec. 
+ // +optional UpdatedReplicas int32 `json:"updatedReplicas" protobuf:"varint,4,opt,name=updatedReplicas"` // availableReplicas is the total number of available pods targeted by this deployment config. + // +optional AvailableReplicas int32 `json:"availableReplicas" protobuf:"varint,5,opt,name=availableReplicas"` // unavailableReplicas is the total number of unavailable pods targeted by this deployment config. + // +optional UnavailableReplicas int32 `json:"unavailableReplicas" protobuf:"varint,6,opt,name=unavailableReplicas"` // details are the reasons for the update to this deployment config. // This could be based on a change made by the user or caused by an automatic trigger + // +optional Details *DeploymentDetails `json:"details,omitempty" protobuf:"bytes,7,opt,name=details"` // conditions represents the latest available observations of a deployment config's current state. // +patchMergeKey=type // +patchStrategy=merge + // +optional Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,8,rep,name=conditions"` // Total number of ready pods targeted by this deployment. + // +optional ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,9,opt,name=readyReplicas"` } diff --git a/vendor/github.com/openshift/api/apps/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/apps/v1/zz_generated.deepcopy.go index 11c22a80f3..3fd2e95d47 100644 --- a/vendor/github.com/openshift/api/apps/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/apps/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ //go:build !ignore_autogenerated // +build !ignore_autogenerated -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by codegen. DO NOT EDIT. package v1 diff --git a/vendor/github.com/openshift/api/authorization/v1/generated.proto b/vendor/github.com/openshift/api/authorization/v1/generated.proto index f7d7b772a7..326ecf2e64 100644 --- a/vendor/github.com/openshift/api/authorization/v1/generated.proto +++ b/vendor/github.com/openshift/api/authorization/v1/generated.proto @@ -561,10 +561,12 @@ message SubjectRulesReviewSpec { // SubjectRulesReviewStatus is contains the result of a rules check message SubjectRulesReviewStatus { // rules is the list of rules (no particular sort) that are allowed for the subject + // +optional repeated PolicyRule rules = 1; // evaluationError can appear in combination with Rules. It means some error happened during evaluation // that may have prevented additional rules from being populated. + // +optional optional string evaluationError = 2; } diff --git a/vendor/github.com/openshift/api/authorization/v1/types.go b/vendor/github.com/openshift/api/authorization/v1/types.go index bf4071867f..ac9a6759f2 100644 --- a/vendor/github.com/openshift/api/authorization/v1/types.go +++ b/vendor/github.com/openshift/api/authorization/v1/types.go @@ -208,9 +208,11 @@ type SubjectRulesReviewSpec struct { // SubjectRulesReviewStatus is contains the result of a rules check type SubjectRulesReviewStatus struct { // rules is the list of rules (no particular sort) that are allowed for the subject + // +optional Rules []PolicyRule `json:"rules" protobuf:"bytes,1,rep,name=rules"` // evaluationError can appear in combination with Rules. It means some error happened during evaluation // that may have prevented additional rules from being populated. 
+ // +optional EvaluationError string `json:"evaluationError,omitempty" protobuf:"bytes,2,opt,name=evaluationError"` } diff --git a/vendor/github.com/openshift/api/authorization/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/authorization/v1/zz_generated.deepcopy.go index 9b7d44f3b2..208ae3467f 100644 --- a/vendor/github.com/openshift/api/authorization/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/authorization/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ //go:build !ignore_autogenerated // +build !ignore_autogenerated -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by codegen. DO NOT EDIT. package v1 diff --git a/vendor/github.com/openshift/api/build/v1/generated.proto b/vendor/github.com/openshift/api/build/v1/generated.proto index 92ae73426c..7a18a7f027 100644 --- a/vendor/github.com/openshift/api/build/v1/generated.proto +++ b/vendor/github.com/openshift/api/build/v1/generated.proto @@ -163,11 +163,13 @@ message BuildConfigSpec { // BuildConfigStatus contains current state of the build config object. message BuildConfigStatus { // lastVersion is used to inform about number of last triggered build. + // +optional optional int64 lastVersion = 1; // imageChangeTriggers captures the runtime state of any ImageChangeTrigger specified in the BuildConfigSpec, // including the value reconciled by the OpenShift APIServer for the lastTriggeredImageID. There is a single entry // in this array for each image change trigger in spec. Each trigger status references the ImageStreamTag that acts as the source of the trigger. + // +optional repeated ImageChangeTriggerStatus imageChangeTriggers = 2; } @@ -465,54 +467,67 @@ message BuildSpec { message BuildStatus { // phase is the point in the build lifecycle. Possible values are // "New", "Pending", "Running", "Complete", "Failed", "Error", and "Cancelled". + // +optional optional string phase = 1; // cancelled describes if a cancel event was triggered for the build. + // +optional optional bool cancelled = 2; // reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI. + // +optional optional string reason = 3; // message is a human-readable message indicating details about why the build has this status. + // +optional optional string message = 4; // startTimestamp is a timestamp representing the server time when this Build started // running in a Pod. // It is represented in RFC3339 form and is in UTC. + // +optional optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time startTimestamp = 5; // completionTimestamp is a timestamp representing the server time when this Build was // finished, whether that build failed or succeeded. It reflects the time at which // the Pod running the Build terminated. // It is represented in RFC3339 form and is in UTC. + // +optional optional .k8s.io.apimachinery.pkg.apis.meta.v1.Time completionTimestamp = 6; // duration contains time.Duration object describing build time. + // +optional optional int64 duration = 7; // outputDockerImageReference contains a reference to the container image that // will be built by this build. Its value is computed from // Build.Spec.Output.To, and should include the registry address, so that // it can be used to push and pull the image. + // +optional optional string outputDockerImageReference = 8; // config is an ObjectReference to the BuildConfig this Build is based on. 
+ // +optional optional .k8s.io.api.core.v1.ObjectReference config = 9; // output describes the container image the build has produced. + // +optional optional BuildStatusOutput output = 10; // stages contains details about each stage that occurs during the build // including start time, duration (in milliseconds), and the steps that // occured within each stage. + // +optional repeated StageInfo stages = 11; // logSnippet is the last few lines of the build log. This value is only set for builds that failed. + // +optional optional string logSnippet = 12; // conditions represents the latest available observations of a build's current state. // +patchMergeKey=type // +patchStrategy=merge + // +optional repeated BuildCondition conditions = 13; } diff --git a/vendor/github.com/openshift/api/build/v1/types.go b/vendor/github.com/openshift/api/build/v1/types.go index 12bf67db1a..b64aacc060 100644 --- a/vendor/github.com/openshift/api/build/v1/types.go +++ b/vendor/github.com/openshift/api/build/v1/types.go @@ -192,54 +192,67 @@ type ImageChangeCause struct { type BuildStatus struct { // phase is the point in the build lifecycle. Possible values are // "New", "Pending", "Running", "Complete", "Failed", "Error", and "Cancelled". + // +optional Phase BuildPhase `json:"phase" protobuf:"bytes,1,opt,name=phase,casttype=BuildPhase"` // cancelled describes if a cancel event was triggered for the build. + // +optional Cancelled bool `json:"cancelled,omitempty" protobuf:"varint,2,opt,name=cancelled"` // reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI. + // +optional Reason StatusReason `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason,casttype=StatusReason"` // message is a human-readable message indicating details about why the build has this status. + // +optional Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"` // startTimestamp is a timestamp representing the server time when this Build started // running in a Pod. // It is represented in RFC3339 form and is in UTC. + // +optional StartTimestamp *metav1.Time `json:"startTimestamp,omitempty" protobuf:"bytes,5,opt,name=startTimestamp"` // completionTimestamp is a timestamp representing the server time when this Build was // finished, whether that build failed or succeeded. It reflects the time at which // the Pod running the Build terminated. // It is represented in RFC3339 form and is in UTC. + // +optional CompletionTimestamp *metav1.Time `json:"completionTimestamp,omitempty" protobuf:"bytes,6,opt,name=completionTimestamp"` // duration contains time.Duration object describing build time. + // +optional Duration time.Duration `json:"duration,omitempty" protobuf:"varint,7,opt,name=duration,casttype=time.Duration"` // outputDockerImageReference contains a reference to the container image that // will be built by this build. Its value is computed from // Build.Spec.Output.To, and should include the registry address, so that // it can be used to push and pull the image. + // +optional OutputDockerImageReference string `json:"outputDockerImageReference,omitempty" protobuf:"bytes,8,opt,name=outputDockerImageReference"` // config is an ObjectReference to the BuildConfig this Build is based on. + // +optional Config *corev1.ObjectReference `json:"config,omitempty" protobuf:"bytes,9,opt,name=config"` // output describes the container image the build has produced. 
+ // +optional Output BuildStatusOutput `json:"output,omitempty" protobuf:"bytes,10,opt,name=output"` // stages contains details about each stage that occurs during the build // including start time, duration (in milliseconds), and the steps that // occured within each stage. + // +optional Stages []StageInfo `json:"stages,omitempty" protobuf:"bytes,11,opt,name=stages"` // logSnippet is the last few lines of the build log. This value is only set for builds that failed. + // +optional LogSnippet string `json:"logSnippet,omitempty" protobuf:"bytes,12,opt,name=logSnippet"` // conditions represents the latest available observations of a build's current state. // +patchMergeKey=type // +patchStrategy=merge + // +optional Conditions []BuildCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,13,rep,name=conditions"` } @@ -1005,11 +1018,13 @@ const ( // BuildConfigStatus contains current state of the build config object. type BuildConfigStatus struct { // lastVersion is used to inform about number of last triggered build. + // +optional LastVersion int64 `json:"lastVersion" protobuf:"varint,1,opt,name=lastVersion"` // imageChangeTriggers captures the runtime state of any ImageChangeTrigger specified in the BuildConfigSpec, // including the value reconciled by the OpenShift APIServer for the lastTriggeredImageID. There is a single entry // in this array for each image change trigger in spec. Each trigger status references the ImageStreamTag that acts as the source of the trigger. + // +optional ImageChangeTriggers []ImageChangeTriggerStatus `json:"imageChangeTriggers,omitempty" protobuf:"bytes,2,rep,name=imageChangeTriggers"` } diff --git a/vendor/github.com/openshift/api/build/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/build/v1/zz_generated.deepcopy.go index d36b28c82b..03cb4a6a77 100644 --- a/vendor/github.com/openshift/api/build/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/build/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ //go:build !ignore_autogenerated // +build !ignore_autogenerated -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by codegen. DO NOT EDIT. package v1 diff --git a/vendor/github.com/openshift/api/cloudnetwork/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/cloudnetwork/v1/zz_generated.deepcopy.go index 092825f352..6392002aa8 100644 --- a/vendor/github.com/openshift/api/cloudnetwork/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/cloudnetwork/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ //go:build !ignore_autogenerated // +build !ignore_autogenerated -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by codegen. DO NOT EDIT. 
package v1 diff --git a/vendor/github.com/openshift/api/config/install.go b/vendor/github.com/openshift/api/config/install.go index 1c3c677477..851a6c3e1e 100644 --- a/vendor/github.com/openshift/api/config/install.go +++ b/vendor/github.com/openshift/api/config/install.go @@ -6,6 +6,7 @@ import ( configv1 "github.com/openshift/api/config/v1" configv1alpha1 "github.com/openshift/api/config/v1alpha1" + configv1alpha2 "github.com/openshift/api/config/v1alpha2" ) const ( @@ -13,7 +14,7 @@ const ( ) var ( - schemeBuilder = runtime.NewSchemeBuilder(configv1.Install, configv1alpha1.Install) + schemeBuilder = runtime.NewSchemeBuilder(configv1.Install, configv1alpha1.Install, configv1alpha2.Install) // Install is a function which adds every version of this group to a scheme Install = schemeBuilder.AddToScheme ) diff --git a/vendor/github.com/openshift/api/config/v1/register.go b/vendor/github.com/openshift/api/config/v1/register.go index 61302592ea..eac29a2367 100644 --- a/vendor/github.com/openshift/api/config/v1/register.go +++ b/vendor/github.com/openshift/api/config/v1/register.go @@ -72,6 +72,10 @@ func addKnownTypes(scheme *runtime.Scheme) error { &ImageDigestMirrorSetList{}, &ImageTagMirrorSet{}, &ImageTagMirrorSetList{}, + &ImagePolicy{}, + &ImagePolicyList{}, + &ClusterImagePolicy{}, + &ClusterImagePolicyList{}, ) metav1.AddToGroupVersion(scheme, GroupVersion) return nil diff --git a/vendor/github.com/openshift/api/config/v1/types_apiserver.go b/vendor/github.com/openshift/api/config/v1/types_apiserver.go index 75b647f745..0afe7b1d8d 100644 --- a/vendor/github.com/openshift/api/config/v1/types_apiserver.go +++ b/vendor/github.com/openshift/api/config/v1/types_apiserver.go @@ -51,15 +51,15 @@ type APIServerSpec struct { // server from JavaScript applications. // The values are regular expressions that correspond to the Golang regular expression language. // +optional + // +listType=atomic AdditionalCORSAllowedOrigins []string `json:"additionalCORSAllowedOrigins,omitempty"` // encryption allows the configuration of encryption of resources at the datastore layer. // +optional Encryption APIServerEncryption `json:"encryption"` // tlsSecurityProfile specifies settings for TLS connections for externally exposed servers. // - // If unset, a default (which may change between releases) is chosen. Note that only Old, - // Intermediate and Custom profiles are currently supported, and the maximum available - // minTLSVersion is VersionTLS12. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. + // The current default is the Intermediate profile. // +optional TLSSecurityProfile *TLSSecurityProfile `json:"tlsSecurityProfile,omitempty"` // audit specifies the settings for audit configuration to be applied to all OpenShift-provided @@ -145,7 +145,7 @@ type AuditCustomRule struct { // If unset, the 'Default' profile is used as the default. // // +required - Profile AuditProfileType `json:"profile,omitempty"` + Profile AuditProfileType `json:"profile"` } type APIServerServingCerts struct { @@ -153,6 +153,8 @@ type APIServerServingCerts struct { // If no named certificates are provided, or no named certificates match the server name as understood by a client, // the defaultServingCertificate will be used. 
 	// +optional
+	// +listType=atomic
+	// +kubebuilder:validation:MaxItems=32
 	NamedCertificates []APIServerNamedServingCert `json:"namedCertificates,omitempty"`
 }
 
@@ -162,6 +164,8 @@ type APIServerNamedServingCert struct {
 	// serve secure traffic. If no names are provided, the implicit names will be extracted from the certificates.
 	// Exact names trump over wildcard names. Explicit names defined here trump over extracted implicit names.
 	// +optional
+	// +listType=atomic
+	// +kubebuilder:validation:MaxItems=64
 	Names []string `json:"names,omitempty"`
 	// servingCertificate references a kubernetes.io/tls type secret containing the TLS cert info for serving secure traffic.
 	// The secret must exist in the openshift-config namespace and contain the following required fields:
@@ -170,6 +174,9 @@ type APIServerNamedServingCert struct {
 	ServingCertificate SecretNameReference `json:"servingCertificate"`
 }
 
+// APIServerEncryption is used to encrypt sensitive resources on the cluster.
+// +openshift:validation:FeatureGateAwareXValidation:featureGate=KMSEncryptionProvider,rule="has(self.type) && self.type == 'KMS' ? has(self.kms) : !has(self.kms)",message="kms config is required when encryption type is KMS, and forbidden otherwise"
+// +union
 type APIServerEncryption struct {
 	// type defines what encryption type should be used to encrypt resources at the datastore layer.
 	// When this field is unset (i.e. when it is set to the empty string), identity is implied.
@@ -188,9 +195,23 @@ type APIServerEncryption struct {
 	// +unionDiscriminator
 	// +optional
 	Type EncryptionType `json:"type,omitempty"`
+
+	// kms defines the configuration for the external KMS instance that manages the encryption keys.
+	// When KMS encryption is enabled, sensitive resources will be encrypted using keys managed by an
+	// externally configured KMS instance.
+	//
+	// The Key Management Service (KMS) instance provides symmetric encryption and is responsible for
+	// managing the lifecycle of the encryption keys outside of the control plane.
+	// This allows integration with an external provider to manage the data encryption keys securely.
+	//
+	// +openshift:enable:FeatureGate=KMSEncryptionProvider
+	// +unionMember
+	// +optional
+	KMS *KMSConfig `json:"kms,omitempty"`
 }
 
-// +kubebuilder:validation:Enum="";identity;aescbc;aesgcm
+// +openshift:validation:FeatureGateAwareEnum:featureGate="",enum="";identity;aescbc;aesgcm
+// +openshift:validation:FeatureGateAwareEnum:featureGate=KMSEncryptionProvider,enum="";identity;aescbc;aesgcm;KMS
 type EncryptionType string
 
 const (
@@ -205,6 +226,11 @@ const (
 	// aesgcm refers to a type where AES-GCM with random nonce and a 32-byte key
 	// is used to perform encryption at the datastore layer.
 	EncryptionTypeAESGCM EncryptionType = "aesgcm"
+
+	// kms refers to a type of encryption where the encryption keys are managed
+	// outside the control plane in a Key Management Service instance, while
+	// encryption is still performed at the datastore layer.
+ EncryptionTypeKMS EncryptionType = "KMS" ) type APIServerStatus struct { diff --git a/vendor/github.com/openshift/api/config/v1/types_authentication.go b/vendor/github.com/openshift/api/config/v1/types_authentication.go index a2af4d6544..52a41b2fef 100644 --- a/vendor/github.com/openshift/api/config/v1/types_authentication.go +++ b/vendor/github.com/openshift/api/config/v1/types_authentication.go @@ -5,7 +5,7 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +openshift:validation:FeatureGateAwareXValidation:featureGate=ExternalOIDC,rule="!has(self.spec.oidcProviders) || self.spec.oidcProviders.all(p, !has(p.oidcClients) || p.oidcClients.all(specC, self.status.oidcClients.exists(statusC, statusC.componentNamespace == specC.componentNamespace && statusC.componentName == specC.componentName) || (has(oldSelf.spec.oidcProviders) && oldSelf.spec.oidcProviders.exists(oldP, oldP.name == p.name && has(oldP.oidcClients) && oldP.oidcClients.exists(oldC, oldC.componentNamespace == specC.componentNamespace && oldC.componentName == specC.componentName)))))",message="all oidcClients in the oidcProviders must match their componentName and componentNamespace to either a previously configured oidcClient or they must exist in the status.oidcClients" +// +openshift:validation:FeatureGateAwareXValidation:featureGate=ExternalOIDC;ExternalOIDCWithUIDAndExtraClaimMappings,rule="!has(self.spec.oidcProviders) || self.spec.oidcProviders.all(p, !has(p.oidcClients) || p.oidcClients.all(specC, self.status.oidcClients.exists(statusC, statusC.componentNamespace == specC.componentNamespace && statusC.componentName == specC.componentName) || (has(oldSelf.spec.oidcProviders) && oldSelf.spec.oidcProviders.exists(oldP, oldP.name == p.name && has(oldP.oidcClients) && oldP.oidcClients.exists(oldC, oldC.componentNamespace == specC.componentNamespace && oldC.componentName == specC.componentName)))))",message="all oidcClients in the oidcProviders must match their componentName and componentNamespace to either a previously configured oidcClient or they must exist in the status.oidcClients" // Authentication specifies cluster-wide settings for authentication (like OAuth and // webhook token authenticators). The canonical name of an instance is `cluster`. @@ -90,6 +90,8 @@ type AuthenticationSpec struct { // +listMapKey=name // +kubebuilder:validation:MaxItems=1 // +openshift:enable:FeatureGate=ExternalOIDC + // +openshift:enable:FeatureGate=ExternalOIDCWithUIDAndExtraClaimMappings + // +optional OIDCProviders []OIDCProvider `json:"oidcProviders,omitempty"` } @@ -107,6 +109,7 @@ type AuthenticationStatus struct { // If the config map or expected key is not found, no metadata is served. // If the specified metadata is not valid, no metadata is served. // The namespace for this config map is openshift-config-managed. 
+ // +optional IntegratedOAuthMetadata ConfigMapNameReference `json:"integratedOAuthMetadata"` // oidcClients is where participating operators place the current OIDC client status @@ -117,6 +120,8 @@ type AuthenticationStatus struct { // +listMapKey=componentName // +kubebuilder:validation:MaxItems=20 // +openshift:enable:FeatureGate=ExternalOIDC + // +openshift:enable:FeatureGate=ExternalOIDCWithUIDAndExtraClaimMappings + // +optional OIDCClients []OIDCClientStatus `json:"oidcClients"` } @@ -135,7 +140,7 @@ type AuthenticationList struct { } // +openshift:validation:FeatureGateAwareEnum:featureGate="",enum="";None;IntegratedOAuth -// +openshift:validation:FeatureGateAwareEnum:featureGate=ExternalOIDC,enum="";None;IntegratedOAuth;OIDC +// +openshift:validation:FeatureGateAwareEnum:featureGate=ExternalOIDC;ExternalOIDCWithUIDAndExtraClaimMappings,enum="";None;IntegratedOAuth;OIDC type AuthenticationType string const ( @@ -193,32 +198,50 @@ const ( ) type OIDCProvider struct { - // name of the OIDC provider + // name is a required field that configures the unique human-readable identifier + // associated with the identity provider. + // It is used to distinguish between multiple identity providers + // and has no impact on token validation or authentication mechanics. + // + // name must not be an empty string (""). // // +kubebuilder:validation:MinLength=1 // +required Name string `json:"name"` - // issuer describes atributes of the OIDC token issuer + + // issuer is a required field that configures how the platform interacts + // with the identity provider and how tokens issued from the identity provider + // are evaluated by the Kubernetes API server. // // +required Issuer TokenIssuer `json:"issuer"` - // oidcClients contains configuration for the platform's clients that - // need to request tokens from the issuer + // oidcClients is an optional field that configures how on-cluster, + // platform clients should request tokens from the identity provider. + // oidcClients must not exceed 20 entries and entries must have unique namespace/name pairs. // // +listType=map // +listMapKey=componentNamespace // +listMapKey=componentName // +kubebuilder:validation:MaxItems=20 + // +optional OIDCClients []OIDCClientConfig `json:"oidcClients"` - // claimMappings describes rules on how to transform information from an - // ID token into a cluster identity + // claimMappings is a required field that configures the rules to be used by + // the Kubernetes API server for translating claims in a JWT token, issued + // by the identity provider, to a cluster identity. + // + // +required ClaimMappings TokenClaimMappings `json:"claimMappings"` - // claimValidationRules are rules that are applied to validate token claims to authenticate users. + // claimValidationRules is an optional field that configures the rules to + // be used by the Kubernetes API server for validating the claims in a JWT + // token issued by the identity provider. + // + // Validation rules are joined via an AND operation. // // +listType=atomic + // +optional ClaimValidationRules []TokenClaimValidationRule `json:"claimValidationRules,omitempty"` } @@ -226,17 +249,29 @@ type OIDCProvider struct { type TokenAudience string type TokenIssuer struct { - // URL is the serving URL of the token issuer. - // Must use the https:// scheme. - // - // +kubebuilder:validation:Pattern=`^https:\/\/[^\s]` + // issuerURL is a required field that configures the URL used to issue tokens + // by the identity provider. 
+ // The Kubernetes API server determines how authentication tokens should be handled + // by matching the 'iss' claim in the JWT to the issuerURL of configured identity providers. + // + // Must be at least 1 character and must not exceed 512 characters in length. + // Must be a valid URL that uses the 'https' scheme and does not contain a query, fragment or user. + // + // +kubebuilder:validation:XValidation:rule="isURL(self)",message="must be a valid URL" + // +kubebuilder:validation:XValidation:rule="isURL(self) && url(self).getScheme() == 'https'",message="must use the 'https' scheme" + // +kubebuilder:validation:XValidation:rule="isURL(self) && url(self).getQuery() == {}",message="must not have a query" + // +kubebuilder:validation:XValidation:rule="self.find('#(.+)$') == ''",message="must not have a fragment" + // +kubebuilder:validation:XValidation:rule="self.find('@') == ''",message="must not have user info" + // +kubebuilder:validation:MaxLength=512 + // +kubebuilder:validation:MinLength=1 // +required URL string `json:"issuerURL"` - // audiences is an array of audiences that the token was issued for. - // Valid tokens must include at least one of these values in their - // "aud" claim. - // Must be set to exactly one value. + // audiences is a required field that configures the acceptable audiences + // the JWT token, issued by the identity provider, must be issued to. + // At least one of the entries must match the 'aud' claim in the JWT token. + // + // audiences must contain at least one entry and must not exceed ten entries. // // +listType=set // +kubebuilder:validation:MinItems=1 @@ -244,93 +279,293 @@ type TokenIssuer struct { // +required Audiences []TokenAudience `json:"audiences"` - // CertificateAuthority is a reference to a config map in the - // configuration namespace. The .data of the configMap must contain - // the "ca-bundle.crt" key. - // If unset, system trust is used instead. + // issuerCertificateAuthority is an optional field that configures the + // certificate authority, used by the Kubernetes API server, to validate + // the connection to the identity provider when fetching discovery information. + // + // When not specified, the system trust is used. + // + // When specified, it must reference a ConfigMap in the openshift-config + // namespace containing the PEM-encoded CA certificates under the 'ca-bundle.crt' + // key in the data field of the ConfigMap. + // + // +optional CertificateAuthority ConfigMapNameReference `json:"issuerCertificateAuthority"` } type TokenClaimMappings struct { - // username is a name of the claim that should be used to construct - // usernames for the cluster identity. + // username is a required field that configures how the username of a cluster identity + // should be constructed from the claims in a JWT token issued by the identity provider. // - // Default value: "sub" - Username UsernameClaimMapping `json:"username,omitempty"` + // +required + Username UsernameClaimMapping `json:"username"` - // groups is a name of the claim that should be used to construct - // groups for the cluster identity. - // The referenced claim must use array of strings values. + // groups is an optional field that configures how the groups of a cluster identity + // should be constructed from the claims in a JWT token issued + // by the identity provider. + // When referencing a claim, if the claim is present in the JWT + // token, its value must be a list of groups separated by a comma (','). 
+ // For example - '"example"' and '"exampleOne", "exampleTwo", "exampleThree"' are valid claim values. + // + // +optional Groups PrefixedClaimMapping `json:"groups,omitempty"` + + // uid is an optional field for configuring the claim mapping + // used to construct the uid for the cluster identity. + // + // When using uid.claim to specify the claim it must be a single string value. + // When using uid.expression the expression must result in a single string value. + // + // When omitted, this means the user has no opinion and the platform + // is left to choose a default, which is subject to change over time. + // The current default is to use the 'sub' claim. + // + // +optional + // +openshift:enable:FeatureGate=ExternalOIDCWithUIDAndExtraClaimMappings + UID *TokenClaimOrExpressionMapping `json:"uid,omitempty"` + + // extra is an optional field for configuring the mappings + // used to construct the extra attribute for the cluster identity. + // When omitted, no extra attributes will be present on the cluster identity. + // key values for extra mappings must be unique. + // A maximum of 32 extra attribute mappings may be provided. + // + // +optional + // +kubebuilder:validation:MaxItems=32 + // +listType=map + // +listMapKey=key + // +openshift:enable:FeatureGate=ExternalOIDCWithUIDAndExtraClaimMappings + Extra []ExtraMapping `json:"extra,omitempty"` } +// TokenClaimMapping allows specifying a JWT token +// claim to be used when mapping claims from an +// authentication token to cluster identities. type TokenClaimMapping struct { - // claim is a JWT token claim to be used in the mapping + // claim is a required field that configures the JWT token + // claim whose value is assigned to the cluster identity + // field associated with this mapping. // // +required Claim string `json:"claim"` } +// TokenClaimOrExpressionMapping allows specifying either a JWT +// token claim or CEL expression to be used when mapping claims +// from an authentication token to cluster identities. +// +kubebuilder:validation:XValidation:rule="has(self.claim) ? !has(self.expression) : has(self.expression)",message="precisely one of claim or expression must be set" +type TokenClaimOrExpressionMapping struct { + // claim is an optional field for specifying the + // JWT token claim that is used in the mapping. + // The value of this claim will be assigned to + // the field in which this mapping is associated. + // + // Precisely one of claim or expression must be set. + // claim must not be specified when expression is set. + // When specified, claim must be at least 1 character in length + // and must not exceed 256 characters in length. + // + // +optional + // +kubebuilder:validation:MaxLength=256 + // +kubebuilder:validation:MinLength=1 + Claim string `json:"claim,omitempty"` + + // expression is an optional field for specifying a + // CEL expression that produces a string value from + // JWT token claims. + // + // CEL expressions have access to the token claims + // through a CEL variable, 'claims'. + // 'claims' is a map of claim names to claim values. + // For example, the 'sub' claim value can be accessed as 'claims.sub'. + // Nested claims can be accessed using dot notation ('claims.foo.bar'). + // + // Precisely one of claim or expression must be set. + // expression must not be specified when claim is set. + // When specified, expression must be at least 1 character in length + // and must not exceed 1024 characters in length. 
+	//
+	// +optional
+	// +kubebuilder:validation:MaxLength=1024
+	// +kubebuilder:validation:MinLength=1
+	Expression string `json:"expression,omitempty"`
+}
+
+// ExtraMapping allows specifying a key and CEL expression
+// to evaluate the key's value. It is used to create additional
+// mappings and attributes added to a cluster identity from
+// a provided authentication token.
+type ExtraMapping struct {
+	// key is a required field that specifies the string
+	// to use as the extra attribute key.
+	//
+	// key must be a domain-prefix path (e.g. 'example.org/foo').
+	// key must not exceed 510 characters in length.
+	// key must contain the '/' character, separating the domain and path characters.
+	// key must not be empty.
+	//
+	// The domain portion of the key (string of characters prior to the '/') must be a valid RFC1123 subdomain.
+	// It must not exceed 253 characters in length.
+	// It must start and end with an alphanumeric character.
+	// It must only contain lower case alphanumeric characters and '-' or '.'.
+	// It must not use the reserved domains, or be subdomains of, "kubernetes.io", "k8s.io", and "openshift.io".
+	//
+	// The path portion of the key (string of characters after the '/') must not be empty and must consist of at least one
+	// alphanumeric character, percent-encoded octets, '-', '.', '_', '~', '!', '$', '&', ''', '(', ')', '*', '+', ',', ';', '=', and ':'.
+	// It must not exceed 256 characters in length.
+	//
+	// +required
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:MaxLength=510
+	// +kubebuilder:validation:XValidation:rule="self.contains('/')",message="key must contain the '/' character"
+	//
+	// +kubebuilder:validation:XValidation:rule="self.split('/', 2)[0].matches(\"^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$\")",message="the domain of the key must consist of only lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character"
+	// +kubebuilder:validation:XValidation:rule="self.split('/', 2)[0].size() <= 253",message="the domain of the key must not exceed 253 characters in length"
+	//
+	// +kubebuilder:validation:XValidation:rule="self.split('/', 2)[0] != 'kubernetes.io'",message="the domain 'kubernetes.io' is reserved for Kubernetes use"
+	// +kubebuilder:validation:XValidation:rule="!self.split('/', 2)[0].endsWith('.kubernetes.io')",message="the subdomains '*.kubernetes.io' are reserved for Kubernetes use"
+	// +kubebuilder:validation:XValidation:rule="self.split('/', 2)[0] != 'k8s.io'",message="the domain 'k8s.io' is reserved for Kubernetes use"
+	// +kubebuilder:validation:XValidation:rule="!self.split('/', 2)[0].endsWith('.k8s.io')",message="the subdomains '*.k8s.io' are reserved for Kubernetes use"
+	// +kubebuilder:validation:XValidation:rule="self.split('/', 2)[0] != 'openshift.io'",message="the domain 'openshift.io' is reserved for OpenShift use"
+	// +kubebuilder:validation:XValidation:rule="!self.split('/', 2)[0].endsWith('.openshift.io')",message="the subdomains '*.openshift.io' are reserved for OpenShift use"
+	//
+	// +kubebuilder:validation:XValidation:rule="self.split('/', 2)[1].matches('[A-Za-z0-9/\\\\-._~%!$&\\'()*+;=:]+')",message="the path of the key must not be empty and must consist of at least one alphanumeric character, percent-encoded octets, apostrophe, '-', '.', '_', '~', '!', '$', '&', '(', ')', '*', '+', ',', ';', '=', and ':'"
+	// +kubebuilder:validation:XValidation:rule="self.split('/', 2)[1].size() <= 256",message="the path of
the key must not exceed 256 characters in length" + Key string `json:"key"` + + // valueExpression is a required field to specify the CEL expression to extract + // the extra attribute value from a JWT token's claims. + // valueExpression must produce a string or string array value. + // "", [], and null are treated as the extra mapping not being present. + // Empty string values within an array are filtered out. + // + // CEL expressions have access to the token claims + // through a CEL variable, 'claims'. + // 'claims' is a map of claim names to claim values. + // For example, the 'sub' claim value can be accessed as 'claims.sub'. + // Nested claims can be accessed using dot notation ('claims.foo.bar'). + // + // valueExpression must not exceed 1024 characters in length. + // valueExpression must not be empty. + // + // +required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=1024 + ValueExpression string `json:"valueExpression"` +} + +// OIDCClientConfig configures how platform clients +// interact with identity providers as an authentication +// method type OIDCClientConfig struct { - // componentName is the name of the component that is supposed to consume this - // client configuration + // componentName is a required field that specifies the name of the platform + // component being configured to use the identity provider as an authentication mode. + // It is used in combination with componentNamespace as a unique identifier. + // + // componentName must not be an empty string ("") and must not exceed 256 characters in length. // // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 // +required ComponentName string `json:"componentName"` - // componentNamespace is the namespace of the component that is supposed to consume this - // client configuration + // componentNamespace is a required field that specifies the namespace in which the + // platform component being configured to use the identity provider as an authentication + // mode is running. + // It is used in combination with componentName as a unique identifier. + // + // componentNamespace must not be an empty string ("") and must not exceed 63 characters in length. // // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=63 // +required ComponentNamespace string `json:"componentNamespace"` - // clientID is the identifier of the OIDC client from the OIDC provider + // clientID is a required field that configures the client identifier, from + // the identity provider, that the platform component uses for authentication + // requests made to the identity provider. + // The identity provider must accept this identifier for platform components + // to be able to use the identity provider as an authentication mode. + // + // clientID must not be an empty string (""). // // +kubebuilder:validation:MinLength=1 // +required ClientID string `json:"clientID"` - // clientSecret refers to a secret in the `openshift-config` namespace that - // contains the client secret in the `clientSecret` key of the `.data` field + // clientSecret is an optional field that configures the client secret used + // by the platform component when making authentication requests to the identity provider. + // + // When not specified, no client secret will be used when making authentication requests + // to the identity provider. 
+ // + // When specified, clientSecret references a Secret in the 'openshift-config' + // namespace that contains the client secret in the 'clientSecret' key of the '.data' field. + // The client secret will be used when making authentication requests to the identity provider. + // + // Public clients do not require a client secret but private + // clients do require a client secret to work with the identity provider. + // + // +optional ClientSecret SecretNameReference `json:"clientSecret"` - // extraScopes is an optional set of scopes to request tokens with. + // extraScopes is an optional field that configures the extra scopes that should + // be requested by the platform component when making authentication requests to the + // identity provider. + // This is useful if you have configured claim mappings that requires specific + // scopes to be requested beyond the standard OIDC scopes. + // + // When omitted, no additional scopes are requested. // // +listType=set + // +optional ExtraScopes []string `json:"extraScopes"` } +// OIDCClientStatus represents the current state +// of platform components and how they interact with +// the configured identity providers. type OIDCClientStatus struct { - // componentName is the name of the component that will consume a client configuration. + // componentName is a required field that specifies the name of the platform + // component using the identity provider as an authentication mode. + // It is used in combination with componentNamespace as a unique identifier. + // + // componentName must not be an empty string ("") and must not exceed 256 characters in length. // // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=256 // +required ComponentName string `json:"componentName"` - // componentNamespace is the namespace of the component that will consume a client configuration. + // componentNamespace is a required field that specifies the namespace in which the + // platform component using the identity provider as an authentication + // mode is running. + // It is used in combination with componentName as a unique identifier. + // + // componentNamespace must not be an empty string ("") and must not exceed 63 characters in length. // // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=63 // +required ComponentNamespace string `json:"componentNamespace"` - // currentOIDCClients is a list of clients that the component is currently using. + // currentOIDCClients is an optional list of clients that the component is currently using. + // Entries must have unique issuerURL/clientID pairs. // // +listType=map // +listMapKey=issuerURL // +listMapKey=clientID + // +optional CurrentOIDCClients []OIDCClientReference `json:"currentOIDCClients"` - // consumingUsers is a slice of ServiceAccounts that need to have read - // permission on the `clientSecret` secret. + // consumingUsers is an optional list of ServiceAccounts requiring + // read permissions on the `clientSecret` secret. + // + // consumingUsers must not exceed 5 entries. // // +kubebuilder:validation:MaxItems=5 // +listType=set + // +optional ConsumingUsers []ConsumingUser `json:"consumingUsers"` // conditions are used to communicate the state of the `oidcClients` entry. @@ -347,21 +582,32 @@ type OIDCClientStatus struct { Conditions []metav1.Condition `json:"conditions,omitempty"` } +// OIDCClientReference is a reference to a platform component +// client configuration. 
type OIDCClientReference struct { - // OIDCName refers to the `name` of the provider from `oidcProviders` + // oidcProviderName is a required reference to the 'name' of the identity provider + // configured in 'oidcProviders' that this client is associated with. + // + // oidcProviderName must not be an empty string (""). // // +kubebuilder:validation:MinLength=1 // +required OIDCProviderName string `json:"oidcProviderName"` - // URL is the serving URL of the token issuer. - // Must use the https:// scheme. + // issuerURL is a required field that specifies the URL of the identity + // provider that this client is configured to make requests against. + // + // issuerURL must use the 'https' scheme. // // +kubebuilder:validation:Pattern=`^https:\/\/[^\s]` // +required IssuerURL string `json:"issuerURL"` - // clientID is the identifier of the OIDC client from the OIDC provider + // clientID is a required field that specifies the client identifier, from + // the identity provider, that the platform component is using for authentication + // requests made to the identity provider. + // + // clientID must not be empty. // // +kubebuilder:validation:MinLength=1 // +required @@ -369,35 +615,61 @@ type OIDCClientReference struct { } // +kubebuilder:validation:XValidation:rule="has(self.prefixPolicy) && self.prefixPolicy == 'Prefix' ? (has(self.prefix) && size(self.prefix.prefixString) > 0) : !has(self.prefix)",message="prefix must be set if prefixPolicy is 'Prefix', but must remain unset otherwise" +// +union type UsernameClaimMapping struct { - TokenClaimMapping `json:",inline"` + // claim is a required field that configures the JWT token + // claim whose value is assigned to the cluster identity + // field associated with this mapping. + // + // claim must not be an empty string ("") and must not exceed 256 characters. + // + // +required + // +kubebuilder:validation:MinLength:=1 + // +kubebuilder:validation:MaxLength:=256 + Claim string `json:"claim"` - // prefixPolicy specifies how a prefix should apply. + // prefixPolicy is an optional field that configures how a prefix should be + // applied to the value of the JWT claim specified in the 'claim' field. // - // By default, claims other than `email` will be prefixed with the issuer URL to - // prevent naming clashes with other plugins. + // Allowed values are 'Prefix', 'NoPrefix', and omitted (not provided or an empty string). // - // Set to "NoPrefix" to disable prefixing. + // When set to 'Prefix', the value specified in the prefix field will be + // prepended to the value of the JWT claim. + // The prefix field must be set when prefixPolicy is 'Prefix'. // - // Example: - // (1) `prefix` is set to "myoidc:" and `claim` is set to "username". - // If the JWT claim `username` contains value `userA`, the resulting - // mapped value will be "myoidc:userA". - // (2) `prefix` is set to "myoidc:" and `claim` is set to "email". If the - // JWT `email` claim contains value "userA@myoidc.tld", the resulting - // mapped value will be "myoidc:userA@myoidc.tld". - // (3) `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`, - // the JWT claims include "username":"userA" and "email":"userA@myoidc.tld", - // and `claim` is set to: - // (a) "username": the mapped value will be "https://myoidc.tld#userA" - // (b) "email": the mapped value will be "userA@myoidc.tld" + // When set to 'NoPrefix', no prefix will be prepended to the value + // of the JWT claim. 
+	//
+	// When omitted, this means no opinion and the platform is left to choose
+	// any prefixes that are applied, which is subject to change over time.
+	// Currently, the platform prepends `{issuerURL}#` to the value of the JWT claim
+	// when the claim is not 'email'.
+	// As an example, consider the following scenario:
+	// `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`,
+	// the JWT claims include "username":"userA" and "email":"userA@myoidc.tld",
+	// and `claim` is set to:
+	// - "username": the mapped value will be "https://myoidc.tld#userA"
+	// - "email": the mapped value will be "userA@myoidc.tld"
 	//
 	// +kubebuilder:validation:Enum={"", "NoPrefix", "Prefix"}
+	// +optional
+	// +unionDiscriminator
 	PrefixPolicy UsernamePrefixPolicy `json:"prefixPolicy"`

+	// prefix configures the prefix that should be prepended to the value
+	// of the JWT claim.
+	//
+	// prefix must be set when prefixPolicy is set to 'Prefix' and must be unset otherwise.
+	//
+	// +optional
+	// +unionMember
 	Prefix *UsernamePrefix `json:"prefix"`
 }

+// UsernamePrefixPolicy configures how prefixes should be applied
+// to values extracted from the JWT claims during the process of mapping
+// JWT claims to cluster identity attributes.
+// +enum
 type UsernamePrefixPolicy string

 var (
@@ -412,26 +684,42 @@ var (
 	Prefix UsernamePrefixPolicy = "Prefix"
 )

+// UsernamePrefix configures the string that should
+// be used as a prefix for username claim mappings.
 type UsernamePrefix struct {
+	// prefixString is a required field that configures the prefix that will
+	// be applied to the cluster identity username attribute
+	// during the process of mapping JWT claims to cluster identity attributes.
+	//
+	// prefixString must not be an empty string ("").
+	//
 	// +kubebuilder:validation:MinLength=1
 	// +required
 	PrefixString string `json:"prefixString"`
 }

+// PrefixedClaimMapping configures a claim mapping
+// that allows for an optional prefix.
 type PrefixedClaimMapping struct {
 	TokenClaimMapping `json:",inline"`

-	// prefix is a string to prefix the value from the token in the result of the
-	// claim mapping.
+	// prefix is an optional field that configures the prefix that will be
+	// applied to the cluster identity attribute during the process of mapping
+	// JWT claims to cluster identity attributes.
 	//
-	// By default, no prefixing occurs.
+	// When omitted (""), no prefix is applied to the cluster identity attribute.
 	//
-	// Example: if `prefix` is set to "myoidc:"" and the `claim` in JWT contains
+	// Example: if `prefix` is set to "myoidc:" and the `claim` in JWT contains
 	// an array of strings "a", "b" and "c", the mapping will result in an
 	// array of string "myoidc:a", "myoidc:b" and "myoidc:c".
+	//
+	// +optional
 	Prefix string `json:"prefix"`
 }

+// TokenValidationRuleType represents the different
+// claim validation rule types that can be configured.
+// +enum
 type TokenValidationRuleType string

 const (
@@ -439,26 +727,45 @@ const (
 )

 type TokenClaimValidationRule struct {
-	// type sets the type of the validation rule
+	// type is an optional field that configures the type of the validation rule.
+	//
+	// Allowed values are 'RequiredClaim' and omitted (not provided or an empty string).
+	//
+	// When set to 'RequiredClaim', the Kubernetes API server
+	// will be configured to validate that the incoming JWT
+	// contains the required claim and that its value matches
+	// the required value.
+	//
+	// Defaults to 'RequiredClaim'.
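The union rules above (claim + prefixPolicy + prefix) are easiest to see in code. A rough sketch, not part of this patch, constructing a UsernameClaimMapping against these vendored types; the claim name and prefix string are hypothetical:

    import configv1 "github.com/openshift/api/config/v1"

    // Map the hypothetical `preferred_username` JWT claim to the cluster
    // username with an explicit "oidc:" prefix. Per the CEL rule on
    // UsernameClaimMapping, `prefix` must be set because `prefixPolicy`
    // is 'Prefix', and must remain unset otherwise.
    var usernameMapping = configv1.UsernameClaimMapping{
        Claim:        "preferred_username",
        PrefixPolicy: configv1.Prefix, // union discriminator
        Prefix:       &configv1.UsernamePrefix{PrefixString: "oidc:"},
    }

With this mapping, a token whose `preferred_username` claim is "userA" maps to the cluster username "oidc:userA".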
 	//
 	// +kubebuilder:validation:Enum={"RequiredClaim"}
 	// +kubebuilder:default="RequiredClaim"
 	Type TokenValidationRuleType `json:"type"`

-	// requiredClaim allows configuring a required claim name and its expected
-	// value
-	RequiredClaim *TokenRequiredClaim `json:"requiredClaim"`
+	// requiredClaim is an optional field that configures the required claim
+	// and value that the Kubernetes API server will use to validate if an incoming
+	// JWT is valid for this identity provider.
+	//
+	// +optional
+	RequiredClaim *TokenRequiredClaim `json:"requiredClaim,omitempty"`
 }

 type TokenRequiredClaim struct {
-	// claim is a name of a required claim. Only claims with string values are
-	// supported.
+	// claim is a required field that configures the name of the required claim.
+	// When taken from the JWT claims, claim must be a string value.
+	//
+	// claim must not be an empty string ("").
 	//
 	// +kubebuilder:validation:MinLength=1
 	// +required
 	Claim string `json:"claim"`

-	// requiredValue is the required value for the claim.
+	// requiredValue is a required field that configures the value that 'claim' must
+	// have when taken from the incoming JWT claims.
+	// If the value in the JWT claims does not match, the token
+	// will be rejected for authentication.
+	//
+	// requiredValue must not be an empty string ("").
 	//
 	// +kubebuilder:validation:MinLength=1
 	// +required
diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_image_policy.go b/vendor/github.com/openshift/api/config/v1/types_cluster_image_policy.go
new file mode 100644
index 0000000000..ca604e05c5
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_cluster_image_policy.go
@@ -0,0 +1,87 @@
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterImagePolicy holds cluster-wide configuration for image signature verification
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=clusterimagepolicies,scope=Cluster
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/2310
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +openshift:enable:FeatureGate=SigstoreImageVerification
+// +openshift:compatibility-gen:level=1
+type ClusterImagePolicy struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata"`
+
+	// spec contains the configuration for the cluster image policy.
+	// +required
+	Spec ClusterImagePolicySpec `json:"spec"`
+	// status contains the observed state of the resource.
+	// +optional
+	Status ClusterImagePolicyStatus `json:"status"`
+}
+
+// ClusterImagePolicySpec is the specification of the ClusterImagePolicy custom resource.
+type ClusterImagePolicySpec struct {
+	// scopes is a required field that defines the list of image identities assigned to a policy. Each item refers to a scope in a registry implementing the "Docker Registry HTTP API V2".
+	// Scopes matching individual images are named Docker references in the fully expanded form, either using a tag or digest.
For example, docker.io/library/busybox:latest (not busybox:latest).
+	// More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository
+	// namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number).
+	// Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not.
+	// No more than 256 scopes are supported in one object. If multiple scopes match a given image, only the policy requirements for the most specific scope apply. The policy requirements for more general scopes are ignored.
+	// In addition to setting a policy appropriate for your own deployed applications, make sure that a policy on the OpenShift image repositories
+	// quay.io/openshift-release-dev/ocp-release, quay.io/openshift-release-dev/ocp-v4.0-art-dev (or on a more general scope) allows deployment of the OpenShift images required for cluster operation.
+	// If a scope is configured in both the ClusterImagePolicy and the ImagePolicy, or if the scope in ImagePolicy is nested under one of the scopes from the ClusterImagePolicy, only the policy from the ClusterImagePolicy will be applied.
+	// For additional details about the format, please refer to the document explaining the docker transport field,
+	// which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker
+	// +required
+	// +kubebuilder:validation:MaxItems=256
+	// +listType=set
+	Scopes []ImageScope `json:"scopes"`
+	// policy is a required field that contains configuration to allow scopes to be verified, and defines how
+	// images not matching the verification policy will be treated.
+	// +required
+	Policy Policy `json:"policy"`
+}
+
+// +k8s:deepcopy-gen=true
+type ClusterImagePolicyStatus struct {
+	// conditions provide details on the status of this API Resource.
+	// +kubebuilder:validation:MaxItems=8
+	// +kubebuilder:validation:MinItems=1
+	// +listType=map
+	// +listMapKey=type
+	// +optional
+	Conditions []metav1.Condition `json:"conditions,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ClusterImagePolicyList is a list of ClusterImagePolicy resources
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ClusterImagePolicyList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard list's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +required
+	metav1.ListMeta `json:"metadata"`
+
+	// items is a list of ClusterImagePolicies
+	// +kubebuilder:validation:MaxItems=1000
+	// +required
+	Items []ClusterImagePolicy `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go b/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go
index a447adb9f4..8323040389 100644
--- a/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go
+++ b/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go
@@ -9,10 +9,9 @@ import (
 // +genclient:nonNamespaced
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

-// ClusterOperator is the Custom Resource object which holds the current state
-// of an operator. This object is used by operators to convey their state to
-// the rest of the cluster.
-//
+// ClusterOperator holds the status of a core or optional OpenShift component
+// managed by the Cluster Version Operator (CVO). This object is used by
+// operators to convey their state to the rest of the cluster.
 // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
 // +openshift:compatibility-gen:level=1
 // +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/497
@@ -154,15 +153,21 @@ const (
 	// is functional and available in the cluster. Available=False means at least
 	// part of the component is non-functional, and that the condition requires
 	// immediate administrator intervention.
+	// A component must not report Available=False during the course of a normal upgrade.
 	OperatorAvailable ClusterStatusConditionType = "Available"

 	// Progressing indicates that the component (operator and all configured operands)
-	// is actively rolling out new code, propagating config changes, or otherwise
+	// is actively rolling out new code, propagating config changes (e.g., a version change), or otherwise
 	// moving from one steady state to another. Operators should not report
-	// progressing when they are reconciling (without action) a previously known
-	// state. If the observed cluster state has changed and the component is
-	// reacting to it (scaling up for instance), Progressing should become true
+	// Progressing when they are reconciling (without action) a previously known
+	// state. Operators should not report Progressing only because DaemonSets owned by them
+	// are adjusting to a new node from cluster scaleup or a node rebooting from cluster upgrade.
+	// If the observed cluster state has changed and the component is
+	// reacting to it (updated proxy configuration for instance), Progressing should become true
 	// since it is moving from one steady state to another.
+	// A component in a cluster with fewer than 250 nodes must complete a version
+	// change within a limited period of time: 90 minutes for Machine Config Operator and 20 minutes for others.
+	// Machine Config Operator is given more time as it needs to restart control plane nodes.
 	OperatorProgressing ClusterStatusConditionType = "Progressing"

 	// Degraded indicates that the component (operator and all configured operands)
@@ -175,7 +180,7 @@ const (
 	// Degraded because it may have a lower quality of service. A component may be
 	// Progressing but not Degraded because the transition from one state to
 	// another does not persist over a long enough period to report Degraded.
A - // component should not report Degraded during the course of a normal upgrade. + // component must not report Degraded during the course of a normal upgrade. // A component may report Degraded in response to a persistent infrastructure // failure that requires eventual administrator intervention. For example, if // a control plane host is unhealthy and must be replaced. A component should diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go index 092bebff09..cfac9689e4 100644 --- a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go +++ b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go @@ -83,8 +83,8 @@ type ClusterVersionSpec struct { // // +optional Upstream URL `json:"upstream,omitempty"` - // channel is an identifier for explicitly requesting that a non-default - // set of updates be applied to this cluster. The default channel will be + // channel is an identifier for explicitly requesting a non-default set + // of updates to be applied to this cluster. The default channel will // contain stable updates that are appropriate for production clusters. // // +optional @@ -163,6 +163,7 @@ type ClusterVersionStatus struct { VersionHash string `json:"versionHash"` // capabilities describes the state of optional, core cluster components. + // +optional Capabilities ClusterVersionCapabilitiesStatus `json:"capabilities"` // conditions provides information about the cluster version. The condition @@ -256,7 +257,7 @@ type UpdateHistory struct { // acceptedRisks records risks which were accepted to initiate the update. // For example, it may menition an Upgradeable=False or missing signature - // that was overriden via desiredUpdate.force, or an update that was + // that was overridden via desiredUpdate.force, or an update that was // initiated despite not being in the availableUpdates set of recommended // update targets. // +optional diff --git a/vendor/github.com/openshift/api/config/v1/types_console.go b/vendor/github.com/openshift/api/config/v1/types_console.go index 0ccc4a8f85..dc6967bf15 100644 --- a/vendor/github.com/openshift/api/config/v1/types_console.go +++ b/vendor/github.com/openshift/api/config/v1/types_console.go @@ -45,6 +45,7 @@ type ConsoleSpec struct { type ConsoleStatus struct { // The URL for the console. This will be derived from the host for the route that // is created for the console. + // +optional ConsoleURL string `json:"consoleURL"` } diff --git a/vendor/github.com/openshift/api/config/v1/types_feature.go b/vendor/github.com/openshift/api/config/v1/types_feature.go index 0709a75ae8..169e29c5c5 100644 --- a/vendor/github.com/openshift/api/config/v1/types_feature.go +++ b/vendor/github.com/openshift/api/config/v1/types_feature.go @@ -112,6 +112,7 @@ type FeatureGateStatus struct { // Only featureGates with .version in the ClusterVersion.status will be present in this list. 
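Since featureGates above is now +optional, consumers should handle an absent or non-matching list. A hedged lookup sketch over these vendored types; it assumes FeatureGateDetails exposes the version field that the comment above implies:

    import configv1 "github.com/openshift/api/config/v1"

    // detailsForVersion returns the FeatureGateDetails entry matching the given
    // payload version, or nil when none is present (possible now that the
    // featureGates list is +optional). The Version field name is an assumption
    // taken from the ".version" wording in the doc comment above.
    func detailsForVersion(status *configv1.FeatureGateStatus, version string) *configv1.FeatureGateDetails {
        for i := range status.FeatureGates {
            if status.FeatureGates[i].Version == version {
                return &status.FeatureGates[i]
            }
        }
        return nil
    }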
 	// +listType=map
 	// +listMapKey=version
+	// +optional
 	FeatureGates []FeatureGateDetails `json:"featureGates"`
 }
diff --git a/vendor/github.com/openshift/api/config/v1/types_image_policy.go b/vendor/github.com/openshift/api/config/v1/types_image_policy.go
new file mode 100644
index 0000000000..54bd21adb4
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_image_policy.go
@@ -0,0 +1,322 @@
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImagePolicy holds namespace-wide configuration for image signature verification
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=imagepolicies,scope=Namespaced
+// +kubebuilder:subresource:status
+// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/2310
+// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01
+// +openshift:enable:FeatureGate=SigstoreImageVerification
+// +openshift:compatibility-gen:level=1
+type ImagePolicy struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata"`
+
+	// spec holds user settable values for configuration
+	// +required
+	Spec ImagePolicySpec `json:"spec"`
+	// status contains the observed state of the resource.
+	// +optional
+	Status ImagePolicyStatus `json:"status"`
+}
+
+// ImagePolicySpec is the specification of the ImagePolicy CRD.
+type ImagePolicySpec struct {
+	// scopes is a required field that defines the list of image identities assigned to a policy. Each item refers to a scope in a registry implementing the "Docker Registry HTTP API V2".
+	// Scopes matching individual images are named Docker references in the fully expanded form, either using a tag or digest. For example, docker.io/library/busybox:latest (not busybox:latest).
+	// More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository
+	// namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number).
+	// Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not.
+	// No more than 256 scopes are supported in one object. If multiple scopes match a given image, only the policy requirements for the most specific scope apply. The policy requirements for more general scopes are ignored.
+	// In addition to setting a policy appropriate for your own deployed applications, make sure that a policy on the OpenShift image repositories
+	// quay.io/openshift-release-dev/ocp-release, quay.io/openshift-release-dev/ocp-v4.0-art-dev (or on a more general scope) allows deployment of the OpenShift images required for cluster operation.
+	// If a scope is configured in both the ClusterImagePolicy and the ImagePolicy, or if the scope in ImagePolicy is nested under one of the scopes from the ClusterImagePolicy, only the policy from the ClusterImagePolicy will be applied.
+	// For additional details about the format, please refer to the document explaining the docker transport field,
+	// which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker
+	// +required
+	// +kubebuilder:validation:MaxItems=256
+	// +listType=set
+	Scopes []ImageScope `json:"scopes"`
+	// policy is a required field that contains configuration to allow scopes to be verified, and defines how
+	// images not matching the verification policy will be treated.
+	// +required
+	Policy Policy `json:"policy"`
+}
+
+// +kubebuilder:validation:XValidation:rule="size(self.split('/')[0].split('.')) == 1 ? self.split('/')[0].split('.')[0].split(':')[0] == 'localhost' : true",message="invalid image scope format, scope must contain a fully qualified domain name or 'localhost'"
+// +kubebuilder:validation:XValidation:rule=`self.contains('*') ? self.matches('^\\*(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+$') : true`,message="invalid image scope with wildcard, a wildcard can only be at the start of the domain and is only supported for subdomain matching, not path matching"
+// +kubebuilder:validation:XValidation:rule=`!self.contains('*') ? self.matches('^((((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?)(?::([\\w][\\w.-]{0,127}))?(?:@([A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}))?$') : true`,message="invalid repository namespace or image specification in the image scope"
+// +kubebuilder:validation:MaxLength=512
+type ImageScope string
+
+// Policy defines the verification policy for the items in the scopes list.
+type Policy struct {
+	// rootOfTrust is a required field that defines the root of trust for verifying image signatures during retrieval.
+	// This allows image consumers to specify policyType and corresponding configuration of the policy, matching how the policy was generated.
+	// +required
+	RootOfTrust PolicyRootOfTrust `json:"rootOfTrust"`
+	// signedIdentity is an optional field that specifies what image identity the signature claims about the image. This is useful when the image identity in the signature differs from the original image spec, such as when a mirror registry is configured for the image scope; in that case the signature from the mirror registry contains the image identity of the mirror instead of the original scope.
+	// The required matchPolicy field specifies the approach used in the verification process to verify the identity in the signature and the actual image identity; the default matchPolicy is "MatchRepoDigestOrExact".
+	// +optional
+	SignedIdentity *PolicyIdentity `json:"signedIdentity,omitempty"`
+}
+
+// PolicyRootOfTrust defines the root of trust based on the selected policyType.
+// +union
+// +kubebuilder:validation:XValidation:rule="has(self.policyType) && self.policyType == 'PublicKey' ? has(self.publicKey) : !has(self.publicKey)",message="publicKey is required when policyType is PublicKey, and forbidden otherwise"
+// +kubebuilder:validation:XValidation:rule="has(self.policyType) && self.policyType == 'FulcioCAWithRekor' ?
has(self.fulcioCAWithRekor) : !has(self.fulcioCAWithRekor)",message="fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, and forbidden otherwise"
+// +openshift:validation:FeatureGateAwareXValidation:featureGate=SigstoreImageVerificationPKI,rule="has(self.policyType) && self.policyType == 'PKI' ? has(self.pki) : !has(self.pki)",message="pki is required when policyType is PKI, and forbidden otherwise"
+type PolicyRootOfTrust struct {
+	// policyType is a required field that specifies the type of the policy for verification. This field must correspond to how the policy was generated.
+	// Allowed values are "PublicKey", "FulcioCAWithRekor", and "PKI".
+	// When set to "PublicKey", the policy relies on a sigstore publicKey and may optionally use a Rekor verification.
+	// When set to "FulcioCAWithRekor", the policy is based on the Fulcio certification and incorporates a Rekor verification.
+	// When set to "PKI", the policy is based on the certificates from Bring Your Own Public Key Infrastructure (BYOPKI). This value is enabled by turning on the SigstoreImageVerificationPKI feature gate.
+	// +unionDiscriminator
+	// +required
+	PolicyType PolicyType `json:"policyType"`
+	// publicKey defines the root of trust configuration based on a sigstore public key. Optionally include a Rekor public key for Rekor verification.
+	// publicKey is required when policyType is PublicKey, and forbidden otherwise.
+	// +optional
+	PublicKey *PublicKey `json:"publicKey,omitempty"`
+	// fulcioCAWithRekor defines the root of trust configuration based on the Fulcio certificate and the Rekor public key.
+	// fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, and forbidden otherwise.
+	// For more information about Fulcio and Rekor, please refer to the document at:
+	// https://github.com/sigstore/fulcio and https://github.com/sigstore/rekor
+	// +optional
+	FulcioCAWithRekor *FulcioCAWithRekor `json:"fulcioCAWithRekor,omitempty"`
+	// pki defines the root of trust configuration based on Bring Your Own Public Key Infrastructure (BYOPKI) Root CA(s) and corresponding intermediate certificates.
+	// pki is required when policyType is PKI, and forbidden otherwise.
+	// +optional
+	// +openshift:enable:FeatureGate=SigstoreImageVerificationPKI
+	PKI *PKI `json:"pki,omitempty"`
+}
+
+// +openshift:validation:FeatureGateAwareEnum:featureGate="",enum=PublicKey;FulcioCAWithRekor
+// +openshift:validation:FeatureGateAwareEnum:featureGate=SigstoreImageVerificationPKI,enum=PublicKey;FulcioCAWithRekor;PKI
+type PolicyType string
+
+const (
+	PublicKeyRootOfTrust         PolicyType = "PublicKey"
+	FulcioCAWithRekorRootOfTrust PolicyType = "FulcioCAWithRekor"
+	PKIRootOfTrust               PolicyType = "PKI"
+)
+
+// PublicKey defines the root of trust based on a sigstore public key.
+type PublicKey struct {
+	// keyData is a required field that contains inline base64-encoded data for the PEM format public key.
+	// keyData must be at most 8192 characters.
+	// +required
+	// +kubebuilder:validation:MaxLength=8192
+	// +kubebuilder:validation:MinLength=68
+	// +kubebuilder:validation:XValidation:rule="string(self).startsWith('-----BEGIN PUBLIC KEY-----')",message="the keyData must start with base64 encoding of '-----BEGIN PUBLIC KEY-----'."
+	// +kubebuilder:validation:XValidation:rule="string(self).endsWith('-----END PUBLIC KEY-----\\n') || string(self).endsWith('-----END PUBLIC KEY-----')",message="the keyData must end with base64 encoding of '-----END PUBLIC KEY-----'."
+	KeyData []byte `json:"keyData"`
+	// rekorKeyData is an optional field that contains inline base64-encoded data for the PEM format from the Rekor public key.
+	// rekorKeyData must be at most 8192 characters.
+	// +optional
+	// +kubebuilder:validation:MaxLength=8192
+	// +kubebuilder:validation:XValidation:rule="string(self).startsWith('-----BEGIN PUBLIC KEY-----')",message="the rekorKeyData must start with base64 encoding of '-----BEGIN PUBLIC KEY-----'."
+	// +kubebuilder:validation:XValidation:rule="string(self).endsWith('-----END PUBLIC KEY-----\\n') || string(self).endsWith('-----END PUBLIC KEY-----')",message="the rekorKeyData must end with base64 encoding of '-----END PUBLIC KEY-----'."
+	RekorKeyData []byte `json:"rekorKeyData,omitempty"`
+}
+
+// FulcioCAWithRekor defines the root of trust based on the Fulcio certificate and the Rekor public key.
+type FulcioCAWithRekor struct {
+	// fulcioCAData is a required field that contains inline base64-encoded data for the PEM format fulcio CA.
+	// fulcioCAData must be at most 8192 characters.
+	// +required
+	// +kubebuilder:validation:MaxLength=8192
+	// +kubebuilder:validation:XValidation:rule="string(self).startsWith('-----BEGIN CERTIFICATE-----')",message="the fulcioCAData must start with base64 encoding of '-----BEGIN CERTIFICATE-----'."
+	// +kubebuilder:validation:XValidation:rule="string(self).endsWith('-----END CERTIFICATE-----\\n') || string(self).endsWith('-----END CERTIFICATE-----')",message="the fulcioCAData must end with base64 encoding of '-----END CERTIFICATE-----'."
+	FulcioCAData []byte `json:"fulcioCAData"`
+	// rekorKeyData is a required field that contains inline base64-encoded data for the PEM format from the Rekor public key.
+	// rekorKeyData must be at most 8192 characters.
+	// +required
+	// +kubebuilder:validation:MaxLength=8192
+	// +kubebuilder:validation:XValidation:rule="string(self).startsWith('-----BEGIN PUBLIC KEY-----')",message="the rekorKeyData must start with base64 encoding of '-----BEGIN PUBLIC KEY-----'."
+	// +kubebuilder:validation:XValidation:rule="string(self).endsWith('-----END PUBLIC KEY-----\\n') || string(self).endsWith('-----END PUBLIC KEY-----')",message="the rekorKeyData must end with base64 encoding of '-----END PUBLIC KEY-----'."
+	RekorKeyData []byte `json:"rekorKeyData"`
+	// fulcioSubject is a required field that specifies the OIDC issuer and the email of the Fulcio authentication configuration.
+	// +required
+	FulcioSubject PolicyFulcioSubject `json:"fulcioSubject"`
+}
+
+// PolicyFulcioSubject defines the OIDC issuer and the email of the Fulcio authentication configuration.
+type PolicyFulcioSubject struct {
+	// oidcIssuer is a required field that contains the expected OIDC issuer. The oidcIssuer must be a valid URL and at most 2048 characters in length.
+	// It will be verified that the Fulcio-issued certificate contains a (Fulcio-defined) certificate extension pointing at this OIDC issuer URL.
+	// When Fulcio issues certificates, it includes a value based on a URL inside the client-provided ID token.
+	// Example: "https://expected.OIDC.issuer/"
+	// +required
+	// +kubebuilder:validation:MaxLength=2048
+	// +kubebuilder:validation:XValidation:rule="isURL(self)",message="oidcIssuer must be a valid URL"
+	OIDCIssuer string `json:"oidcIssuer"`
+	// signedEmail is a required field that holds the email address that the Fulcio certificate is issued for.
+	// The signedEmail must be a valid email address and at most 320 characters in length.
+	// Example: "expected-signing-user@example.com"
+	// +required
+	// +kubebuilder:validation:MaxLength=320
+	// +kubebuilder:validation:XValidation:rule=`self.matches('^\\S+@\\S+$')`,message="invalid email address"
+	SignedEmail string `json:"signedEmail"`
+}
+
+// PKI defines the root of trust based on Root CA(s) and corresponding intermediate certificates.
+type PKI struct {
+	// caRootsData contains base64-encoded data of a certificate bundle PEM file, which contains one or more CA roots in the PEM format. The total length of the data must not exceed 8192 characters.
+	// +required
+	// +kubebuilder:validation:MaxLength=8192
+	// +kubebuilder:validation:MinLength=72
+	// +kubebuilder:validation:XValidation:rule="string(self).startsWith('-----BEGIN CERTIFICATE-----')",message="the caRootsData must start with base64 encoding of '-----BEGIN CERTIFICATE-----'."
+	// +kubebuilder:validation:XValidation:rule="string(self).endsWith('-----END CERTIFICATE-----\\n') || string(self).endsWith('-----END CERTIFICATE-----')",message="the caRootsData must end with base64 encoding of '-----END CERTIFICATE-----'."
+	// +kubebuilder:validation:XValidation:rule="string(self).findAll('-----BEGIN CERTIFICATE-----').size() == string(self).findAll('-----END CERTIFICATE-----').size()",message="caRootsData must be base64 encoding of valid PEM format data containing the same number of '-----BEGIN CERTIFICATE-----' and '-----END CERTIFICATE-----' markers."
+	CertificateAuthorityRootsData []byte `json:"caRootsData"`
+	// caIntermediatesData contains base64-encoded data of a certificate bundle PEM file, which contains one or more intermediate certificates in the PEM format. The total length of the data must not exceed 8192 characters.
+	// caIntermediatesData requires caRootsData to be set.
+	// +optional
+	// +kubebuilder:validation:XValidation:rule="string(self).startsWith('-----BEGIN CERTIFICATE-----')",message="the caIntermediatesData must start with base64 encoding of '-----BEGIN CERTIFICATE-----'."
+	// +kubebuilder:validation:XValidation:rule="string(self).endsWith('-----END CERTIFICATE-----\\n') || string(self).endsWith('-----END CERTIFICATE-----')",message="the caIntermediatesData must end with base64 encoding of '-----END CERTIFICATE-----'."
+	// +kubebuilder:validation:XValidation:rule="string(self).findAll('-----BEGIN CERTIFICATE-----').size() == string(self).findAll('-----END CERTIFICATE-----').size()",message="caIntermediatesData must be base64 encoding of valid PEM format data containing the same number of '-----BEGIN CERTIFICATE-----' and '-----END CERTIFICATE-----' markers."
+	// +kubebuilder:validation:MaxLength=8192
+	// +kubebuilder:validation:MinLength=72
+	CertificateAuthorityIntermediatesData []byte `json:"caIntermediatesData,omitempty"`
+
+	// pkiCertificateSubject defines the requirements imposed on the subject to which the certificate was issued.
+	// +required
+	PKICertificateSubject PKICertificateSubject `json:"pkiCertificateSubject"`
+}
+
+// PKICertificateSubject defines the requirements imposed on the subject to which the certificate was issued.
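Putting the root-of-trust pieces together, a minimal sketch (not from this patch) of an ImagePolicySpec that pins one scope to a sigstore public key; the scope and key bytes are placeholders:

    import configv1 "github.com/openshift/api/config/v1"

    // The []byte fields marshal to base64 in JSON, so raw PEM bytes are set
    // here; the CEL rules above then check the decoded PEM markers. Per the
    // union validation on PolicyRootOfTrust, `publicKey` is required because
    // `policyType` is 'PublicKey'.
    var imagePolicySpec = configv1.ImagePolicySpec{
        Scopes: []configv1.ImageScope{"registry.example.com/team/app"},
        Policy: configv1.Policy{
            RootOfTrust: configv1.PolicyRootOfTrust{
                PolicyType: configv1.PublicKeyRootOfTrust, // union discriminator
                PublicKey: &configv1.PublicKey{
                    KeyData: []byte("-----BEGIN PUBLIC KEY-----\n...placeholder...\n-----END PUBLIC KEY-----"),
                },
            },
        },
    }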
+// +kubebuilder:validation:XValidation:rule="has(self.email) || has(self.hostname)", message="at least one of email or hostname must be set in pkiCertificateSubject" +// +openshift:enable:FeatureGate=SigstoreImageVerificationPKI +type PKICertificateSubject struct { + // email specifies the expected email address imposed on the subject to which the certificate was issued, and must match the email address listed in the Subject Alternative Name (SAN) field of the certificate. + // The email must be a valid email address and at most 320 characters in length. + // +optional + // +kubebuilder:validation:MaxLength:=320 + // +kubebuilder:validation:XValidation:rule=`self.matches('^\\S+@\\S+$')`,message="invalid email address" + Email string `json:"email,omitempty"` + // hostname specifies the expected hostname imposed on the subject to which the certificate was issued, and it must match the hostname listed in the Subject Alternative Name (SAN) DNS field of the certificate. + // The hostname must be a valid dns 1123 subdomain name, optionally prefixed by '*.', and at most 253 characters in length. + // It must consist only of lowercase alphanumeric characters, hyphens, periods and the optional preceding asterisk. + // +optional + // +kubebuilder:validation:MaxLength:=253 + // +kubebuilder:validation:XValidation:rule="self.startsWith('*.') ? !format.dns1123Subdomain().validate(self.replace('*.', '', 1)).hasValue() : !format.dns1123Subdomain().validate(self).hasValue()",message="hostname must be a valid dns 1123 subdomain name, optionally prefixed by '*.'. It must consist only of lowercase alphanumeric characters, hyphens, periods and the optional preceding asterisk." + Hostname string `json:"hostname,omitempty"` +} + +// PolicyIdentity defines image identity the signature claims about the image. When omitted, the default matchPolicy is "MatchRepoDigestOrExact". +// +kubebuilder:validation:XValidation:rule="(has(self.matchPolicy) && self.matchPolicy == 'ExactRepository') ? has(self.exactRepository) : !has(self.exactRepository)",message="exactRepository is required when matchPolicy is ExactRepository, and forbidden otherwise" +// +kubebuilder:validation:XValidation:rule="(has(self.matchPolicy) && self.matchPolicy == 'RemapIdentity') ? has(self.remapIdentity) : !has(self.remapIdentity)",message="remapIdentity is required when matchPolicy is RemapIdentity, and forbidden otherwise" +// +union +type PolicyIdentity struct { + // matchPolicy is a required filed specifies matching strategy to verify the image identity in the signature against the image scope. + // Allowed values are "MatchRepoDigestOrExact", "MatchRepository", "ExactRepository", "RemapIdentity". When omitted, the default value is "MatchRepoDigestOrExact". + // When set to "MatchRepoDigestOrExact", the identity in the signature must be in the same repository as the image identity if the image identity is referenced by a digest. Otherwise, the identity in the signature must be the same as the image identity. + // When set to "MatchRepository", the identity in the signature must be in the same repository as the image identity. + // When set to "ExactRepository", the exactRepository must be specified. The identity in the signature must be in the same repository as a specific identity specified by "repository". + // When set to "RemapIdentity", the remapIdentity must be specified. The signature must be in the same as the remapped image identity. 
Remapped image identity is obtained by replacing the "prefix" with the specified “signedPrefix” if the image identity matches the specified "prefix".
+	// +unionDiscriminator
+	// +required
+	MatchPolicy IdentityMatchPolicy `json:"matchPolicy"`
+	// exactRepository specifies the repository that must be exactly matched by the identity in the signature.
+	// exactRepository is required if matchPolicy is set to "ExactRepository". It is used to verify that the signature claims an identity matching this exact repository, rather than the original image identity.
+	// +optional
+	PolicyMatchExactRepository *PolicyMatchExactRepository `json:"exactRepository,omitempty"`
+	// remapIdentity specifies the prefix remapping rule for verifying image identity.
+	// remapIdentity is required if matchPolicy is set to "RemapIdentity". It is used to verify that the signature claims a different registry/repository prefix than the original image.
+	// +optional
+	PolicyMatchRemapIdentity *PolicyMatchRemapIdentity `json:"remapIdentity,omitempty"`
+}
+
+// +kubebuilder:validation:MaxLength=512
+// +kubebuilder:validation:XValidation:rule=`self.matches('.*:([\\w][\\w.-]{0,127})$')? self.matches('^(localhost:[0-9]+)$'): true`,message="invalid repository or prefix in the signedIdentity, should not include the tag or digest"
+// +kubebuilder:validation:XValidation:rule=`self.matches('^(((?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:\\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+(?::[0-9]+)?)|(localhost(?::[0-9]+)?))(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$')`,message="invalid repository or prefix in the signedIdentity. The repository or prefix must start with 'localhost' or a valid '.' separated domain. If it contains registry paths, the path component names must start with at least one letter or number, with following parts able to be separated by one period, one or two underscores and multiple dashes."
+type IdentityRepositoryPrefix string
+
+type PolicyMatchExactRepository struct {
+	// repository is the reference of the image identity to be matched.
+	// repository is required if matchPolicy is set to "ExactRepository".
+	// The value should be a repository name (by omitting the tag or digest) in a registry implementing the "Docker Registry HTTP API V2". For example, docker.io/library/busybox
+	// +required
+	Repository IdentityRepositoryPrefix `json:"repository"`
+}
+
+type PolicyMatchRemapIdentity struct {
+	// prefix is required if matchPolicy is set to "RemapIdentity".
+	// prefix is the prefix of the image identity to be matched.
+	// If the image identity matches the specified prefix, that prefix is replaced by the specified “signedPrefix” (otherwise it is used unchanged and no remapping takes place).
+	// This is useful when verifying signatures for a mirror of some other repository namespace that preserves the vendor’s repository structure.
+	// The prefix and signedPrefix values can be either host[:port] values (matching exactly the same host[:port], string), repository namespaces,
+	// or repositories (i.e. they must not contain tags/digests), and match as prefixes of the fully expanded form.
+	// For example, docker.io/library/busybox (not busybox) to specify that single repository, or docker.io/library (not an empty string) to specify the parent namespace of docker.io/library/busybox.
+	// +required
+	Prefix IdentityRepositoryPrefix `json:"prefix"`
+	// signedPrefix is required if matchPolicy is set to "RemapIdentity".
+	// signedPrefix is the prefix of the image identity to be matched in the signature. The format is the same as "prefix". The values can be either host[:port] values (matching exactly the same host[:port], string), repository namespaces,
+	// or repositories (i.e. they must not contain tags/digests), and match as prefixes of the fully expanded form.
+	// For example, docker.io/library/busybox (not busybox) to specify that single repository, or docker.io/library (not an empty string) to specify the parent namespace of docker.io/library/busybox.
+	// +required
+	SignedPrefix IdentityRepositoryPrefix `json:"signedPrefix"`
+}
+
+// IdentityMatchPolicy defines the type of matching for "matchPolicy".
+// +kubebuilder:validation:Enum=MatchRepoDigestOrExact;MatchRepository;ExactRepository;RemapIdentity
+type IdentityMatchPolicy string
+
+const (
+	IdentityMatchPolicyMatchRepoDigestOrExact IdentityMatchPolicy = "MatchRepoDigestOrExact"
+	IdentityMatchPolicyMatchRepository        IdentityMatchPolicy = "MatchRepository"
+	IdentityMatchPolicyExactRepository        IdentityMatchPolicy = "ExactRepository"
+	IdentityMatchPolicyRemapIdentity          IdentityMatchPolicy = "RemapIdentity"
+)
+
+// +k8s:deepcopy-gen=true
+type ImagePolicyStatus struct {
+	// conditions provide details on the status of this API Resource.
+	// condition type 'Pending' indicates that the custom resource contains a policy that cannot take effect. It is either overwritten by a global policy or the image scope is not valid.
+	// +kubebuilder:validation:MaxItems=8
+	// +kubebuilder:validation:MinItems=1
+	// +listType=map
+	// +listMapKey=type
+	// +optional
+	Conditions []metav1.Condition `json:"conditions,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImagePolicyList is a list of ImagePolicy resources
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImagePolicyList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard list's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +required
+	metav1.ListMeta `json:"metadata"`
+
+	// items is a list of ImagePolicies
+	// +kubebuilder:validation:MaxItems=1000
+	// +required
+	Items []ImagePolicy `json:"items"`
+}
+
+const (
+	// ImagePolicyPending indicates that the custom resource contains a policy that cannot take effect. It is either overwritten by a global policy or the image scope is not valid.
+	ImagePolicyPending = "Pending"
+	// ImagePolicyApplied indicates that the policy has been applied
+	ImagePolicyApplied = "Applied"
+)
diff --git a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go
index f10ccb8557..effafde644 100644
--- a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go
+++ b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go
@@ -62,11 +62,13 @@ type InfrastructureStatus struct {
 	// infrastructureName uniquely identifies a cluster with a human friendly name.
 	// Once set it should not be changed. Must be of max length 27 and must have only
 	// alphanumeric or hyphen characters.
+	// +optional
 	InfrastructureName string `json:"infrastructureName"`

 	// platform is the underlying infrastructure provider for the cluster.
 	//
 	// Deprecated: Use platformStatus.type instead.
+ // +optional Platform PlatformType `json:"platform,omitempty"` // platformStatus holds status information specific to the underlying @@ -78,17 +80,20 @@ type InfrastructureStatus struct { // etcd servers and clients. // For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery // deprecated: as of 4.7, this field is no longer set or honored. It will be removed in a future release. + // +optional EtcdDiscoveryDomain string `json:"etcdDiscoveryDomain"` // apiServerURL is a valid URI with scheme 'https', address and // optionally a port (defaulting to 443). apiServerURL can be used by components like the web console // to tell users where to find the Kubernetes API. + // +optional APIServerURL string `json:"apiServerURL"` // apiServerInternalURL is a valid URI with scheme 'https', // address and optionally a port (defaulting to 443). apiServerInternalURL can be used by components // like kubelets, to contact the Kubernetes API server using the // infrastructure provider rather than Kubernetes networking. + // +optional APIServerInternalURL string `json:"apiServerInternalURI"` // controlPlaneTopology expresses the expectations for operands that normally run on control nodes. @@ -99,7 +104,10 @@ type InfrastructureStatus struct { // its components are not visible within the cluster. // +kubebuilder:default=HighlyAvailable // +openshift:validation:FeatureGateAwareEnum:featureGate="",enum=HighlyAvailable;SingleReplica;External - // +openshift:validation:FeatureGateAwareEnum:featureGate=HighlyAvailableArbiter;DualReplica,enum=HighlyAvailable;HighlyAvailableArbiter;SingleReplica;DualReplica;External + // +openshift:validation:FeatureGateAwareEnum:featureGate=HighlyAvailableArbiter,enum=HighlyAvailable;HighlyAvailableArbiter;SingleReplica;External + // +openshift:validation:FeatureGateAwareEnum:featureGate=DualReplica,enum=HighlyAvailable;SingleReplica;DualReplica;External + // +openshift:validation:FeatureGateAwareEnum:requiredFeatureGate=HighlyAvailableArbiter;DualReplica,enum=HighlyAvailable;HighlyAvailableArbiter;SingleReplica;DualReplica;External + // +optional ControlPlaneTopology TopologyMode `json:"controlPlaneTopology"` // infrastructureTopology expresses the expectations for infrastructure services that do not run on control @@ -111,7 +119,8 @@ type InfrastructureStatus struct { // NOTE: External topology mode is not applicable for this field. // +kubebuilder:default=HighlyAvailable // +kubebuilder:validation:Enum=HighlyAvailable;SingleReplica - InfrastructureTopology TopologyMode `json:"infrastructureTopology"` + // +optional + InfrastructureTopology TopologyMode `json:"infrastructureTopology,omitempty"` // cpuPartitioning expresses if CPU partitioning is a currently enabled feature in the cluster. // CPU Partitioning means that this cluster can support partitioning workloads to specific CPU Sets. 
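Because several of these status fields are now +optional, consumers reading InfrastructureStatus should tolerate empty values instead of assuming they are always populated. A hedged sketch against the vendored types; the fallback string mirrors the kubebuilder default documented above:

    import configv1 "github.com/openshift/api/config/v1"

    // controlPlaneTopology returns the reported topology, falling back to the
    // documented default ("HighlyAvailable") when the now-optional field is
    // empty. The raw string conversion deliberately avoids assuming const
    // names that are not shown in this patch.
    func controlPlaneTopology(status configv1.InfrastructureStatus) configv1.TopologyMode {
        if status.ControlPlaneTopology == "" {
            return configv1.TopologyMode("HighlyAvailable")
        }
        return status.ControlPlaneTopology
    }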
@@ -523,7 +532,7 @@ type AWSPlatformStatus struct { // // +default={"dnsType": "PlatformDefault"} // +kubebuilder:default={"dnsType": "PlatformDefault"} - // +openshift:enable:FeatureGate=AWSClusterHostedDNS + // +openshift:enable:FeatureGate=AWSClusterHostedDNSInstall // +optional // +nullable CloudLoadBalancerConfig *CloudLoadBalancerConfig `json:"cloudLoadBalancerConfig,omitempty"` @@ -585,6 +594,19 @@ type AzurePlatformStatus struct { // +listType=atomic // +optional ResourceTags []AzureResourceTag `json:"resourceTags,omitempty"` + + // cloudLoadBalancerConfig holds configuration related to DNS and cloud + // load balancers. It allows configuration of in-cluster DNS as an alternative + // to the platform default DNS implementation. + // When using the ClusterHosted DNS type, Load Balancer IP addresses + // must be provided for the API and internal API load balancers as well as the + // ingress load balancer. + // + // +default={"dnsType": "PlatformDefault"} + // +kubebuilder:default={"dnsType": "PlatformDefault"} + // +openshift:enable:FeatureGate=AzureClusterHostedDNSInstall + // +optional + CloudLoadBalancerConfig *CloudLoadBalancerConfig `json:"cloudLoadBalancerConfig,omitempty"` } // AzureResourceTag is a tag to apply to Azure resources created for the cluster. @@ -628,7 +650,7 @@ const ( ) // GCPServiceEndpointName is the name of the GCP Service Endpoint. -// +kubebuilder:validation:Enum=Compute;Container;CloudResourceManager;DNS;File;IAM;ServiceUsage;Storage;TagManager +// +kubebuilder:validation:Enum=Compute;Container;CloudResourceManager;DNS;File;IAM;IAMCredentials;OAuth;ServiceUsage;Storage;STS type GCPServiceEndpointName string const ( @@ -650,14 +672,20 @@ const ( // GCPServiceEndpointNameIAM is the name used for the GCP IAM Service endpoint. GCPServiceEndpointNameIAM GCPServiceEndpointName = "IAM" + // GCPServiceEndpointNameIAMCredentials is the name used for the GCP IAM Credentials Service endpoint. + GCPServiceEndpointNameIAMCredentials GCPServiceEndpointName = "IAMCredentials" + + // GCPServiceEndpointNameOAuth is the name used for the GCP OAuth2 Service endpoint. + GCPServiceEndpointNameOAuth GCPServiceEndpointName = "OAuth" + // GCPServiceEndpointNameServiceUsage is the name used for the GCP Service Usage Service endpoint. GCPServiceEndpointNameServiceUsage GCPServiceEndpointName = "ServiceUsage" // GCPServiceEndpointNameStorage is the name used for the GCP Storage Service endpoint. GCPServiceEndpointNameStorage GCPServiceEndpointName = "Storage" - // GCPServiceEndpointNameTagManager is the name used for the GCP Tag Manager Service endpoint. - GCPServiceEndpointNameTagManager GCPServiceEndpointName = "TagManager" + // GCPServiceEndpointNameSTS is the name used for the GCP STS Service endpoint. + GCPServiceEndpointNameSTS GCPServiceEndpointName = "STS" ) // GCPServiceEndpoint store the configuration of a custom url to @@ -695,8 +723,8 @@ type GCPServiceEndpoint struct { type GCPPlatformSpec struct{} // GCPPlatformStatus holds the current status of the Google Cloud Platform infrastructure provider. 
-// +openshift:validation:FeatureGateAwareXValidation:featureGate=GCPLabelsTags,rule="!has(oldSelf.resourceLabels) && !has(self.resourceLabels) || has(oldSelf.resourceLabels) && has(self.resourceLabels)",message="resourceLabels may only be configured during installation" -// +openshift:validation:FeatureGateAwareXValidation:featureGate=GCPLabelsTags,rule="!has(oldSelf.resourceTags) && !has(self.resourceTags) || has(oldSelf.resourceTags) && has(self.resourceTags)",message="resourceTags may only be configured during installation" +// +kubebuilder:validation:XValidation:rule="!has(oldSelf.resourceLabels) && !has(self.resourceLabels) || has(oldSelf.resourceLabels) && has(self.resourceLabels)",message="resourceLabels may only be configured during installation" +// +kubebuilder:validation:XValidation:rule="!has(oldSelf.resourceTags) && !has(self.resourceTags) || has(oldSelf.resourceTags) && has(self.resourceTags)",message="resourceTags may only be configured during installation" type GCPPlatformStatus struct { // resourceGroupName is the Project ID for new GCP resources created for the cluster. ProjectID string `json:"projectID"` @@ -713,7 +741,6 @@ type GCPPlatformStatus struct { // +listType=map // +listMapKey=key // +optional - // +openshift:enable:FeatureGate=GCPLabelsTags ResourceLabels []GCPResourceLabel `json:"resourceLabels,omitempty"` // resourceTags is a list of additional tags to apply to GCP resources created for the cluster. @@ -724,7 +751,6 @@ type GCPPlatformStatus struct { // +listType=map // +listMapKey=key // +optional - // +openshift:enable:FeatureGate=GCPLabelsTags ResourceTags []GCPResourceTag `json:"resourceTags,omitempty"` // This field was introduced and removed under tech preview. @@ -741,7 +767,7 @@ type GCPPlatformStatus struct { // // +default={"dnsType": "PlatformDefault"} // +kubebuilder:default={"dnsType": "PlatformDefault"} - // +openshift:enable:FeatureGate=GCPClusterHostedDNS + // +openshift:enable:FeatureGate=GCPClusterHostedDNSInstall // +optional // +nullable CloudLoadBalancerConfig *CloudLoadBalancerConfig `json:"cloudLoadBalancerConfig,omitempty"` @@ -750,13 +776,13 @@ type GCPPlatformStatus struct { // used when creating clients to interact with GCP services. // When not specified, the default endpoint for the GCP region will be used. // Only 1 endpoint override is permitted for each GCP service. - // The maximum number of endpoint overrides allowed is 9. + // The maximum number of endpoint overrides allowed is 11. // +listType=map // +listMapKey=name - // +kubebuilder:validation:MaxItems=9 + // +kubebuilder:validation:MaxItems=11 // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x.name == y.name))",message="only 1 endpoint override is permitted per GCP service name" // +optional - // +openshift:enable:FeatureGate=GCPCustomAPIEndpoints + // +openshift:enable:FeatureGate=GCPCustomAPIEndpointsInstall ServiceEndpoints []GCPServiceEndpoint `json:"serviceEndpoints,omitempty"` } @@ -1006,7 +1032,6 @@ type BareMetalPlatformStatus struct { // loadBalancer defines how the load balancer used by the cluster is configured. // +default={"type": "OpenShiftManagedDefault"} // +kubebuilder:default={"type": "OpenShiftManagedDefault"} - // +openshift:enable:FeatureGate=BareMetalLoadBalancer // +optional LoadBalancer *BareMetalPlatformLoadBalancer `json:"loadBalancer,omitempty"` @@ -1220,7 +1245,6 @@ type OvirtPlatformStatus struct { // loadBalancer defines how the load balancer used by the cluster is configured. 
// +default={"type": "OpenShiftManagedDefault"} // +kubebuilder:default={"type": "OpenShiftManagedDefault"} - // +openshift:enable:FeatureGate=BareMetalLoadBalancer // +optional LoadBalancer *OvirtPlatformLoadBalancer `json:"loadBalancer,omitempty"` } @@ -1390,7 +1414,6 @@ type VSpherePlatformTopology struct { // VSpherePlatformFailureDomainSpec. // For example, for zone=zonea, region=region1, and infrastructure name=test, // the template path would be calculated as //vm/test-rhcos-region1-zonea. - // +openshift:enable:FeatureGate=VSphereControlPlaneMachineSet // +kubebuilder:validation:MinLength=1 // +kubebuilder:validation:MaxLength=2048 // +kubebuilder:validation:Pattern=`^/.*?/vm/.*?` @@ -1558,8 +1581,7 @@ type VSpherePlatformSpec struct { // + If VCenters is not defined use the existing cloud-config configmap defined // + in openshift-config. // +kubebuilder:validation:MinItems=0 - // +openshift:validation:FeatureGateAwareMaxItems:featureGate="",maxItems=1 - // +openshift:validation:FeatureGateAwareMaxItems:featureGate=VSphereMultiVCenters,maxItems=3 + // +kubebuilder:validation:MaxItems=3 // +kubebuilder:validation:XValidation:rule="size(self) != size(oldSelf) ? size(oldSelf) == 0 && size(self) < 2 : true",message="vcenters cannot be added or removed once set" // +listType=atomic // +optional @@ -1671,7 +1693,6 @@ type VSpherePlatformStatus struct { // loadBalancer defines how the load balancer used by the cluster is configured. // +default={"type": "OpenShiftManagedDefault"} // +kubebuilder:default={"type": "OpenShiftManagedDefault"} - // +openshift:enable:FeatureGate=BareMetalLoadBalancer // +optional LoadBalancer *VSpherePlatformLoadBalancer `json:"loadBalancer,omitempty"` @@ -1716,7 +1737,7 @@ type IBMCloudPlatformSpec struct { // serviceEndpoints is a list of custom endpoints which will override the default // service endpoints of an IBM service. These endpoints are used by components // within the cluster when trying to reach the IBM Cloud Services that have been - // overriden. The CCCMO reads in the IBMCloudPlatformSpec and validates each + // overridden. The CCCMO reads in the IBMCloudPlatformSpec and validates each // endpoint is resolvable. Once validated, the cloud config and IBMCloudPlatformStatus // are updated to reflect the same custom endpoints. // A maximum of 13 service endpoints overrides are supported. @@ -1750,7 +1771,7 @@ type IBMCloudPlatformStatus struct { // serviceEndpoints is a list of custom endpoints which will override the default // service endpoints of an IBM service. These endpoints are used by components // within the cluster when trying to reach the IBM Cloud Services that have been - // overriden. The CCCMO reads in the IBMCloudPlatformSpec and validates each + // overridden. The CCCMO reads in the IBMCloudPlatformSpec and validates each // endpoint is resolvable. Once validated, the cloud config and IBMCloudPlatformStatus // are updated to reflect the same custom endpoints. // +openshift:validation:FeatureGateAwareMaxItems:featureGate=DyanmicServiceEndpointIBMCloud,maxItems=13 @@ -2089,7 +2110,6 @@ type NutanixPlatformStatus struct { // loadBalancer defines how the load balancer used by the cluster is configured. 
 	// +default={"type": "OpenShiftManagedDefault"}
 	// +kubebuilder:default={"type": "OpenShiftManagedDefault"}
-	// +openshift:enable:FeatureGate=BareMetalLoadBalancer
 	// +optional
 	LoadBalancer *NutanixPlatformLoadBalancer `json:"loadBalancer,omitempty"`
 }
diff --git a/vendor/github.com/openshift/api/config/v1/types_ingress.go b/vendor/github.com/openshift/api/config/v1/types_ingress.go
index 9492e08a72..f70fe8f440 100644
--- a/vendor/github.com/openshift/api/config/v1/types_ingress.go
+++ b/vendor/github.com/openshift/api/config/v1/types_ingress.go
@@ -150,7 +150,7 @@ type AWSIngressSpec struct {
 	// +unionDiscriminator
 	// +kubebuilder:validation:Enum:=NLB;Classic
 	// +required
-	Type AWSLBType `json:"type,omitempty"`
+	Type AWSLBType `json:"type"`
 }

 type AWSLBType string
diff --git a/vendor/github.com/openshift/api/config/v1/types_kmsencryption.go b/vendor/github.com/openshift/api/config/v1/types_kmsencryption.go
new file mode 100644
index 0000000000..3293204fa4
--- /dev/null
+++ b/vendor/github.com/openshift/api/config/v1/types_kmsencryption.go
@@ -0,0 +1,55 @@
+package v1
+
+// KMSConfig defines the configuration for the KMS instance
+// that will be used with KMSEncryptionProvider encryption
+// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'AWS' ? has(self.aws) : !has(self.aws)",message="aws config is required when kms provider type is AWS, and forbidden otherwise"
+// +union
+type KMSConfig struct {
+	// type defines the kind of platform for the KMS provider.
+	// Currently, the only available provider type is AWS.
+	//
+	// +unionDiscriminator
+	// +required
+	Type KMSProviderType `json:"type"`
+
+	// aws defines the key config for using an AWS KMS instance
+	// for the encryption. The AWS KMS instance is managed
+	// by the user outside the purview of the control plane.
+	//
+	// +unionMember
+	// +optional
+	AWS *AWSKMSConfig `json:"aws,omitempty"`
+}
+
+// AWSKMSConfig defines the KMS config specific to AWS KMS provider
+type AWSKMSConfig struct {
+	// keyARN specifies the Amazon Resource Name (ARN) of the AWS KMS key used for encryption.
+	// The value must adhere to the format `arn:aws:kms:<region>:<account_id>:key/<key_id>`, where:
+	// - `<region>` is the AWS region consisting of lowercase letters and hyphens followed by a number.
+	// - `<account_id>` is a 12-digit numeric identifier for the AWS account.
+	// - `<key_id>` is a unique identifier for the KMS key, consisting of lowercase hexadecimal characters and hyphens.
+	//
+	// +kubebuilder:validation:MaxLength=128
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:XValidation:rule="self.matches('^arn:aws:kms:[a-z0-9-]+:[0-9]{12}:key/[a-f0-9-]+$')",message="keyARN must follow the format `arn:aws:kms:<region>:<account_id>:key/<key_id>`. The account ID must be a 12 digit number and the region and key ID should consist only of lowercase hexadecimal characters and hyphens (-)."
+	// +required
+	KeyARN string `json:"keyARN"`
+	// region specifies the AWS region where the KMS instance exists, and follows the format
+	// `<region-prefix>-<region-name>-<number>`, e.g.: `us-east-1`.
+	// Only lowercase letters and hyphens followed by numbers are allowed.
+	//
+	// +kubebuilder:validation:MaxLength=64
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:XValidation:rule="self.matches('^[a-z0-9]+(-[a-z0-9]+)*$')",message="region must be a valid AWS region, consisting of lowercase characters, digits and hyphens (-) only."
+ // +required + Region string `json:"region"` +} + +// KMSProviderType is a specific supported KMS provider +// +kubebuilder:validation:Enum=AWS +type KMSProviderType string + +const ( + // AWSKMSProvider represents a supported KMS provider for use with AWS KMS + AWSKMSProvider KMSProviderType = "AWS" +) diff --git a/vendor/github.com/openshift/api/config/v1/types_network.go b/vendor/github.com/openshift/api/config/v1/types_network.go index 41dc2eb97b..c0d1602b37 100644 --- a/vendor/github.com/openshift/api/config/v1/types_network.go +++ b/vendor/github.com/openshift/api/config/v1/types_network.go @@ -93,20 +93,25 @@ type NetworkSpec struct { type NetworkStatus struct { // IP address pool to use for pod IPs. // +listType=atomic + // +optional ClusterNetwork []ClusterNetworkEntry `json:"clusterNetwork,omitempty"` // IP address pool for services. // Currently, we only support a single entry here. // +listType=atomic + // +optional ServiceNetwork []string `json:"serviceNetwork,omitempty"` // networkType is the plugin that is deployed (e.g. OVNKubernetes). + // +optional NetworkType string `json:"networkType,omitempty"` // clusterNetworkMTU is the MTU for inter-pod networking. + // +optional ClusterNetworkMTU int `json:"clusterNetworkMTU,omitempty"` // migration contains the cluster network migration configuration. + // +optional Migration *NetworkMigration `json:"migration,omitempty"` // conditions represents the observations of a network.config current state. diff --git a/vendor/github.com/openshift/api/config/v1/types_node.go b/vendor/github.com/openshift/api/config/v1/types_node.go index 3977f9f14b..1282f33158 100644 --- a/vendor/github.com/openshift/api/config/v1/types_node.go +++ b/vendor/github.com/openshift/api/config/v1/types_node.go @@ -74,14 +74,14 @@ type NodeStatus struct { Conditions []metav1.Condition `json:"conditions,omitempty"` } -// +kubebuilder:validation:Enum=v1;v2;"" +// +kubebuilder:validation:Enum=v2;"" type CgroupMode string const ( CgroupModeEmpty CgroupMode = "" // Empty string indicates to honor user set value on the system that should not be overridden by OpenShift CgroupModeV1 CgroupMode = "v1" CgroupModeV2 CgroupMode = "v2" - CgroupModeDefault CgroupMode = CgroupModeV1 + CgroupModeDefault CgroupMode = CgroupModeV2 ) // +kubebuilder:validation:Enum=Default;MediumUpdateAverageReaction;LowUpdateSlowReaction diff --git a/vendor/github.com/openshift/api/config/v1/types_operatorhub.go b/vendor/github.com/openshift/api/config/v1/types_operatorhub.go index 1fddfa51e5..a4971a20c5 100644 --- a/vendor/github.com/openshift/api/config/v1/types_operatorhub.go +++ b/vendor/github.com/openshift/api/config/v1/types_operatorhub.go @@ -28,6 +28,7 @@ type OperatorHubSpec struct { type OperatorHubStatus struct { // sources encapsulates the result of applying the configuration for each // hub source + // +optional Sources []HubSourceStatus `json:"sources,omitempty"` } diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go index 40b0c857b1..788e10479b 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ //go:build !ignore_autogenerated // +build !ignore_autogenerated -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by codegen. DO NOT EDIT. 
package v1 @@ -42,6 +42,11 @@ func (in *APIServer) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *APIServerEncryption) DeepCopyInto(out *APIServerEncryption) { *out = *in + if in.KMS != nil { + in, out := &in.KMS, &out.KMS + *out = new(KMSConfig) + (*in).DeepCopyInto(*out) + } return } @@ -143,7 +148,7 @@ func (in *APIServerSpec) DeepCopyInto(out *APIServerSpec) { *out = make([]string, len(*in)) copy(*out, *in) } - out.Encryption = in.Encryption + in.Encryption.DeepCopyInto(&out.Encryption) if in.TLSSecurityProfile != nil { in, out := &in.TLSSecurityProfile, &out.TLSSecurityProfile *out = new(TLSSecurityProfile) @@ -211,6 +216,22 @@ func (in *AWSIngressSpec) DeepCopy() *AWSIngressSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSKMSConfig) DeepCopyInto(out *AWSKMSConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSKMSConfig. +func (in *AWSKMSConfig) DeepCopy() *AWSKMSConfig { + if in == nil { + return nil + } + out := new(AWSKMSConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AWSPlatformSpec) DeepCopyInto(out *AWSPlatformSpec) { *out = *in @@ -595,6 +616,11 @@ func (in *AzurePlatformStatus) DeepCopyInto(out *AzurePlatformStatus) { *out = make([]AzureResourceTag, len(*in)) copy(*out, *in) } + if in.CloudLoadBalancerConfig != nil { + in, out := &in.CloudLoadBalancerConfig, &out.CloudLoadBalancerConfig + *out = new(CloudLoadBalancerConfig) + (*in).DeepCopyInto(*out) + } return } @@ -1003,6 +1029,112 @@ func (in *ClusterCondition) DeepCopy() *ClusterCondition { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterImagePolicy) DeepCopyInto(out *ClusterImagePolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterImagePolicy. +func (in *ClusterImagePolicy) DeepCopy() *ClusterImagePolicy { + if in == nil { + return nil + } + out := new(ClusterImagePolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterImagePolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterImagePolicyList) DeepCopyInto(out *ClusterImagePolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterImagePolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterImagePolicyList. 
+func (in *ClusterImagePolicyList) DeepCopy() *ClusterImagePolicyList { + if in == nil { + return nil + } + out := new(ClusterImagePolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterImagePolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterImagePolicySpec) DeepCopyInto(out *ClusterImagePolicySpec) { + *out = *in + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]ImageScope, len(*in)) + copy(*out, *in) + } + in.Policy.DeepCopyInto(&out.Policy) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterImagePolicySpec. +func (in *ClusterImagePolicySpec) DeepCopy() *ClusterImagePolicySpec { + if in == nil { + return nil + } + out := new(ClusterImagePolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterImagePolicyStatus) DeepCopyInto(out *ClusterImagePolicyStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterImagePolicyStatus. +func (in *ClusterImagePolicyStatus) DeepCopy() *ClusterImagePolicyStatus { + if in == nil { + return nil + } + out := new(ClusterImagePolicyStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterNetworkEntry) DeepCopyInto(out *ClusterNetworkEntry) { *out = *in @@ -2000,6 +2132,22 @@ func (in *ExternalPlatformStatus) DeepCopy() *ExternalPlatformStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExtraMapping) DeepCopyInto(out *ExtraMapping) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraMapping. +func (in *ExtraMapping) DeepCopy() *ExtraMapping { + if in == nil { + return nil + } + out := new(ExtraMapping) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *FeatureGate) DeepCopyInto(out *FeatureGate) { *out = *in @@ -2192,6 +2340,33 @@ func (in *FeatureGateTests) DeepCopy() *FeatureGateTests { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FulcioCAWithRekor) DeepCopyInto(out *FulcioCAWithRekor) { + *out = *in + if in.FulcioCAData != nil { + in, out := &in.FulcioCAData, &out.FulcioCAData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.RekorKeyData != nil { + in, out := &in.RekorKeyData, &out.RekorKeyData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + out.FulcioSubject = in.FulcioSubject + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FulcioCAWithRekor. 
+func (in *FulcioCAWithRekor) DeepCopy() *FulcioCAWithRekor { + if in == nil { + return nil + } + out := new(FulcioCAWithRekor) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GCPPlatformSpec) DeepCopyInto(out *GCPPlatformSpec) { *out = *in @@ -2885,6 +3060,112 @@ func (in *ImageList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePolicy) DeepCopyInto(out *ImagePolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicy. +func (in *ImagePolicy) DeepCopy() *ImagePolicy { + if in == nil { + return nil + } + out := new(ImagePolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImagePolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePolicyList) DeepCopyInto(out *ImagePolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ImagePolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicyList. +func (in *ImagePolicyList) DeepCopy() *ImagePolicyList { + if in == nil { + return nil + } + out := new(ImagePolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImagePolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePolicySpec) DeepCopyInto(out *ImagePolicySpec) { + *out = *in + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]ImageScope, len(*in)) + copy(*out, *in) + } + in.Policy.DeepCopyInto(&out.Policy) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicySpec. +func (in *ImagePolicySpec) DeepCopy() *ImagePolicySpec { + if in == nil { + return nil + } + out := new(ImagePolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePolicyStatus) DeepCopyInto(out *ImagePolicyStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicyStatus. 
+func (in *ImagePolicyStatus) DeepCopy() *ImagePolicyStatus { + if in == nil { + return nil + } + out := new(ImagePolicyStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ImageSpec) DeepCopyInto(out *ImageSpec) { *out = *in @@ -3305,6 +3586,27 @@ func (in *IntermediateTLSProfile) DeepCopy() *IntermediateTLSProfile { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KMSConfig) DeepCopyInto(out *KMSConfig) { + *out = *in + if in.AWS != nil { + in, out := &in.AWS, &out.AWS + *out = new(AWSKMSConfig) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KMSConfig. +func (in *KMSConfig) DeepCopy() *KMSConfig { + if in == nil { + return nil + } + out := new(KMSConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KeystoneIdentityProvider) DeepCopyInto(out *KeystoneIdentityProvider) { *out = *in @@ -4674,6 +4976,49 @@ func (in *OvirtPlatformStatus) DeepCopy() *OvirtPlatformStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PKI) DeepCopyInto(out *PKI) { + *out = *in + if in.CertificateAuthorityRootsData != nil { + in, out := &in.CertificateAuthorityRootsData, &out.CertificateAuthorityRootsData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.CertificateAuthorityIntermediatesData != nil { + in, out := &in.CertificateAuthorityIntermediatesData, &out.CertificateAuthorityIntermediatesData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + out.PKICertificateSubject = in.PKICertificateSubject + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PKI. +func (in *PKI) DeepCopy() *PKI { + if in == nil { + return nil + } + out := new(PKI) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PKICertificateSubject) DeepCopyInto(out *PKICertificateSubject) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PKICertificateSubject. +func (in *PKICertificateSubject) DeepCopy() *PKICertificateSubject { + if in == nil { + return nil + } + out := new(PKICertificateSubject) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PlatformSpec) DeepCopyInto(out *PlatformSpec) { *out = *in @@ -4846,6 +5191,133 @@ func (in *PlatformStatus) DeepCopy() *PlatformStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Policy) DeepCopyInto(out *Policy) { + *out = *in + in.RootOfTrust.DeepCopyInto(&out.RootOfTrust) + if in.SignedIdentity != nil { + in, out := &in.SignedIdentity, &out.SignedIdentity + *out = new(PolicyIdentity) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy. 
+func (in *Policy) DeepCopy() *Policy { + if in == nil { + return nil + } + out := new(Policy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyFulcioSubject) DeepCopyInto(out *PolicyFulcioSubject) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyFulcioSubject. +func (in *PolicyFulcioSubject) DeepCopy() *PolicyFulcioSubject { + if in == nil { + return nil + } + out := new(PolicyFulcioSubject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyIdentity) DeepCopyInto(out *PolicyIdentity) { + *out = *in + if in.PolicyMatchExactRepository != nil { + in, out := &in.PolicyMatchExactRepository, &out.PolicyMatchExactRepository + *out = new(PolicyMatchExactRepository) + **out = **in + } + if in.PolicyMatchRemapIdentity != nil { + in, out := &in.PolicyMatchRemapIdentity, &out.PolicyMatchRemapIdentity + *out = new(PolicyMatchRemapIdentity) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyIdentity. +func (in *PolicyIdentity) DeepCopy() *PolicyIdentity { + if in == nil { + return nil + } + out := new(PolicyIdentity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyMatchExactRepository) DeepCopyInto(out *PolicyMatchExactRepository) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyMatchExactRepository. +func (in *PolicyMatchExactRepository) DeepCopy() *PolicyMatchExactRepository { + if in == nil { + return nil + } + out := new(PolicyMatchExactRepository) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyMatchRemapIdentity) DeepCopyInto(out *PolicyMatchRemapIdentity) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyMatchRemapIdentity. +func (in *PolicyMatchRemapIdentity) DeepCopy() *PolicyMatchRemapIdentity { + if in == nil { + return nil + } + out := new(PolicyMatchRemapIdentity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyRootOfTrust) DeepCopyInto(out *PolicyRootOfTrust) { + *out = *in + if in.PublicKey != nil { + in, out := &in.PublicKey, &out.PublicKey + *out = new(PublicKey) + (*in).DeepCopyInto(*out) + } + if in.FulcioCAWithRekor != nil { + in, out := &in.FulcioCAWithRekor, &out.FulcioCAWithRekor + *out = new(FulcioCAWithRekor) + (*in).DeepCopyInto(*out) + } + if in.PKI != nil { + in, out := &in.PKI, &out.PKI + *out = new(PKI) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRootOfTrust. +func (in *PolicyRootOfTrust) DeepCopy() *PolicyRootOfTrust { + if in == nil { + return nil + } + out := new(PolicyRootOfTrust) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PowerVSPlatformSpec) DeepCopyInto(out *PowerVSPlatformSpec) { *out = *in @@ -5146,6 +5618,32 @@ func (in *ProxyStatus) DeepCopy() *ProxyStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PublicKey) DeepCopyInto(out *PublicKey) { + *out = *in + if in.KeyData != nil { + in, out := &in.KeyData, &out.KeyData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.RekorKeyData != nil { + in, out := &in.RekorKeyData, &out.RekorKeyData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicKey. +func (in *PublicKey) DeepCopy() *PublicKey { + if in == nil { + return nil + } + out := new(PublicKey) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RegistryLocation) DeepCopyInto(out *RegistryLocation) { *out = *in @@ -5685,6 +6183,16 @@ func (in *TokenClaimMappings) DeepCopyInto(out *TokenClaimMappings) { *out = *in in.Username.DeepCopyInto(&out.Username) out.Groups = in.Groups + if in.UID != nil { + in, out := &in.UID, &out.UID + *out = new(TokenClaimOrExpressionMapping) + **out = **in + } + if in.Extra != nil { + in, out := &in.Extra, &out.Extra + *out = make([]ExtraMapping, len(*in)) + copy(*out, *in) + } return } @@ -5698,6 +6206,22 @@ func (in *TokenClaimMappings) DeepCopy() *TokenClaimMappings { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenClaimOrExpressionMapping) DeepCopyInto(out *TokenClaimOrExpressionMapping) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenClaimOrExpressionMapping. +func (in *TokenClaimOrExpressionMapping) DeepCopy() *TokenClaimOrExpressionMapping { + if in == nil { + return nil + } + out := new(TokenClaimOrExpressionMapping) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TokenClaimValidationRule) DeepCopyInto(out *TokenClaimValidationRule) { *out = *in @@ -5818,7 +6342,6 @@ func (in *UpdateHistory) DeepCopy() *UpdateHistory { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *UsernameClaimMapping) DeepCopyInto(out *UsernameClaimMapping) { *out = *in - out.TokenClaimMapping = in.TokenClaimMapping if in.Prefix != nil { in, out := &in.Prefix, &out.Prefix *out = new(UsernamePrefix) diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml index f8182fffec..6d756e8f90 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.featuregated-crd-manifests.yaml @@ -5,7 +5,8 @@ apiservers.config.openshift.io: CRDName: apiservers.config.openshift.io Capability: "" Category: "" - FeatureGates: [] + FeatureGates: + - KMSEncryptionProvider FilenameOperatorName: config-operator FilenameOperatorOrdering: "01" FilenameRunLevel: "0000_10" @@ -29,6 +30,7 @@ authentications.config.openshift.io: Category: "" FeatureGates: - ExternalOIDC + - ExternalOIDCWithUIDAndExtraClaimMappings FilenameOperatorName: config-operator FilenameOperatorOrdering: "01" FilenameRunLevel: "0000_10" @@ -64,6 +66,30 @@ builds.config.openshift.io: TopLevelFeatureGates: [] Version: v1 +clusterimagepolicies.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/2310 + CRDName: clusterimagepolicies.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - SigstoreImageVerification + - SigstoreImageVerificationPKI + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: ClusterImagePolicy + Labels: {} + PluralName: clusterimagepolicies + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: + - SigstoreImageVerification + Version: v1 + clusteroperators.config.openshift.io: Annotations: include.release.openshift.io/self-managed-high-availability: "true" @@ -280,6 +306,30 @@ imagedigestmirrorsets.config.openshift.io: TopLevelFeatureGates: [] Version: v1 +imagepolicies.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/2310 + CRDName: imagepolicies.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - SigstoreImageVerification + - SigstoreImageVerificationPKI + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: ImagePolicy + Labels: {} + PluralName: imagepolicies + PrinterColumns: [] + Scope: Namespaced + ShortNames: null + TopLevelFeatureGates: + - SigstoreImageVerification + Version: v1 + imagetagmirrorsets.config.openshift.io: Annotations: release.openshift.io/bootstrap-required: "true" @@ -311,19 +361,17 @@ infrastructures.config.openshift.io: Capability: "" Category: "" FeatureGates: - - AWSClusterHostedDNS - - BareMetalLoadBalancer + - AWSClusterHostedDNSInstall + - AzureClusterHostedDNSInstall - DualReplica - DyanmicServiceEndpointIBMCloud - - GCPClusterHostedDNS - - GCPCustomAPIEndpoints - - GCPLabelsTags + - GCPClusterHostedDNSInstall + - GCPCustomAPIEndpointsInstall - HighlyAvailableArbiter + - HighlyAvailableArbiter+DualReplica - NutanixMultiSubnets - - VSphereControlPlaneMachineSet - VSphereHostVMGroupZonal - VSphereMultiNetworks - - VSphereMultiVCenters FilenameOperatorName: config-operator FilenameOperatorOrdering: "01" FilenameRunLevel: "0000_10" diff --git 
a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go index 819b713ad1..e3494151c6 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go @@ -277,7 +277,9 @@ func (APIServer) SwaggerDoc() map[string]string { } var map_APIServerEncryption = map[string]string{ + "": "APIServerEncryption is used to encrypt sensitive resources on the cluster.", "type": "type defines what encryption type should be used to encrypt resources at the datastore layer. When this field is unset (i.e. when it is set to the empty string), identity is implied. The behavior of unset can and will change over time. Even if encryption is enabled by default, the meaning of unset may change to a different encryption type based on changes in best practices.\n\nWhen encryption is enabled, all sensitive resources shipped with the platform are encrypted. This list of sensitive resources can and will change over time. The current authoritative list is:\n\n 1. secrets\n 2. configmaps\n 3. routes.route.openshift.io\n 4. oauthaccesstokens.oauth.openshift.io\n 5. oauthauthorizetokens.oauth.openshift.io", + "kms": "kms defines the configuration for the external KMS instance that manages the encryption keys. When KMS encryption is enabled, sensitive resources will be encrypted using keys managed by an externally configured KMS instance.\n\nThe Key Management Service (KMS) instance provides symmetric encryption and is responsible for managing the lifecycle of the encryption keys outside of the control plane. This allows integration with an external provider to manage the data encryption keys securely.", } func (APIServerEncryption) SwaggerDoc() map[string]string { @@ -316,7 +318,7 @@ var map_APIServerSpec = map[string]string{ "clientCA": "clientCA references a ConfigMap containing a certificate bundle for the signers that will be recognized for incoming client certificates in addition to the operator managed signers. If this is empty, then only operator managed signers are valid. You usually only have to set this if you have your own PKI you wish to honor client certificates from. The ConfigMap must exist in the openshift-config namespace and contain the following required fields: - ConfigMap.Data[\"ca-bundle.crt\"] - CA bundle.", "additionalCORSAllowedOrigins": "additionalCORSAllowedOrigins lists additional, user-defined regular expressions describing hosts for which the API server allows access using the CORS headers. This may be needed to access the API and the integrated OAuth server from JavaScript applications. The values are regular expressions that correspond to the Golang regular expression language.", "encryption": "encryption allows the configuration of encryption of resources at the datastore layer.", - "tlsSecurityProfile": "tlsSecurityProfile specifies settings for TLS connections for externally exposed servers.\n\nIf unset, a default (which may change between releases) is chosen. Note that only Old, Intermediate and Custom profiles are currently supported, and the maximum available minTLSVersion is VersionTLS12.", + "tlsSecurityProfile": "tlsSecurityProfile specifies settings for TLS connections for externally exposed servers.\n\nWhen omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time.
The current default is the Intermediate profile.", "audit": "audit specifies the settings for audit configuration to be applied to all OpenShift-provided API servers in the cluster.", } @@ -394,12 +396,23 @@ func (DeprecatedWebhookTokenAuthenticator) SwaggerDoc() map[string]string { return map_DeprecatedWebhookTokenAuthenticator } +var map_ExtraMapping = map[string]string{ + "": "ExtraMapping allows specifying a key and CEL expression to evaluate the key's value. It is used to create additional mappings and attributes added to a cluster identity from a provided authentication token.", + "key": "key is a required field that specifies the string to use as the extra attribute key.\n\nkey must be a domain-prefixed path (e.g. 'example.org/foo'). key must not exceed 510 characters in length. key must contain the '/' character, separating the domain and path characters. key must not be empty.\n\nThe domain portion of the key (string of characters prior to the '/') must be a valid RFC1123 subdomain. It must not exceed 253 characters in length. It must start and end with an alphanumeric character. It must only contain lower case alphanumeric characters and '-' or '.'. It must not use the reserved domains, or be subdomains of, \"kubernetes.io\", \"k8s.io\", and \"openshift.io\".\n\nThe path portion of the key (string of characters after the '/') must not be empty and must consist of at least one alphanumeric character, percent-encoded octets, '-', '.', '_', '~', '!', '$', '&', ''', '(', ')', '*', '+', ',', ';', '=', and ':'. It must not exceed 256 characters in length.", + "valueExpression": "valueExpression is a required field to specify the CEL expression to extract the extra attribute value from a JWT token's claims. valueExpression must produce a string or string array value. \"\", [], and null are treated as the extra mapping not being present. Empty string values within an array are filtered out.\n\nCEL expressions have access to the token claims through a CEL variable, 'claims'. 'claims' is a map of claim names to claim values. For example, the 'sub' claim value can be accessed as 'claims.sub'. Nested claims can be accessed using dot notation ('claims.foo.bar').\n\nvalueExpression must not exceed 1024 characters in length. valueExpression must not be empty.", +} + +func (ExtraMapping) SwaggerDoc() map[string]string { + return map_ExtraMapping +} + var map_OIDCClientConfig = map[string]string{ - "componentName": "componentName is the name of the component that is supposed to consume this client configuration", - "componentNamespace": "componentNamespace is the namespace of the component that is supposed to consume this client configuration", - "clientID": "clientID is the identifier of the OIDC client from the OIDC provider", - "clientSecret": "clientSecret refers to a secret in the `openshift-config` namespace that contains the client secret in the `clientSecret` key of the `.data` field", - "extraScopes": "extraScopes is an optional set of scopes to request tokens with.", + "": "OIDCClientConfig configures how platform clients interact with identity providers as an authentication method", + "componentName": "componentName is a required field that specifies the name of the platform component being configured to use the identity provider as an authentication mode.
It is used in combination with componentNamespace as a unique identifier.\n\ncomponentName must not be an empty string (\"\") and must not exceed 256 characters in length.", + "componentNamespace": "componentNamespace is a required field that specifies the namespace in which the platform component being configured to use the identity provider as an authentication mode is running. It is used in combination with componentName as a unique identifier.\n\ncomponentNamespace must not be an empty string (\"\") and must not exceed 63 characters in length.", + "clientID": "clientID is a required field that configures the client identifier, from the identity provider, that the platform component uses for authentication requests made to the identity provider. The identity provider must accept this identifier for platform components to be able to use the identity provider as an authentication mode.\n\nclientID must not be an empty string (\"\").", + "clientSecret": "clientSecret is an optional field that configures the client secret used by the platform component when making authentication requests to the identity provider.\n\nWhen not specified, no client secret will be used when making authentication requests to the identity provider.\n\nWhen specified, clientSecret references a Secret in the 'openshift-config' namespace that contains the client secret in the 'clientSecret' key of the '.data' field. The client secret will be used when making authentication requests to the identity provider.\n\nPublic clients do not require a client secret but private clients do require a client secret to work with the identity provider.", + "extraScopes": "extraScopes is an optional field that configures the extra scopes that should be requested by the platform component when making authentication requests to the identity provider. This is useful if you have configured claim mappings that require specific scopes to be requested beyond the standard OIDC scopes.\n\nWhen omitted, no additional scopes are requested.", } func (OIDCClientConfig) SwaggerDoc() map[string]string { @@ -407,9 +420,10 @@ func (OIDCClientConfig) SwaggerDoc() map[string]string { } var map_OIDCClientReference = map[string]string{ - "oidcProviderName": "OIDCName refers to the `name` of the provider from `oidcProviders`", - "issuerURL": "URL is the serving URL of the token issuer.
Must use the https:// scheme.", - "clientID": "clientID is the identifier of the OIDC client from the OIDC provider", + "": "OIDCClientReference is a reference to a platform component client configuration.", + "oidcProviderName": "oidcProviderName is a required reference to the 'name' of the identity provider configured in 'oidcProviders' that this client is associated with.\n\noidcProviderName must not be an empty string (\"\").", + "issuerURL": "issuerURL is a required field that specifies the URL of the identity provider that this client is configured to make requests against.\n\nissuerURL must use the 'https' scheme.", + "clientID": "clientID is a required field that specifies the client identifier, from the identity provider, that the platform component is using for authentication requests made to the identity provider.\n\nclientID must not be empty.", } func (OIDCClientReference) SwaggerDoc() map[string]string { @@ -417,10 +431,11 @@ func (OIDCClientReference) SwaggerDoc() map[string]string { } var map_OIDCClientStatus = map[string]string{ - "componentName": "componentName is the name of the component that will consume a client configuration.", - "componentNamespace": "componentNamespace is the namespace of the component that will consume a client configuration.", - "currentOIDCClients": "currentOIDCClients is a list of clients that the component is currently using.", - "consumingUsers": "consumingUsers is a slice of ServiceAccounts that need to have read permission on the `clientSecret` secret.", + "": "OIDCClientStatus represents the current state of platform components and how they interact with the configured identity providers.", + "componentName": "componentName is a required field that specifies the name of the platform component using the identity provider as an authentication mode. It is used in combination with componentNamespace as a unique identifier.\n\ncomponentName must not be an empty string (\"\") and must not exceed 256 characters in length.", + "componentNamespace": "componentNamespace is a required field that specifies the namespace in which the platform component using the identity provider as an authentication mode is running. It is used in combination with componentName as a unique identifier.\n\ncomponentNamespace must not be an empty string (\"\") and must not exceed 63 characters in length.", + "currentOIDCClients": "currentOIDCClients is an optional list of clients that the component is currently using. Entries must have unique issuerURL/clientID pairs.", + "consumingUsers": "consumingUsers is an optional list of ServiceAccounts requiring read permissions on the `clientSecret` secret.\n\nconsumingUsers must not exceed 5 entries.", "conditions": "conditions are used to communicate the state of the `oidcClients` entry.\n\nSupported conditions include Available, Degraded and Progressing.\n\nIf Available is true, the component is successfully using the configured client. If Degraded is true, that means something has gone wrong trying to handle the client configuration. 
If Progressing is true, that means the component is taking some action related to the `oidcClients` entry.", } @@ -429,11 +444,11 @@ func (OIDCClientStatus) SwaggerDoc() map[string]string { } var map_OIDCProvider = map[string]string{ - "name": "name of the OIDC provider", - "issuer": "issuer describes atributes of the OIDC token issuer", - "oidcClients": "oidcClients contains configuration for the platform's clients that need to request tokens from the issuer", - "claimMappings": "claimMappings describes rules on how to transform information from an ID token into a cluster identity", - "claimValidationRules": "claimValidationRules are rules that are applied to validate token claims to authenticate users.", + "name": "name is a required field that configures the unique human-readable identifier associated with the identity provider. It is used to distinguish between multiple identity providers and has no impact on token validation or authentication mechanics.\n\nname must not be an empty string (\"\").", + "issuer": "issuer is a required field that configures how the platform interacts with the identity provider and how tokens issued from the identity provider are evaluated by the Kubernetes API server.", + "oidcClients": "oidcClients is an optional field that configures how on-cluster, platform clients should request tokens from the identity provider. oidcClients must not exceed 20 entries and entries must have unique namespace/name pairs.", + "claimMappings": "claimMappings is a required field that configures the rules to be used by the Kubernetes API server for translating claims in a JWT token, issued by the identity provider, to a cluster identity.", + "claimValidationRules": "claimValidationRules is an optional field that configures the rules to be used by the Kubernetes API server for validating the claims in a JWT token issued by the identity provider.\n\nValidation rules are joined via an AND operation.", } func (OIDCProvider) SwaggerDoc() map[string]string { @@ -441,7 +456,8 @@ func (OIDCProvider) SwaggerDoc() map[string]string { } var map_PrefixedClaimMapping = map[string]string{ - "prefix": "prefix is a string to prefix the value from the token in the result of the claim mapping.\n\nBy default, no prefixing occurs.\n\nExample: if `prefix` is set to \"myoidc:\"\" and the `claim` in JWT contains an array of strings \"a\", \"b\" and \"c\", the mapping will result in an array of string \"myoidc:a\", \"myoidc:b\" and \"myoidc:c\".", + "": "PrefixedClaimMapping configures a claim mapping that allows for an optional prefix.", + "prefix": "prefix is an optional field that configures the prefix that will be applied to the cluster identity attribute during the process of mapping JWT claims to cluster identity attributes.\n\nWhen omitted (\"\"), no prefix is applied to the cluster identity attribute.\n\nExample: if `prefix` is set to \"myoidc:\" and the `claim` in JWT contains an array of strings \"a\", \"b\" and \"c\", the mapping will result in an array of string \"myoidc:a\", \"myoidc:b\" and \"myoidc:c\".", } func (PrefixedClaimMapping) SwaggerDoc() map[string]string { @@ -449,7 +465,8 @@ func (PrefixedClaimMapping) SwaggerDoc() map[string]string { } var map_TokenClaimMapping = map[string]string{ - "claim": "claim is a JWT token claim to be used in the mapping", + "": "TokenClaimMapping allows specifying a JWT token claim to be used when mapping claims from an authentication token to cluster identities.", + "claim": "claim is a required field that configures the JWT token claim whose 
value is assigned to the cluster identity field associated with this mapping.", } func (TokenClaimMapping) SwaggerDoc() map[string]string { @@ -457,17 +474,29 @@ func (TokenClaimMapping) SwaggerDoc() map[string]string { } var map_TokenClaimMappings = map[string]string{ - "username": "username is a name of the claim that should be used to construct usernames for the cluster identity.\n\nDefault value: \"sub\"", - "groups": "groups is a name of the claim that should be used to construct groups for the cluster identity. The referenced claim must use array of strings values.", + "username": "username is a required field that configures how the username of a cluster identity should be constructed from the claims in a JWT token issued by the identity provider.", + "groups": "groups is an optional field that configures how the groups of a cluster identity should be constructed from the claims in a JWT token issued by the identity provider. When referencing a claim, if the claim is present in the JWT token, its value must be a list of groups separated by a comma (','). For example - '\"example\"' and '\"exampleOne\", \"exampleTwo\", \"exampleThree\"' are valid claim values.", + "uid": "uid is an optional field for configuring the claim mapping used to construct the uid for the cluster identity.\n\nWhen using uid.claim to specify the claim it must be a single string value. When using uid.expression the expression must result in a single string value.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose a default, which is subject to change over time. The current default is to use the 'sub' claim.", + "extra": "extra is an optional field for configuring the mappings used to construct the extra attribute for the cluster identity. When omitted, no extra attributes will be present on the cluster identity. key values for extra mappings must be unique. A maximum of 32 extra attribute mappings may be provided.", } func (TokenClaimMappings) SwaggerDoc() map[string]string { return map_TokenClaimMappings } +var map_TokenClaimOrExpressionMapping = map[string]string{ + "": "TokenClaimOrExpressionMapping allows specifying either a JWT token claim or CEL expression to be used when mapping claims from an authentication token to cluster identities.", + "claim": "claim is an optional field for specifying the JWT token claim that is used in the mapping. The value of this claim will be assigned to the field in which this mapping is associated.\n\nPrecisely one of claim or expression must be set. claim must not be specified when expression is set. When specified, claim must be at least 1 character in length and must not exceed 256 characters in length.", + "expression": "expression is an optional field for specifying a CEL expression that produces a string value from JWT token claims.\n\nCEL expressions have access to the token claims through a CEL variable, 'claims'. 'claims' is a map of claim names to claim values. For example, the 'sub' claim value can be accessed as 'claims.sub'. Nested claims can be accessed using dot notation ('claims.foo.bar').\n\nPrecisely one of claim or expression must be set. expression must not be specified when claim is set. 
When specified, expression must be at least 1 character in length and must not exceed 1024 characters in length.", +} + +func (TokenClaimOrExpressionMapping) SwaggerDoc() map[string]string { + return map_TokenClaimOrExpressionMapping +} + var map_TokenClaimValidationRule = map[string]string{ - "type": "type sets the type of the validation rule", - "requiredClaim": "requiredClaim allows configuring a required claim name and its expected value", + "type": "type is an optional field that configures the type of the validation rule.\n\nAllowed values are 'RequiredClaim' and omitted (not provided or an empty string).\n\nWhen set to 'RequiredClaim', the Kubernetes API server will be configured to validate that the incoming JWT contains the required claim and that its value matches the required value.\n\nDefaults to 'RequiredClaim'.", + "requiredClaim": "requiredClaim is an optional field that configures the required claim and value that the Kubernetes API server will use to validate if an incoming JWT is valid for this identity provider.", } func (TokenClaimValidationRule) SwaggerDoc() map[string]string { @@ -475,9 +504,9 @@ func (TokenClaimValidationRule) SwaggerDoc() map[string]string { } var map_TokenIssuer = map[string]string{ - "issuerURL": "URL is the serving URL of the token issuer. Must use the https:// scheme.", - "audiences": "audiences is an array of audiences that the token was issued for. Valid tokens must include at least one of these values in their \"aud\" claim. Must be set to exactly one value.", - "issuerCertificateAuthority": "CertificateAuthority is a reference to a config map in the configuration namespace. The .data of the configMap must contain the \"ca-bundle.crt\" key. If unset, system trust is used instead.", + "issuerURL": "issuerURL is a required field that configures the URL used to issue tokens by the identity provider. The Kubernetes API server determines how authentication tokens should be handled by matching the 'iss' claim in the JWT to the issuerURL of configured identity providers.\n\nMust be at least 1 character and must not exceed 512 characters in length. Must be a valid URL that uses the 'https' scheme and does not contain a query, fragment or user.", + "audiences": "audiences is a required field that configures the acceptable audiences the JWT token, issued by the identity provider, must be issued to. At least one of the entries must match the 'aud' claim in the JWT token.\n\naudiences must contain at least one entry and must not exceed ten entries.", + "issuerCertificateAuthority": "issuerCertificateAuthority is an optional field that configures the certificate authority, used by the Kubernetes API server, to validate the connection to the identity provider when fetching discovery information.\n\nWhen not specified, the system trust is used.\n\nWhen specified, it must reference a ConfigMap in the openshift-config namespace containing the PEM-encoded CA certificates under the 'ca-bundle.crt' key in the data field of the ConfigMap.", } func (TokenIssuer) SwaggerDoc() map[string]string { @@ -485,8 +514,8 @@ func (TokenIssuer) SwaggerDoc() map[string]string { } var map_TokenRequiredClaim = map[string]string{ - "claim": "claim is a name of a required claim. Only claims with string values are supported.", - "requiredValue": "requiredValue is the required value for the claim.", + "claim": "claim is a required field that configures the name of the required claim. 
When taken from the JWT claims, claim must be a string value.\n\nclaim must not be an empty string (\"\").", + "requiredValue": "requiredValue is a required field that configures the value that 'claim' must have when taken from the incoming JWT claims. If the value in the JWT claims does not match, the token will be rejected for authentication.\n\nrequiredValue must not be an empty string (\"\").", } func (TokenRequiredClaim) SwaggerDoc() map[string]string { @@ -494,13 +523,24 @@ func (TokenRequiredClaim) SwaggerDoc() map[string]string { } var map_UsernameClaimMapping = map[string]string{ - "prefixPolicy": "prefixPolicy specifies how a prefix should apply.\n\nBy default, claims other than `email` will be prefixed with the issuer URL to prevent naming clashes with other plugins.\n\nSet to \"NoPrefix\" to disable prefixing.\n\nExample:\n (1) `prefix` is set to \"myoidc:\" and `claim` is set to \"username\".\n If the JWT claim `username` contains value `userA`, the resulting\n mapped value will be \"myoidc:userA\".\n (2) `prefix` is set to \"myoidc:\" and `claim` is set to \"email\". If the\n JWT `email` claim contains value \"userA@myoidc.tld\", the resulting\n mapped value will be \"myoidc:userA@myoidc.tld\".\n (3) `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`,\n the JWT claims include \"username\":\"userA\" and \"email\":\"userA@myoidc.tld\",\n and `claim` is set to:\n (a) \"username\": the mapped value will be \"https://myoidc.tld#userA\"\n (b) \"email\": the mapped value will be \"userA@myoidc.tld\"", + "claim": "claim is a required field that configures the JWT token claim whose value is assigned to the cluster identity field associated with this mapping.\n\nclaim must not be an empty string (\"\") and must not exceed 256 characters.", + "prefixPolicy": "prefixPolicy is an optional field that configures how a prefix should be applied to the value of the JWT claim specified in the 'claim' field.\n\nAllowed values are 'Prefix', 'NoPrefix', and omitted (not provided or an empty string).\n\nWhen set to 'Prefix', the value specified in the prefix field will be prepended to the value of the JWT claim. The prefix field must be set when prefixPolicy is 'Prefix'.\n\nWhen set to 'NoPrefix', no prefix will be prepended to the value of the JWT claim.\n\nWhen omitted, this means no opinion and the platform is left to choose any prefixes that are applied which is subject to change over time. Currently, the platform prepends `{issuerURL}#` to the value of the JWT claim when the claim is not 'email'. 
As an example, consider the following scenario:\n `prefix` is unset, `issuerURL` is set to `https://myoidc.tld`,\n the JWT claims include \"username\":\"userA\" and \"email\":\"userA@myoidc.tld\",\n and `claim` is set to:\n - \"username\": the mapped value will be \"https://myoidc.tld#userA\"\n - \"email\": the mapped value will be \"userA@myoidc.tld\"", + "prefix": "prefix configures the prefix that should be prepended to the value of the JWT claim.\n\nprefix must be set when prefixPolicy is set to 'Prefix' and must be unset otherwise.", } func (UsernameClaimMapping) SwaggerDoc() map[string]string { return map_UsernameClaimMapping } +var map_UsernamePrefix = map[string]string{ + "": "UsernamePrefix configures the string that should be used as a prefix for username claim mappings.", + "prefixString": "prefixString is a required field that configures the prefix that will be applied to the cluster identity username attribute during the process of mapping JWT claims to cluster identity attributes.\n\nprefixString must not be an empty string (\"\").", +} + +func (UsernamePrefix) SwaggerDoc() map[string]string { + return map_UsernamePrefix +} + var map_WebhookTokenAuthenticator = map[string]string{ "": "webhookTokenAuthenticator holds the necessary configuration options for a remote token authenticator", "kubeConfig": "kubeConfig references a secret that contains kube config file data which describes how to access the remote webhook service. The namespace for the referenced secret is openshift-config.\n\nFor further details, see:\n\nhttps://kubernetes.io/docs/reference/access-authn-authz/authentication/#webhook-token-authentication\n\nThe key \"kubeConfig\" is used to locate the data. If the secret or expected key is not found, the webhook is not honored. If the specified kube config data is not valid, the webhook is not honored.", @@ -571,8 +611,47 @@ func (ImageLabel) SwaggerDoc() map[string]string { return map_ImageLabel } +var map_ClusterImagePolicy = map[string]string{ + "": "ClusterImagePolicy holds cluster-wide configuration for image signature verification\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec contains the configuration for the cluster image policy.", + "status": "status contains the observed state of the resource.", +} + +func (ClusterImagePolicy) SwaggerDoc() map[string]string { + return map_ClusterImagePolicy +} + +var map_ClusterImagePolicyList = map[string]string{ + "": "ClusterImagePolicyList is a list of ClusterImagePolicy resources\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of ClusterImagePolicies", +} + +func (ClusterImagePolicyList) SwaggerDoc() map[string]string { + return map_ClusterImagePolicyList +} + +var map_ClusterImagePolicySpec = map[string]string{ + "": "ClusterImagePolicySpec is the specification of the ClusterImagePolicy custom resource.", + "scopes": "scopes is a required field that defines the list of image identities assigned to a policy. Each item refers to a scope in a registry implementing the \"Docker Registry HTTP API V2\".
Scopes matching individual images are named Docker references in the fully expanded form, either using a tag or digest. For example, docker.io/library/busybox:latest (not busybox:latest). More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number). Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not. This supports no more than 256 scopes in one object. If multiple scopes match a given image, only the policy requirements for the most specific scope apply. The policy requirements for more general scopes are ignored. In addition to setting a policy appropriate for your own deployed applications, make sure that a policy on the OpenShift image repositories quay.io/openshift-release-dev/ocp-release, quay.io/openshift-release-dev/ocp-v4.0-art-dev (or on a more general scope) allows deployment of the OpenShift images required for cluster operation. If a scope is configured in both the ClusterImagePolicy and the ImagePolicy, or if the scope in ImagePolicy is nested under one of the scopes from the ClusterImagePolicy, only the policy from the ClusterImagePolicy will be applied. For additional details about the format, please refer to the document explaining the docker transport field, which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker", + "policy": "policy is a required field that contains configuration to allow scopes to be verified, and defines how images not matching the verification policy will be treated.", +} + +func (ClusterImagePolicySpec) SwaggerDoc() map[string]string { + return map_ClusterImagePolicySpec +} + +var map_ClusterImagePolicyStatus = map[string]string{ + "conditions": "conditions provide details on the status of this API Resource.", +} + +func (ClusterImagePolicyStatus) SwaggerDoc() map[string]string { + return map_ClusterImagePolicyStatus +} + var map_ClusterOperator = map[string]string{ - "": "ClusterOperator is the Custom Resource object which holds the current state of an operator. This object is used by operators to convey their state to the rest of the cluster.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "ClusterOperator holds the status of a core or optional OpenShift component managed by the Cluster Version Operator (CVO). This object is used by operators to convey their state to the rest of the cluster. Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", "spec": "spec holds configuration that could apply to any operator.", "status": "status holds the information about the state of an operator. It is consistent with status information across the Kubernetes ecosystem.", @@ -700,7 +779,7 @@ var map_ClusterVersionSpec = map[string]string{ "clusterID": "clusterID uniquely identifies this cluster. This is expected to be an RFC4122 UUID value (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx in hexadecimal values).
 	"desiredUpdate": "desiredUpdate is an optional field that indicates the desired value of the cluster version. Setting this value will trigger an upgrade (if the current version does not match the desired version). The set of recommended update values is listed as part of available updates in status, and setting values outside that range may cause the upgrade to fail.\n\nSome of the fields are inter-related with restrictions and meanings described here. 1. image is specified, version is specified, architecture is specified. API validation error. 2. image is specified, version is specified, architecture is not specified. The version extracted from the referenced image must match the specified version. 3. image is specified, version is not specified, architecture is specified. API validation error. 4. image is specified, version is not specified, architecture is not specified. image is used. 5. image is not specified, version is specified, architecture is specified. version and desired architecture are used to select an image. 6. image is not specified, version is specified, architecture is not specified. version and current architecture are used to select an image. 7. image is not specified, version is not specified, architecture is specified. API validation error. 8. image is not specified, version is not specified, architecture is not specified. API validation error.\n\nIf an upgrade fails the operator will halt and report status about the failing component. Setting the desired update value back to the previous version will cause a rollback to be attempted. Not all rollbacks will succeed.",
 	"upstream": "upstream may be used to specify the preferred update server. By default it will use the appropriate update server for the cluster and region.",
-	"channel": "channel is an identifier for explicitly requesting that a non-default set of updates be applied to this cluster. The default channel will be contain stable updates that are appropriate for production clusters.",
+	"channel": "channel is an identifier for explicitly requesting a non-default set of updates to be applied to this cluster. The default channel will contain stable updates that are appropriate for production clusters.",
 	"capabilities": "capabilities configures the installation of optional, core cluster components. A null value here is identical to an empty object; see the child properties for default semantics.",
 	"signatureStores": "signatureStores contains the upstream URIs to verify release signatures and optional reference to a config map by name containing the PEM-encoded CA bundle.\n\nBy default, CVO will use existing signature stores if this property is empty. The CVO will check the release signatures in the local ConfigMaps first. It will search for a valid signature in these stores in parallel only when local ConfigMaps did not include a valid signature. Validation will fail if none of the signature stores reply with valid signature before timeout. Setting signatureStores will replace the default signature stores with custom signature stores. Default stores can be used with custom signature stores by adding them manually.\n\nA maximum of 32 signature stores may be configured.",
 	"overrides": "overrides is a list of overrides for components that are managed by the cluster version operator. Marking a component unmanaged will prevent the operator from creating or updating the object.",
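The eight image/version/architecture combinations documented for desiredUpdate collapse into three constraints. The following is an illustrative sketch of that decision table; it is not code from this patch or from the operator:

package main

import (
	"errors"
	"fmt"
)

// validateDesiredUpdate condenses the documented combinations:
// architecture may only be set together with version, architecture may not
// be combined with image, and at least one of image or version must be set.
func validateDesiredUpdate(image, version, architecture string) error {
	if architecture != "" && version == "" {
		return errors.New("architecture requires version to be set") // cases 3 and 7
	}
	if architecture != "" && image != "" {
		return errors.New("architecture may not be combined with image") // case 1
	}
	if image == "" && version == "" {
		return errors.New("one of image or version must be set") // case 8
	}
	return nil // cases 2, 4, 5 and 6 are accepted
}

func main() {
	fmt.Println(validateDesiredUpdate("", "4.17.3", "arm64")) // <nil> (case 5)
	fmt.Println(validateDesiredUpdate("", "", "arm64"))       // error (case 7)
}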
@@ -814,7 +893,7 @@ var map_UpdateHistory = map[string]string{
 	"version": "version is a semantic version identifying the update version. If the requested image does not define a version, or if a failure occurs retrieving the image, this value may be empty.",
 	"image": "image is a container image location that contains the update. This value is always populated.",
 	"verified": "verified indicates whether the provided update was properly verified before it was installed. If this is false the cluster may not be trusted. Verified does not cover upgradeable checks that depend on the cluster state at the time when the update target was accepted.",
-	"acceptedRisks": "acceptedRisks records risks which were accepted to initiate the update. For example, it may menition an Upgradeable=False or missing signature that was overriden via desiredUpdate.force, or an update that was initiated despite not being in the availableUpdates set of recommended update targets.",
+	"acceptedRisks": "acceptedRisks records risks which were accepted to initiate the update. For example, it may mention an Upgradeable=False or missing signature that was overridden via desiredUpdate.force, or an update that was initiated despite not being in the availableUpdates set of recommended update targets.",
 }
 
 func (UpdateHistory) SwaggerDoc() map[string]string {
@@ -1135,6 +1214,147 @@ func (ImageDigestMirrors) SwaggerDoc() map[string]string {
 	return map_ImageDigestMirrors
 }
 
+var map_FulcioCAWithRekor = map[string]string{
+	"":              "FulcioCAWithRekor defines the root of trust based on the Fulcio certificate and the Rekor public key.",
+	"fulcioCAData":  "fulcioCAData is a required field that contains inline base64-encoded data for the PEM-format Fulcio CA. fulcioCAData must be at most 8192 characters.",
+	"rekorKeyData":  "rekorKeyData is a required field that contains inline base64-encoded data for the PEM-format Rekor public key. rekorKeyData must be at most 8192 characters.",
+	"fulcioSubject": "fulcioSubject is a required field that specifies the OIDC issuer and the email of the Fulcio authentication configuration.",
+}
+
+func (FulcioCAWithRekor) SwaggerDoc() map[string]string {
+	return map_FulcioCAWithRekor
+}
+
+var map_ImagePolicy = map[string]string{
+	"":         "ImagePolicy holds namespace-wide configuration for image signature verification\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"spec":     "spec holds user settable values for configuration",
+	"status":   "status contains the observed state of the resource.",
+}
+
+func (ImagePolicy) SwaggerDoc() map[string]string {
+	return map_ImagePolicy
+}
+
+var map_ImagePolicyList = map[string]string{
+	"":         "ImagePolicyList is a list of ImagePolicy resources\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"items":    "items is a list of ImagePolicies",
+}
+
+func (ImagePolicyList) SwaggerDoc() map[string]string {
+	return map_ImagePolicyList
+}
+
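To connect the FulcioCAWithRekor documentation with the Go types introduced later in this patch, here is a minimal, hypothetical construction sketch. The PolicyType constant names are assumed from the config/v1alpha1 package, and the PEM payloads are placeholders, not working trust material:

package main

import (
	"fmt"

	"github.com/openshift/api/config/v1alpha1"
)

func main() {
	rot := v1alpha1.PolicyRootOfTrust{
		// Assumed constant name for the "FulcioCAWithRekor" policy type.
		PolicyType: v1alpha1.FulcioCAWithRekorRootOfTrust,
		FulcioCAWithRekor: &v1alpha1.FulcioCAWithRekor{
			// []byte fields serialize as base64 in JSON/YAML, which is where the
			// "inline base64-encoded data" wording in the docs comes from.
			FulcioCAData: []byte("-----BEGIN CERTIFICATE-----\n...placeholder...\n-----END CERTIFICATE-----"),
			RekorKeyData: []byte("-----BEGIN PUBLIC KEY-----\n...placeholder...\n-----END PUBLIC KEY-----"),
			FulcioSubject: v1alpha1.PolicyFulcioSubject{
				OIDCIssuer:  "https://expected.OIDC.issuer/",
				SignedEmail: "expected-signing-user@example.com",
			},
		},
	}
	fmt.Println(rot.PolicyType)
}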
+var map_ImagePolicySpec = map[string]string{
+	"":       "ImagePolicySpec is the specification of the ImagePolicy CRD.",
+	"scopes": "scopes is a required field that defines the list of image identities assigned to a policy. Each item refers to a scope in a registry implementing the \"Docker Registry HTTP API V2\". Scopes matching individual images are named Docker references in the fully expanded form, either using a tag or digest. For example, docker.io/library/busybox:latest (not busybox:latest). More general scopes are prefixes of individual-image scopes, and specify a repository (by omitting the tag or digest), a repository namespace, or a registry host (by only specifying the host name and possibly a port number) or a wildcard expression starting with `*.`, for matching all subdomains (not including a port number). Wildcards are only supported for subdomain matching, and may not be used in the middle of the host, i.e. *.example.com is a valid case, but example*.*.com is not. A maximum of 256 scopes may be configured in one object. If multiple scopes match a given image, only the policy requirements for the most specific scope apply. The policy requirements for more general scopes are ignored. In addition to setting a policy appropriate for your own deployed applications, make sure that a policy on the OpenShift image repositories quay.io/openshift-release-dev/ocp-release, quay.io/openshift-release-dev/ocp-v4.0-art-dev (or on a more general scope) allows deployment of the OpenShift images required for cluster operation. If a scope is configured in both the ClusterImagePolicy and the ImagePolicy, or if the scope in ImagePolicy is nested under one of the scopes from the ClusterImagePolicy, only the policy from the ClusterImagePolicy will be applied. For additional details about the format, please refer to the document explaining the docker transport field, which can be found at: https://github.com/containers/image/blob/main/docs/containers-policy.json.5.md#docker",
+	"policy": "policy is a required field that contains configuration to allow scopes to be verified, and defines how images not matching the verification policy will be treated.",
+}
+
+func (ImagePolicySpec) SwaggerDoc() map[string]string {
+	return map_ImagePolicySpec
+}
+
+var map_ImagePolicyStatus = map[string]string{
+	"conditions": "conditions provide details on the status of this API Resource. The condition type 'Pending' indicates that the custom resource contains a policy that cannot take effect. It is either overwritten by a global policy or the image scope is not valid.",
+}
+
+func (ImagePolicyStatus) SwaggerDoc() map[string]string {
+	return map_ImagePolicyStatus
+}
+
+var map_PKI = map[string]string{
+	"":                    "PKI defines the root of trust based on Root CA(s) and corresponding intermediate certificates.",
+	"caRootsData":         "caRootsData contains base64-encoded data of a certificate bundle PEM file, which contains one or more CA roots in the PEM format. The total length of the data must not exceed 8192 characters.",
+	"caIntermediatesData": "caIntermediatesData contains base64-encoded data of a certificate bundle PEM file, which contains one or more intermediate certificates in the PEM format. The total length of the data must not exceed 8192 characters. caIntermediatesData requires caRootsData to be set.",
+	"pkiCertificateSubject": "pkiCertificateSubject defines the requirements imposed on the subject to which the certificate was issued.",
+}
+
+func (PKI) SwaggerDoc() map[string]string {
+	return map_PKI
+}
+
+var map_PKICertificateSubject = map[string]string{
+	"":         "PKICertificateSubject defines the requirements imposed on the subject to which the certificate was issued.",
+	"email":    "email specifies the expected email address imposed on the subject to which the certificate was issued, and must match the email address listed in the Subject Alternative Name (SAN) field of the certificate. The email must be a valid email address and at most 320 characters in length.",
+	"hostname": "hostname specifies the expected hostname imposed on the subject to which the certificate was issued, and it must match the hostname listed in the Subject Alternative Name (SAN) DNS field of the certificate. The hostname must be a valid DNS 1123 subdomain name, optionally prefixed by '*.', and at most 253 characters in length. It must consist only of lowercase alphanumeric characters, hyphens, periods and the optional preceding asterisk.",
+}
+
+func (PKICertificateSubject) SwaggerDoc() map[string]string {
+	return map_PKICertificateSubject
+}
+
+var map_Policy = map[string]string{
+	"":               "Policy defines the verification policy for the items in the scopes list.",
+	"rootOfTrust":    "rootOfTrust is a required field that defines the root of trust for verifying image signatures during retrieval. This allows image consumers to specify policyType and corresponding configuration of the policy, matching how the policy was generated.",
+	"signedIdentity": "signedIdentity is an optional field that specifies what image identity the signature claims about the image. This is useful when the image identity in the signature differs from the original image spec, such as when a mirror registry is configured for the image scope and the signature from the mirror registry contains the image identity of the mirror instead of the original scope. The required matchPolicy field specifies the approach used in the verification process to verify the identity in the signature and the actual image identity; the default matchPolicy is \"MatchRepoDigestOrExact\".",
+}
+
+func (Policy) SwaggerDoc() map[string]string {
+	return map_Policy
+}
+
+var map_PolicyFulcioSubject = map[string]string{
+	"":            "PolicyFulcioSubject defines the OIDC issuer and the email of the Fulcio authentication configuration.",
+	"oidcIssuer":  "oidcIssuer is a required field that contains the expected OIDC issuer. The oidcIssuer must be a valid URL and at most 2048 characters in length. It will be verified that the Fulcio-issued certificate contains a (Fulcio-defined) certificate extension pointing at this OIDC issuer URL. When Fulcio issues certificates, it includes a value based on a URL inside the client-provided ID token. Example: \"https://expected.OIDC.issuer/\"",
+	"signedEmail": "signedEmail is a required field that holds the email address that the Fulcio certificate is issued for. The signedEmail must be a valid email address and at most 320 characters in length. Example: \"expected-signing-user@example.com\"",
+}
+
+func (PolicyFulcioSubject) SwaggerDoc() map[string]string {
+	return map_PolicyFulcioSubject
+}
+
+var map_PolicyIdentity = map[string]string{
+	"": "PolicyIdentity defines the image identity the signature claims about the image. When omitted, the default matchPolicy is \"MatchRepoDigestOrExact\".",
+	"matchPolicy":     "matchPolicy is a required field that specifies the matching strategy to verify the image identity in the signature against the image scope. Allowed values are \"MatchRepoDigestOrExact\", \"MatchRepository\", \"ExactRepository\", \"RemapIdentity\". When omitted, the default value is \"MatchRepoDigestOrExact\". When set to \"MatchRepoDigestOrExact\", the identity in the signature must be in the same repository as the image identity if the image identity is referenced by a digest. Otherwise, the identity in the signature must be the same as the image identity. When set to \"MatchRepository\", the identity in the signature must be in the same repository as the image identity. When set to \"ExactRepository\", the exactRepository must be specified. The identity in the signature must be in the same repository as a specific identity specified by \"repository\". When set to \"RemapIdentity\", the remapIdentity must be specified. The signature must be the same as the remapped image identity. The remapped image identity is obtained by replacing the \"prefix\" with the specified \"signedPrefix\" if the image identity matches the specified remapPrefix.",
+	"exactRepository": "exactRepository specifies the repository that must be exactly matched by the identity in the signature. exactRepository is required if matchPolicy is set to \"ExactRepository\". It is used to verify that the signature claims an identity matching this exact repository, rather than the original image identity.",
+	"remapIdentity":   "remapIdentity specifies the prefix remapping rule for verifying image identity. remapIdentity is required if matchPolicy is set to \"RemapIdentity\". It is used to verify that the signature claims a different registry/repository prefix than the original image.",
+}
+
+func (PolicyIdentity) SwaggerDoc() map[string]string {
+	return map_PolicyIdentity
+}
+
+var map_PolicyMatchExactRepository = map[string]string{
+	"repository": "repository is the reference of the image identity to be matched. repository is required if matchPolicy is set to \"ExactRepository\". The value should be a repository name (by omitting the tag or digest) in a registry implementing the \"Docker Registry HTTP API V2\". For example, docker.io/library/busybox",
+}
+
+func (PolicyMatchExactRepository) SwaggerDoc() map[string]string {
+	return map_PolicyMatchExactRepository
+}
+
+var map_PolicyMatchRemapIdentity = map[string]string{
+	"prefix":       "prefix is required if matchPolicy is set to \"RemapIdentity\". prefix is the prefix of the image identity to be matched. If the image identity matches the specified prefix, that prefix is replaced by the specified \"signedPrefix\" (otherwise it is used unchanged and no remapping takes place). This is useful when verifying signatures for a mirror of some other repository namespace that preserves the vendor's repository structure. The prefix and signedPrefix values can be either host[:port] values (matching exactly the same host[:port] string), repository namespaces, or repositories (i.e. they must not contain tags/digests), and match as prefixes of the fully expanded form. For example, docker.io/library/busybox (not busybox) to specify that single repository, or docker.io/library (not an empty string) to specify the parent namespace of docker.io/library/busybox.",
+	"signedPrefix": "signedPrefix is required if matchPolicy is set to \"RemapIdentity\". signedPrefix is the prefix of the image identity to be matched in the signature. The format is the same as \"prefix\". The values can be either host[:port] values (matching exactly the same host[:port] string), repository namespaces, or repositories (i.e. they must not contain tags/digests), and match as prefixes of the fully expanded form. For example, docker.io/library/busybox (not busybox) to specify that single repository, or docker.io/library (not an empty string) to specify the parent namespace of docker.io/library/busybox.",
+}
+
+func (PolicyMatchRemapIdentity) SwaggerDoc() map[string]string {
+	return map_PolicyMatchRemapIdentity
+}
+
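The prefix/signedPrefix remapping described above is a plain prefix substitution on the fully expanded image identity. An illustrative sketch, not code from this patch:

package main

import (
	"fmt"
	"strings"
)

// remapIdentity rewrites the image identity used for signature matching:
// if image starts with prefix, that prefix is replaced by signedPrefix;
// otherwise the identity is used unchanged and no remapping takes place.
func remapIdentity(image, prefix, signedPrefix string) string {
	if image == prefix || strings.HasPrefix(image, prefix+"/") {
		return signedPrefix + strings.TrimPrefix(image, prefix)
	}
	return image
}

func main() {
	// A mirror under registry.internal/mirror preserves the vendor's layout,
	// while the signatures claim the original quay.io identity.
	fmt.Println(remapIdentity(
		"registry.internal/mirror/openshift-release-dev/ocp-release:4.17.0-x86_64",
		"registry.internal/mirror",
		"quay.io",
	))
	// Output: quay.io/openshift-release-dev/ocp-release:4.17.0-x86_64
}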
+var map_PolicyRootOfTrust = map[string]string{
+	"":                  "PolicyRootOfTrust defines the root of trust based on the selected policyType.",
+	"policyType":        "policyType is a required field that specifies the type of the policy for verification. This field must correspond to how the policy was generated. Allowed values are \"PublicKey\", \"FulcioCAWithRekor\", and \"PKI\". When set to \"PublicKey\", the policy relies on a sigstore publicKey and may optionally use a Rekor verification. When set to \"FulcioCAWithRekor\", the policy is based on the Fulcio certification and incorporates a Rekor verification. When set to \"PKI\", the policy is based on the certificates from Bring Your Own Public Key Infrastructure (BYOPKI). This value is enabled by turning on the SigstoreImageVerificationPKI feature gate.",
+	"publicKey":         "publicKey defines the root of trust configuration based on a sigstore public key. Optionally include a Rekor public key for Rekor verification. publicKey is required when policyType is PublicKey, and forbidden otherwise.",
+	"fulcioCAWithRekor": "fulcioCAWithRekor defines the root of trust configuration based on the Fulcio certificate and the Rekor public key. fulcioCAWithRekor is required when policyType is FulcioCAWithRekor, and forbidden otherwise. For more information about Fulcio and Rekor, please refer to the documents at: https://github.com/sigstore/fulcio and https://github.com/sigstore/rekor",
+	"pki":               "pki defines the root of trust configuration based on Bring Your Own Public Key Infrastructure (BYOPKI) Root CA(s) and corresponding intermediate certificates. pki is required when policyType is PKI, and forbidden otherwise.",
+}
+
+func (PolicyRootOfTrust) SwaggerDoc() map[string]string {
+	return map_PolicyRootOfTrust
+}
+
+var map_PublicKey = map[string]string{
+	"":             "PublicKey defines the root of trust based on a sigstore public key.",
+	"keyData":      "keyData is a required field that contains inline base64-encoded data for the PEM-format public key. keyData must be at most 8192 characters.",
+	"rekorKeyData": "rekorKeyData is an optional field that contains inline base64-encoded data for the PEM-format Rekor public key. rekorKeyData must be at most 8192 characters.",
+}
+
+func (PublicKey) SwaggerDoc() map[string]string {
+	return map_PublicKey
+}
+
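policyType behaves as a union discriminator: the member matching the selected type must be set and the others must be unset. A hypothetical sketch of that invariant (the API enforces it server-side; constant and pointer-member names are assumed from config/v1alpha1):

package main

import (
	"errors"
	"fmt"

	"github.com/openshift/api/config/v1alpha1"
)

// checkRootOfTrust mirrors the documented union rule: exactly the
// configuration matching policyType may be populated.
func checkRootOfTrust(r v1alpha1.PolicyRootOfTrust) error {
	set := map[v1alpha1.PolicyType]bool{
		v1alpha1.PublicKeyRootOfTrust:         r.PublicKey != nil,
		v1alpha1.FulcioCAWithRekorRootOfTrust: r.FulcioCAWithRekor != nil,
		v1alpha1.PKIRootOfTrust:               r.PKI != nil,
	}
	for t, isSet := range set {
		if t == r.PolicyType && !isSet {
			return fmt.Errorf("%s is selected but its configuration is unset", t)
		}
		if t != r.PolicyType && isSet {
			return errors.New("configuration set for a non-selected policy type")
		}
	}
	return nil
}

func main() {
	fmt.Println(checkRootOfTrust(v1alpha1.PolicyRootOfTrust{
		PolicyType: v1alpha1.PublicKeyRootOfTrust,
		PublicKey:  &v1alpha1.PublicKey{KeyData: []byte("-----BEGIN PUBLIC KEY-----\n...placeholder...")},
	})) // <nil>
}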
 var map_ImageTagMirrorSet = map[string]string{
 	"":         "ImageTagMirrorSet holds cluster-wide information about how to handle registry mirror rules on using tag pull specification. When multiple policies are defined, the outcome of the behavior is defined on each field.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
 	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
@@ -1260,6 +1480,7 @@ var map_AzurePlatformStatus = map[string]string{
 	"cloudName": "cloudName is the name of the Azure cloud environment which can be used to configure the Azure SDK with the appropriate Azure API endpoints. If empty, the value is equal to `AzurePublicCloud`.",
 	"armEndpoint": "armEndpoint specifies a URL to use for resource management in non-sovereign clouds such as Azure Stack.",
 	"resourceTags": "resourceTags is a list of additional tags to apply to Azure resources created for the cluster. See https://docs.microsoft.com/en-us/rest/api/resources/tags for information on tagging Azure resources. Due to limitations on Automation, Content Delivery Network, DNS Azure resources, a maximum of 15 tags may be applied. OpenShift reserves 5 tags for internal use, allowing 10 tags for user configuration.",
+	"cloudLoadBalancerConfig": "cloudLoadBalancerConfig holds configuration related to DNS and cloud load balancers. It allows configuration of in-cluster DNS as an alternative to the platform default DNS implementation. When using the ClusterHosted DNS type, Load Balancer IP addresses must be provided for the API and internal API load balancers as well as the ingress load balancer.",
 }
 
 func (AzurePlatformStatus) SwaggerDoc() map[string]string {
@@ -1392,7 +1613,7 @@ var map_GCPPlatformStatus = map[string]string{
 	"resourceLabels": "resourceLabels is a list of additional labels to apply to GCP resources created for the cluster. See https://cloud.google.com/compute/docs/labeling-resources for information on labeling GCP resources. GCP supports a maximum of 64 labels per resource. OpenShift reserves 32 labels for internal use, allowing 32 labels for user configuration.",
 	"resourceTags": "resourceTags is a list of additional tags to apply to GCP resources created for the cluster. See https://cloud.google.com/resource-manager/docs/tags/tags-overview for information on tagging GCP resources. GCP supports a maximum of 50 tags per resource.",
 	"cloudLoadBalancerConfig": "cloudLoadBalancerConfig holds configuration related to DNS and cloud load balancers. It allows configuration of in-cluster DNS as an alternative to the platform default DNS implementation. When using the ClusterHosted DNS type, Load Balancer IP addresses must be provided for the API and internal API load balancers as well as the ingress load balancer.",
-	"serviceEndpoints": "serviceEndpoints specifies endpoints that override the default endpoints used when creating clients to interact with GCP services. When not specified, the default endpoint for the GCP region will be used. Only 1 endpoint override is permitted for each GCP service. The maximum number of endpoint overrides allowed is 9.",
+	"serviceEndpoints": "serviceEndpoints specifies endpoints that override the default endpoints used when creating clients to interact with GCP services. When not specified, the default endpoint for the GCP region will be used. Only 1 endpoint override is permitted for each GCP service. The maximum number of endpoint overrides allowed is 11.",
 }
 
 func (GCPPlatformStatus) SwaggerDoc() map[string]string {
@@ -1432,7 +1653,7 @@ func (GCPServiceEndpoint) SwaggerDoc() map[string]string {
 var map_IBMCloudPlatformSpec = map[string]string{
 	"": "IBMCloudPlatformSpec holds the desired state of the IBMCloud infrastructure provider. This only includes fields that can be modified in the cluster.",
-	"serviceEndpoints": "serviceEndpoints is a list of custom endpoints which will override the default service endpoints of an IBM service. These endpoints are used by components within the cluster when trying to reach the IBM Cloud Services that have been overriden. The CCCMO reads in the IBMCloudPlatformSpec and validates each endpoint is resolvable. Once validated, the cloud config and IBMCloudPlatformStatus are updated to reflect the same custom endpoints. A maximum of 13 service endpoints overrides are supported.",
+	"serviceEndpoints": "serviceEndpoints is a list of custom endpoints which will override the default service endpoints of an IBM service. These endpoints are used by components within the cluster when trying to reach the IBM Cloud Services that have been overridden. The CCCMO reads in the IBMCloudPlatformSpec and validates each endpoint is resolvable. Once validated, the cloud config and IBMCloudPlatformStatus are updated to reflect the same custom endpoints. A maximum of 13 service endpoint overrides are supported.",
 }
 
 func (IBMCloudPlatformSpec) SwaggerDoc() map[string]string {
@@ -1446,7 +1667,7 @@ var map_IBMCloudPlatformStatus = map[string]string{
 	"providerType": "providerType indicates the type of cluster that was created",
 	"cisInstanceCRN": "cisInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain",
 	"dnsInstanceCRN": "dnsInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain",
-	"serviceEndpoints": "serviceEndpoints is a list of custom endpoints which will override the default service endpoints of an IBM service. These endpoints are used by components within the cluster when trying to reach the IBM Cloud Services that have been overriden. The CCCMO reads in the IBMCloudPlatformSpec and validates each endpoint is resolvable. Once validated, the cloud config and IBMCloudPlatformStatus are updated to reflect the same custom endpoints.",
+	"serviceEndpoints": "serviceEndpoints is a list of custom endpoints which will override the default service endpoints of an IBM service. These endpoints are used by components within the cluster when trying to reach the IBM Cloud Services that have been overridden. The CCCMO reads in the IBMCloudPlatformSpec and validates each endpoint is resolvable. Once validated, the cloud config and IBMCloudPlatformStatus are updated to reflect the same custom endpoints.",
 }
 
 func (IBMCloudPlatformStatus) SwaggerDoc() map[string]string {
@@ -1974,6 +2195,26 @@ func (LoadBalancer) SwaggerDoc() map[string]string {
 	return map_LoadBalancer
 }
 
+var map_AWSKMSConfig = map[string]string{
+	"":       "AWSKMSConfig defines the KMS config specific to the AWS KMS provider",
+	"keyARN": "keyARN specifies the Amazon Resource Name (ARN) of the AWS KMS key used for encryption. The value must adhere to the format `arn:aws:kms:<region>:<account_id>:key/<key_id>`, where: - `<region>` is the AWS region consisting of lowercase letters and hyphens followed by a number. - `<account_id>` is a 12-digit numeric identifier for the AWS account. - `<key_id>` is a unique identifier for the KMS key, consisting of lowercase hexadecimal characters and hyphens.",
+	"region": "region specifies the AWS region where the KMS instance exists, and follows the format `<region-prefix>-<region-name>-<number>`, e.g.: `us-east-1`. Only lowercase letters and hyphens followed by numbers are allowed.",
+}
+
+func (AWSKMSConfig) SwaggerDoc() map[string]string {
+	return map_AWSKMSConfig
+}
+
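The documented keyARN shape can be sanity-checked with a regular expression. The pattern below is derived from the field documentation and is illustrative only, not part of the API's validation:

package main

import (
	"fmt"
	"regexp"
)

// keyARNPattern encodes the documented format
// arn:aws:kms:<region>:<account_id>:key/<key_id>: a lowercase region ending
// in a number, a 12-digit account ID, and a hex/hyphen key ID.
var keyARNPattern = regexp.MustCompile(
	`^arn:aws:kms:[a-z-]+-[0-9]+:[0-9]{12}:key/[0-9a-f-]+$`)

func main() {
	arn := "arn:aws:kms:us-east-1:123456789012:key/1234abcd-12ab-34cd-56ef-1234567890ab"
	fmt.Println(keyARNPattern.MatchString(arn)) // true
}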
+var map_KMSConfig = map[string]string{
+	"":     "KMSConfig defines the configuration for the KMS instance that will be used with KMSEncryptionProvider encryption",
+	"type": "type defines the kind of platform for the KMS provider. Currently, AWS is the only supported provider type.",
+	"aws":  "aws defines the key config for using an AWS KMS instance for the encryption. The AWS KMS instance is managed by the user outside the purview of the control plane.",
+}
+
+func (KMSConfig) SwaggerDoc() map[string]string {
+	return map_KMSConfig
+}
+
 var map_ClusterNetworkEntry = map[string]string{
 	"":     "ClusterNetworkEntry is a contiguous block of IP addresses from which pod IPs are allocated.",
 	"cidr": "The complete block for pod IPs.",
diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_backup.go b/vendor/github.com/openshift/api/config/v1alpha1/types_backup.go
index e52a2e5c53..77df372d43 100644
--- a/vendor/github.com/openshift/api/config/v1alpha1/types_backup.go
+++ b/vendor/github.com/openshift/api/config/v1alpha1/types_backup.go
@@ -134,7 +134,7 @@ type RetentionNumberConfig struct {
 	// the oldest backup will be removed before a new backup is initiated.
 	// +kubebuilder:validation:Minimum=1
 	// +required
-	MaxNumberOfBackups int `json:"maxNumberOfBackups,omitempty"`
+	MaxNumberOfBackups int `json:"maxNumberOfBackups"`
 }
 
 // RetentionSizeConfig specifies the configuration of the retention policy on the total size of backups
@@ -144,7 +144,7 @@ type RetentionSizeConfig struct {
 	// the oldest backup will be removed before a new backup is initiated.
 	// +kubebuilder:validation:Minimum=1
 	// +required
-	MaxSizeOfBackupsGb int `json:"maxSizeOfBackupsGb,omitempty"`
+	MaxSizeOfBackupsGb int `json:"maxSizeOfBackupsGb"`
 }
 
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_monitoring.go b/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_monitoring.go
index c276971b5e..f6d4cd3420 100644
--- a/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_monitoring.go
+++ b/vendor/github.com/openshift/api/config/v1alpha1/types_cluster_monitoring.go
@@ -17,6 +17,8 @@ limitations under the License.
 package v1alpha1
 
 import (
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
@@ -51,7 +53,7 @@ type ClusterMonitoring struct {
 	Status ClusterMonitoringStatus `json:"status,omitempty"`
 }
 
-// MonitoringOperatorStatus defines the observed state of MonitoringOperator
+// ClusterMonitoringStatus defines the observed state of ClusterMonitoring
 type ClusterMonitoringStatus struct {
 }
 
@@ -72,22 +74,37 @@ type ClusterMonitoringList struct {
 }
 
 // ClusterMonitoringSpec defines the desired state of Cluster Monitoring Operator
-// +required
+// +kubebuilder:validation:MinProperties=1
 type ClusterMonitoringSpec struct {
 	// userDefined sets the deployment mode for user-defined monitoring in addition to the default platform monitoring.
-	// +required
-	UserDefined UserDefinedMonitoring `json:"userDefined"`
+	// userDefined is optional.
+	// When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time.
+	// The current default value is `Disabled`.
+	// +optional
+	UserDefined UserDefinedMonitoring `json:"userDefined,omitempty,omitzero"`
+	// alertmanagerConfig allows users to configure how the default Alertmanager instance
+	// should be deployed in the `openshift-monitoring` namespace.
+	// alertmanagerConfig is optional.
+	// When omitted, this means no opinion and the platform is left to choose a reasonable default, that is subject to change over time.
+	// The current default value is `DefaultConfig`.
+	// +optional
+	AlertmanagerConfig AlertmanagerConfig `json:"alertmanagerConfig,omitempty,omitzero"`
+	// metricsServerConfig is an optional field that can be used to configure the Kubernetes Metrics Server that runs in the openshift-monitoring namespace.
+	// Specifically, it can configure how the Metrics Server instance is deployed, pod scheduling, its audit policy and log verbosity.
+	// When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time.
+	// +optional
+	MetricsServerConfig MetricsServerConfig `json:"metricsServerConfig,omitempty,omitzero"`
 }
 
 // UserDefinedMonitoring config for user-defined projects.
-// +required
 type UserDefinedMonitoring struct {
 	// mode defines the different configurations of UserDefinedMonitoring
 	// Valid values are Disabled and NamespaceIsolated
 	// Disabled disables monitoring for user-defined projects. This restricts the default monitoring stack, installed in the openshift-monitoring project, to monitor only platform namespaces, which prevents any custom monitoring configurations or resources from being applied to user-defined namespaces.
 	// NamespaceIsolated enables monitoring for user-defined projects with namespace-scoped tenancy. This ensures that metrics, alerts, and monitoring data are isolated at the namespace level.
-	// +kubebuilder:validation:Enum:="Disabled";"NamespaceIsolated"
+	// The current default value is `Disabled`.
 	// +required
+	// +kubebuilder:validation:Enum=Disabled;NamespaceIsolated
 	Mode UserDefinedMode `json:"mode"`
 }
 
@@ -101,3 +118,345 @@ const (
 	// UserDefinedNamespaceIsolated enables monitoring for user-defined projects with namespace-scoped tenancy. This ensures that metrics, alerts, and monitoring data are isolated at the namespace level.
 	UserDefinedNamespaceIsolated UserDefinedMode = "NamespaceIsolated"
 )
+
+// AlertmanagerConfig provides configuration options for the default Alertmanager instance
+// that runs in the `openshift-monitoring` namespace. Use this configuration to control
+// whether the default Alertmanager is deployed, how it logs, and how its pods are scheduled.
+// +kubebuilder:validation:XValidation:rule="self.deploymentMode == 'CustomConfig' ? has(self.customConfig) : !has(self.customConfig)",message="customConfig is required when deploymentMode is CustomConfig, and forbidden otherwise"
+type AlertmanagerConfig struct {
+	// deploymentMode determines whether the default Alertmanager instance should be deployed
+	// as part of the monitoring stack.
+	// Allowed values are Disabled, DefaultConfig, and CustomConfig.
+	// When set to Disabled, the Alertmanager instance will not be deployed.
+	// When set to DefaultConfig, the platform will deploy Alertmanager with default settings.
+	// When set to CustomConfig, the Alertmanager will be deployed with custom configuration.
+	//
+	// +unionDiscriminator
+	// +required
+	DeploymentMode AlertManagerDeployMode `json:"deploymentMode,omitempty"`
+
+	// customConfig must be set when deploymentMode is CustomConfig, and must be unset otherwise.
+	// When set to CustomConfig, the Alertmanager will be deployed with custom configuration.
+	// +optional
+	CustomConfig AlertmanagerCustomConfig `json:"customConfig,omitempty,omitzero"`
+}
+
+// AlertmanagerCustomConfig represents the configuration for a custom Alertmanager deployment
+// in the `openshift-monitoring` namespace. Use this configuration to control how the
+// custom Alertmanager logs and how its pods are scheduled.
+// +kubebuilder:validation:MinProperties=1
+type AlertmanagerCustomConfig struct {
+	// logLevel defines the verbosity of logs emitted by Alertmanager.
+	// This field allows users to control the amount and severity of logs generated, which can be useful
+	// for debugging issues or reducing noise in production environments.
+	// Allowed values are Error, Warn, Info, and Debug.
+	// When set to Error, only errors will be logged.
+	// When set to Warn, both warnings and errors will be logged.
+	// When set to Info, general information, warnings, and errors will all be logged.
+	// When set to Debug, detailed debugging information will be logged.
+	// When omitted, this means no opinion and the platform is left to choose a reasonable default, that is subject to change over time.
+	// The current default value is `Info`.
+	// +optional
+	LogLevel LogLevel `json:"logLevel,omitempty"`
+	// nodeSelector defines the nodes on which the Pods are scheduled.
+	// nodeSelector is optional.
+	//
+	// When omitted, this means the user has no opinion and the platform is left
+	// to choose reasonable defaults. These defaults are subject to change over time.
+	// The current default value is `kubernetes.io/os: linux`.
+	// +optional
+	// +kubebuilder:validation:MinProperties=1
+	// +kubebuilder:validation:MaxProperties=10
+	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+	// resources defines the compute resource requests and limits for the Alertmanager container.
+	// This includes CPU, memory and HugePages constraints to help control scheduling and resource usage.
+	// When not specified, defaults are used by the platform. Requests cannot exceed limits.
+	// This field is optional.
+	// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+	// This is a simplified API that maps to Kubernetes ResourceRequirements.
+	// The current default values are:
+	//   resources:
+	//   - name: cpu
+	//     request: 4m
+	//     limit: null
+	//   - name: memory
+	//     request: 40Mi
+	//     limit: null
+	// Maximum length for this list is 10.
+	// Minimum length for this list is 1.
+	// +optional
+	// +listType=map
+	// +listMapKey=name
+	// +kubebuilder:validation:MaxItems=10
+	// +kubebuilder:validation:MinItems=1
+	Resources []ContainerResource `json:"resources,omitempty"`
+	// secrets defines a list of secrets that need to be mounted into the Alertmanager.
+	// The secrets must reside within the same namespace as the Alertmanager object.
+	// They will be added as volumes named secret-<secret-name> and mounted at
+	// /etc/alertmanager/secrets/<secret-name> within the 'alertmanager' container of
+	// the Alertmanager Pods.
+	//
+	// These secrets can be used to authenticate Alertmanager with endpoint receivers.
+	// For example, you can use secrets to:
+	// - Provide certificates for TLS authentication with receivers that require private CA certificates
+	// - Store credentials for Basic HTTP authentication with receivers that require password-based auth
+	// - Store any other authentication credentials needed by your alert receivers
+	//
+	// This field is optional.
+	// Maximum length for this list is 10.
+	// Minimum length for this list is 1.
+	// Entries in this list must be unique.
+	// +optional
+	// +kubebuilder:validation:MaxItems=10
+	// +kubebuilder:validation:MinItems=1
+	// +listType=set
+	Secrets []SecretName `json:"secrets,omitempty"`
+	// tolerations defines tolerations for the pods.
+	// tolerations is optional.
+	//
+	// When omitted, this means the user has no opinion and the platform is left
+	// to choose reasonable defaults. These defaults are subject to change over time.
+	// Defaults are empty/unset.
+	// Maximum length for this list is 10.
+	// Minimum length for this list is 1.
+	// +kubebuilder:validation:MaxItems=10
+	// +kubebuilder:validation:MinItems=1
+	// +listType=atomic
+	// +optional
+	Tolerations []v1.Toleration `json:"tolerations,omitempty"`
+	// topologySpreadConstraints defines rules for how Alertmanager Pods should be distributed
+	// across topology domains such as zones, nodes, or other user-defined labels.
+	// topologySpreadConstraints is optional.
+	// This helps improve high availability and resource efficiency by avoiding placing
+	// too many replicas in the same failure domain.
+	//
+	// When omitted, this means no opinion and the platform is left to choose a default, which is subject to change over time.
+	// This field maps directly to the `topologySpreadConstraints` field in the Pod spec.
+	// Default is empty list.
+	// Maximum length for this list is 10.
+	// Minimum length for this list is 1.
+	// Entries must have unique topologyKey and whenUnsatisfiable pairs.
+	// +kubebuilder:validation:MaxItems=10
+	// +kubebuilder:validation:MinItems=1
+	// +listType=map
+	// +listMapKey=topologyKey
+	// +listMapKey=whenUnsatisfiable
+	// +optional
+	TopologySpreadConstraints []v1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"`
+	// volumeClaimTemplate defines persistent storage for Alertmanager. Use this setting to
+	// configure the persistent volume claim, including storage class, volume
+	// size, and name.
+	// If omitted, the Pod uses ephemeral storage and alert data will not persist
+	// across restarts.
+	// This field is optional.
+	// +optional
+	VolumeClaimTemplate *v1.PersistentVolumeClaim `json:"volumeClaimTemplate,omitempty"`
+}
+
+// AlertManagerDeployMode defines the deployment state of the platform Alertmanager instance.
+//
+// Possible values:
+//   - "Disabled": The Alertmanager instance will not be deployed.
+//   - "DefaultConfig": The Alertmanager instance will be deployed with default settings.
+//   - "CustomConfig": The Alertmanager instance will be deployed with custom configuration.
+//
+// +kubebuilder:validation:Enum=Disabled;DefaultConfig;CustomConfig
+type AlertManagerDeployMode string
+
+const (
+	// AlertManagerDeployModeDisabled means the Alertmanager instance will not be deployed.
+	AlertManagerDeployModeDisabled AlertManagerDeployMode = "Disabled"
+	// AlertManagerDeployModeDefaultConfig means the Alertmanager instance will be deployed with default settings.
+	AlertManagerDeployModeDefaultConfig AlertManagerDeployMode = "DefaultConfig"
+	// AlertManagerDeployModeCustomConfig means the Alertmanager instance will be deployed with custom configuration.
+	AlertManagerDeployModeCustomConfig AlertManagerDeployMode = "CustomConfig"
+)
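A short usage sketch of these deployment modes, built with the types added in this patch (values are illustrative; the CEL rule on AlertmanagerConfig requires customConfig exactly when deploymentMode is CustomConfig):

package main

import (
	"fmt"

	"github.com/openshift/api/config/v1alpha1"
)

func main() {
	cfg := v1alpha1.AlertmanagerConfig{
		DeploymentMode: v1alpha1.AlertManagerDeployModeCustomConfig,
		CustomConfig: v1alpha1.AlertmanagerCustomConfig{
			LogLevel:     v1alpha1.LogLevelDebug,
			NodeSelector: map[string]string{"kubernetes.io/os": "linux"},
			// A secret mounted for TLS authentication with a receiver;
			// the name is a placeholder.
			Secrets: []v1alpha1.SecretName{"alertmanager-receiver-tls"},
		},
	}
	fmt.Println(cfg.DeploymentMode)
}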
+
+// LogLevel defines the verbosity of logs emitted by Alertmanager.
+// Valid values are Error, Warn, Info and Debug.
+// +kubebuilder:validation:Enum=Error;Warn;Info;Debug
+type LogLevel string
+
+const (
+	// LogLevelError means only errors will be logged.
+	LogLevelError LogLevel = "Error"
+	// LogLevelWarn means both warnings and errors will be logged.
+	LogLevelWarn LogLevel = "Warn"
+	// LogLevelInfo means general information, warnings, and errors will all be logged.
+	LogLevelInfo LogLevel = "Info"
+	// LogLevelDebug means detailed debugging information will be logged.
+	LogLevelDebug LogLevel = "Debug"
+)
+
+// ContainerResource defines a single resource requirement for a container.
+// +kubebuilder:validation:XValidation:rule="has(self.request) || has(self.limit)",message="at least one of request or limit must be set"
+// +kubebuilder:validation:XValidation:rule="!(has(self.request) && has(self.limit)) || quantity(self.limit).compareTo(quantity(self.request)) >= 0",message="limit must be greater than or equal to request"
+type ContainerResource struct {
+	// name of the resource (e.g. "cpu", "memory", "hugepages-2Mi").
+	// This field is required.
+	// name must consist only of alphanumeric characters, `-`, `_` and `.` and must start and end with an alphanumeric character.
+	// +required
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:MaxLength=253
+	// +kubebuilder:validation:XValidation:rule="!format.qualifiedName().validate(self).hasValue()",message="name must consist only of alphanumeric characters, `-`, `_` and `.` and must start and end with an alphanumeric character"
+	Name string `json:"name,omitempty"`
+
+	// request is the minimum amount of the resource required (e.g. "2Mi", "1Gi").
+	// This field is optional.
+	// When limit is specified, request cannot be greater than limit.
+	// +optional
+	// +kubebuilder:validation:XIntOrString
+	// +kubebuilder:validation:MaxLength=20
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:XValidation:rule="isQuantity(self) && quantity(self).isGreaterThan(quantity('0'))",message="request must be a positive, non-zero quantity"
+	Request resource.Quantity `json:"request,omitempty"`
+
+	// limit is the maximum amount of the resource allowed (e.g. "2Mi", "1Gi").
+	// This field is optional.
+	// When request is specified, limit cannot be less than request.
+	// The value must be greater than 0 when specified.
+	// +optional
+	// +kubebuilder:validation:XIntOrString
+	// +kubebuilder:validation:MaxLength=20
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:XValidation:rule="isQuantity(self) && quantity(self).isGreaterThan(quantity('0'))",message="limit must be a positive, non-zero quantity"
+	Limit resource.Quantity `json:"limit,omitempty"`
+}
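A brief sketch of how ContainerResource entries might be populated with apimachinery quantities (illustrative values; the CEL rules above require request to be at most limit for each entry):

package main

import (
	"fmt"

	"github.com/openshift/api/config/v1alpha1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	res := []v1alpha1.ContainerResource{
		{Name: "cpu", Request: resource.MustParse("4m"), Limit: resource.MustParse("100m")},
		{Name: "memory", Request: resource.MustParse("40Mi"), Limit: resource.MustParse("512Mi")},
	}
	for _, r := range res {
		// Quantity.String() normalizes the values, e.g. "4m" and "40Mi".
		fmt.Printf("%s: request=%s limit=%s\n", r.Name, r.Request.String(), r.Limit.String())
	}
}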
+
+// SecretName is a type that represents the name of a Secret in the same namespace.
+// It must be at most 253 characters in length.
+// +kubebuilder:validation:XValidation:rule="!format.dns1123Subdomain().validate(self).hasValue()",message="a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character."
+// +kubebuilder:validation:MaxLength=253
+type SecretName string
+
+// MetricsServerConfig provides configuration options for the Metrics Server instance
+// that runs in the `openshift-monitoring` namespace. Use this configuration to control
+// how the Metrics Server instance is deployed, how it logs, and how its pods are scheduled.
+// +kubebuilder:validation:MinProperties=1
+type MetricsServerConfig struct {
+	// audit defines the audit configuration used by the Metrics Server instance.
+	// audit is optional.
+	// When omitted, this means no opinion and the platform is left to choose a reasonable default, that is subject to change over time.
+	// The current default sets audit.profile to Metadata.
+	// +optional
+	Audit Audit `json:"audit,omitempty,omitzero"`
+	// nodeSelector defines the nodes on which the Pods are scheduled.
+	// nodeSelector is optional.
+	//
+	// When omitted, this means the user has no opinion and the platform is left
+	// to choose reasonable defaults. These defaults are subject to change over time.
+	// The current default value is `kubernetes.io/os: linux`.
+	// +optional
+	// +kubebuilder:validation:MinProperties=1
+	// +kubebuilder:validation:MaxProperties=10
+	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+	// tolerations defines tolerations for the pods.
+	// tolerations is optional.
+	//
+	// When omitted, this means the user has no opinion and the platform is left
+	// to choose reasonable defaults. These defaults are subject to change over time.
+	// Defaults are empty/unset.
+	// Maximum length for this list is 10.
+	// Minimum length for this list is 1.
+	// +kubebuilder:validation:MaxItems=10
+	// +kubebuilder:validation:MinItems=1
+	// +listType=atomic
+	// +optional
+	Tolerations []v1.Toleration `json:"tolerations,omitempty"`
+	// verbosity defines the verbosity of log messages for Metrics Server.
+	// Valid values are Errors, Info, Trace and TraceAll.
+	// When set to Errors, only critical messages and errors are logged.
+	// When set to Info, only basic information messages are logged.
+	// When set to Trace, information useful for general debugging is logged.
+	// When set to TraceAll, detailed information about metric scraping is logged.
+	// When omitted, this means no opinion and the platform is left to choose a reasonable default, that is subject to change over time.
+	// The current default value is `Errors`.
+	// +optional
+	Verbosity VerbosityLevel `json:"verbosity,omitempty,omitzero"`
+	// resources defines the compute resource requests and limits for the Metrics Server container.
+	// This includes CPU, memory and HugePages constraints to help control scheduling and resource usage.
+	// When not specified, defaults are used by the platform. Requests cannot exceed limits.
+	// This field is optional.
+	// More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+	// This is a simplified API that maps to Kubernetes ResourceRequirements.
+	// The current default values are:
+	//   resources:
+	//   - name: cpu
+	//     request: 4m
+	//     limit: null
+	//   - name: memory
+	//     request: 40Mi
+	//     limit: null
+	// Maximum length for this list is 10.
+	// Minimum length for this list is 1.
+	// +optional
+	// +listType=map
+	// +listMapKey=name
+	// +kubebuilder:validation:MaxItems=10
+	// +kubebuilder:validation:MinItems=1
+	Resources []ContainerResource `json:"resources,omitempty"`
+	// topologySpreadConstraints defines rules for how Metrics Server Pods should be distributed
+	// across topology domains such as zones, nodes, or other user-defined labels.
+	// topologySpreadConstraints is optional.
+	// This helps improve high availability and resource efficiency by avoiding placing
+	// too many replicas in the same failure domain.
+	//
+	// When omitted, this means no opinion and the platform is left to choose a default, which is subject to change over time.
+	// This field maps directly to the `topologySpreadConstraints` field in the Pod spec.
+	// Default is empty list.
+	// Maximum length for this list is 10.
+	// Minimum length for this list is 1.
+	// Entries must have unique topologyKey and whenUnsatisfiable pairs.
+	// +kubebuilder:validation:MaxItems=10
+	// +kubebuilder:validation:MinItems=1
+	// +listType=map
+	// +listMapKey=topologyKey
+	// +listMapKey=whenUnsatisfiable
+	// +optional
+	TopologySpreadConstraints []v1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"`
+}
+
+// AuditProfile defines the audit log level for the Metrics Server.
+// +kubebuilder:validation:Enum=None;Metadata;Request;RequestResponse
+type AuditProfile string
+
+const (
+	// AuditProfileNone disables audit logging.
+	AuditProfileNone AuditProfile = "None"
+	// AuditProfileMetadata logs request metadata (requesting user, timestamp, resource, verb, etc.) but not request or response body.
+	AuditProfileMetadata AuditProfile = "Metadata"
+	// AuditProfileRequest logs event metadata and request body but not response body.
+	AuditProfileRequest AuditProfile = "Request"
+	// AuditProfileRequestResponse logs event metadata, request and response bodies.
+	AuditProfileRequestResponse AuditProfile = "RequestResponse"
+)
+
+// VerbosityLevel defines the verbosity of log messages for Metrics Server.
+// +kubebuilder:validation:Enum=Errors;Info;Trace;TraceAll
+type VerbosityLevel string
+
+const (
+	// VerbosityLevelErrors means only critical messages and errors are logged.
+	VerbosityLevelErrors VerbosityLevel = "Errors"
+	// VerbosityLevelInfo means basic informational messages are logged.
+	VerbosityLevelInfo VerbosityLevel = "Info"
+	// VerbosityLevelTrace means extended information useful for general debugging is logged.
+	VerbosityLevelTrace VerbosityLevel = "Trace"
+	// VerbosityLevelTraceAll means detailed information about metric scraping operations is logged.
+	VerbosityLevelTraceAll VerbosityLevel = "TraceAll"
+)
+
+// Audit holds the audit profile configuration for the Metrics Server.
+type Audit struct {
+	// profile is a required field for configuring the audit log level of the Kubernetes Metrics Server.
+	// Allowed values are None, Metadata, Request, or RequestResponse.
+	// When set to None, audit logging is disabled and no audit events are recorded.
+	// When set to Metadata, only request metadata (such as requesting user, timestamp, resource, verb, etc.) is logged, but not the request or response body.
+	// When set to Request, event metadata and the request body are logged, but not the response body.
+	// When set to RequestResponse, event metadata, request body, and response body are all logged, providing the most detailed audit information.
+	//
+	// See: https://kubernetes.io/docs/tasks/debug-application-cluster/audit/#audit-policy
+	// for more information about auditing and log levels.
+	// +required
+	Profile AuditProfile `json:"profile,omitempty"`
+}
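A corresponding sketch for the Metrics Server configuration, using the Audit and VerbosityLevel types defined above (illustrative values only):

package main

import (
	"fmt"

	"github.com/openshift/api/config/v1alpha1"
)

func main() {
	// Request per-request audit logging and verbose tracing for the
	// Metrics Server running in openshift-monitoring.
	msc := v1alpha1.MetricsServerConfig{
		Audit:     v1alpha1.Audit{Profile: v1alpha1.AuditProfileRequest},
		Verbosity: v1alpha1.VerbosityLevelTrace,
	}
	fmt.Println(msc.Audit.Profile, msc.Verbosity)
}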
diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_image_policy.go b/vendor/github.com/openshift/api/config/v1alpha1/types_image_policy.go
index 24ff257c93..64a89e4a63 100644
--- a/vendor/github.com/openshift/api/config/v1alpha1/types_image_policy.go
+++ b/vendor/github.com/openshift/api/config/v1alpha1/types_image_policy.go
@@ -78,7 +78,7 @@ type PolicyRootOfTrust struct {
 	// policyType serves as the union's discriminator. Users are required to assign a value to this field, choosing one of the policy types that define the root of trust.
 	// "PublicKey" indicates that the policy relies on a sigstore publicKey and may optionally use a Rekor verification.
 	// "FulcioCAWithRekor" indicates that the policy is based on the Fulcio certification and incorporates a Rekor verification.
-	// "PKI" is a DevPreview feature that indicates that the policy is based on the certificates from Bring Your Own Public Key Infrastructure (BYOPKI). This value is enabled by turning on the SigstoreImageVerificationPKI feature gate.
+	// "PKI" indicates that the policy is based on the certificates from Bring Your Own Public Key Infrastructure (BYOPKI). This value is enabled by turning on the SigstoreImageVerificationPKI feature gate.
 	// +unionDiscriminator
 	// +required
 	PolicyType PolicyType `json:"policyType"`
@@ -134,7 +134,7 @@ type FulcioCAWithRekor struct {
 	RekorKeyData []byte `json:"rekorKeyData"`
 	// fulcioSubject specifies OIDC issuer and the email of the Fulcio authentication configuration.
 	// +required
-	FulcioSubject PolicyFulcioSubject `json:"fulcioSubject,omitempty"`
+	FulcioSubject PolicyFulcioSubject `json:"fulcioSubject"`
 }
 
 // PolicyFulcioSubject defines the OIDC issuer and the email of the Fulcio authentication configuration.
@@ -261,6 +261,7 @@ type ImagePolicyStatus struct {
 	// conditions provide details on the status of this API Resource.
 	// +listType=map
 	// +listMapKey=type
+	// +optional
 	Conditions []metav1.Condition `json:"conditions,omitempty"`
 }
 
diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go
index 17d74e0fa8..6549f6cbe4 100644
--- a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go
+++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go
@@ -1,15 +1,103 @@
 //go:build !ignore_autogenerated
 // +build !ignore_autogenerated
 
-// Code generated by deepcopy-gen. DO NOT EDIT.
+// Code generated by codegen. DO NOT EDIT.
 
 package v1alpha1
 
 import (
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	runtime "k8s.io/apimachinery/pkg/runtime"
 )
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AlertmanagerConfig) DeepCopyInto(out *AlertmanagerConfig) {
+	*out = *in
+	in.CustomConfig.DeepCopyInto(&out.CustomConfig)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertmanagerConfig.
+func (in *AlertmanagerConfig) DeepCopy() *AlertmanagerConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(AlertmanagerConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AlertmanagerCustomConfig) DeepCopyInto(out *AlertmanagerCustomConfig) {
+	*out = *in
+	if in.NodeSelector != nil {
+		in, out := &in.NodeSelector, &out.NodeSelector
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = make([]ContainerResource, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Secrets != nil {
+		in, out := &in.Secrets, &out.Secrets
+		*out = make([]SecretName, len(*in))
+		copy(*out, *in)
+	}
+	if in.Tolerations != nil {
+		in, out := &in.Tolerations, &out.Tolerations
+		*out = make([]v1.Toleration, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.TopologySpreadConstraints != nil {
+		in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints
+		*out = make([]v1.TopologySpreadConstraint, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.VolumeClaimTemplate != nil {
+		in, out := &in.VolumeClaimTemplate, &out.VolumeClaimTemplate
+		*out = new(v1.PersistentVolumeClaim)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertmanagerCustomConfig.
+func (in *AlertmanagerCustomConfig) DeepCopy() *AlertmanagerCustomConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(AlertmanagerCustomConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Audit) DeepCopyInto(out *Audit) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Audit.
+func (in *Audit) DeepCopy() *Audit {
+	if in == nil {
+		return nil
+	}
+	out := new(Audit)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *Backup) DeepCopyInto(out *Backup) {
 	*out = *in
@@ -192,7 +280,7 @@ func (in *ClusterImagePolicyStatus) DeepCopyInto(out *ClusterImagePolicyStatus)
 	*out = *in
 	if in.Conditions != nil {
 		in, out := &in.Conditions, &out.Conditions
-		*out = make([]v1.Condition, len(*in))
+		*out = make([]metav1.Condition, len(*in))
 		for i := range *in {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
@@ -215,7 +303,7 @@ func (in *ClusterMonitoring) DeepCopyInto(out *ClusterMonitoring) {
 	*out = *in
 	out.TypeMeta = in.TypeMeta
 	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-	out.Spec = in.Spec
+	in.Spec.DeepCopyInto(&out.Spec)
 	out.Status = in.Status
 	return
 }
@@ -275,6 +363,8 @@ func (in *ClusterMonitoringList) DeepCopyObject() runtime.Object {
 func (in *ClusterMonitoringSpec) DeepCopyInto(out *ClusterMonitoringSpec) {
 	*out = *in
 	out.UserDefined = in.UserDefined
+	in.AlertmanagerConfig.DeepCopyInto(&out.AlertmanagerConfig)
+	in.MetricsServerConfig.DeepCopyInto(&out.MetricsServerConfig)
 	return
 }
 
@@ -304,6 +394,24 @@ func (in *ClusterMonitoringStatus) DeepCopy() *ClusterMonitoringStatus {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ContainerResource) DeepCopyInto(out *ContainerResource) {
+	*out = *in
+	out.Request = in.Request.DeepCopy()
+	out.Limit = in.Limit.DeepCopy()
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerResource.
+func (in *ContainerResource) DeepCopy() *ContainerResource { + if in == nil { + return nil + } + out := new(ContainerResource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EtcdBackupSpec) DeepCopyInto(out *EtcdBackupSpec) { *out = *in @@ -462,7 +570,7 @@ func (in *ImagePolicyStatus) DeepCopyInto(out *ImagePolicyStatus) { *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make([]v1.Condition, len(*in)) + *out = make([]metav1.Condition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -574,6 +682,51 @@ func (in *InsightsDataGatherStatus) DeepCopy() *InsightsDataGatherStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricsServerConfig) DeepCopyInto(out *MetricsServerConfig) { + *out = *in + out.Audit = in.Audit + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]v1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]ContainerResource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TopologySpreadConstraints != nil { + in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints + *out = make([]v1.TopologySpreadConstraint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsServerConfig. +func (in *MetricsServerConfig) DeepCopy() *MetricsServerConfig { + if in == nil { + return nil + } + out := new(MetricsServerConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PKI) DeepCopyInto(out *PKI) { *out = *in diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go index 504281540b..6ba6ad11f4 100644 --- a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go @@ -118,6 +118,40 @@ func (ClusterImagePolicyStatus) SwaggerDoc() map[string]string { return map_ClusterImagePolicyStatus } +var map_AlertmanagerConfig = map[string]string{ + "": "alertmanagerConfig provides configuration options for the default Alertmanager instance that runs in the `openshift-monitoring` namespace. Use this configuration to control whether the default Alertmanager is deployed, how it logs, and how its pods are scheduled.", + "deploymentMode": "deploymentMode determines whether the default Alertmanager instance should be deployed as part of the monitoring stack. Allowed values are Disabled, DefaultConfig, and CustomConfig. When set to Disabled, the Alertmanager instance will not be deployed. When set to DefaultConfig, the platform will deploy Alertmanager with default settings. 
When set to CustomConfig, the Alertmanager will be deployed with custom configuration.", + "customConfig": "customConfig must be set when deploymentMode is CustomConfig, and must be unset otherwise. When set to CustomConfig, the Alertmanager will be deployed with custom configuration.", +} + +func (AlertmanagerConfig) SwaggerDoc() map[string]string { + return map_AlertmanagerConfig +} + +var map_AlertmanagerCustomConfig = map[string]string{ + "": "AlertmanagerCustomConfig represents the configuration for a custom Alertmanager deployment that runs in the `openshift-monitoring` namespace. Use this configuration to control how the Alertmanager logs and how its pods are scheduled.", + "logLevel": "logLevel defines the verbosity of logs emitted by Alertmanager. This field allows users to control the amount and severity of logs generated, which can be useful for debugging issues or reducing noise in production environments. Allowed values are Error, Warn, Info, and Debug. When set to Error, only errors will be logged. When set to Warn, both warnings and errors will be logged. When set to Info, general information, warnings, and errors will all be logged. When set to Debug, detailed debugging information will be logged. When omitted, this means no opinion and the platform is left to choose a reasonable default, that is subject to change over time. The current default value is `Info`.", + "nodeSelector": "nodeSelector defines the nodes on which the Pods are scheduled. nodeSelector is optional.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default value is `kubernetes.io/os: linux`.", + "resources": "resources defines the compute resource requests and limits for the Alertmanager container. This includes CPU, memory and HugePages constraints to help control scheduling and resource usage. When not specified, defaults are used by the platform. Requests cannot exceed limits. This field is optional. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ This is a simplified API that maps to Kubernetes ResourceRequirements. The current default values are:\n resources:\n - name: cpu\n request: 4m\n limit: null\n - name: memory\n request: 40Mi\n limit: null\nMaximum length for this list is 10. Minimum length for this list is 1.", + "secrets": "secrets defines a list of secrets that need to be mounted into the Alertmanager. The secrets must reside within the same namespace as the Alertmanager object. They will be added as volumes named secret-<secret-name> and mounted at /etc/alertmanager/secrets/<secret-name> within the 'alertmanager' container of the Alertmanager Pods.\n\nThese secrets can be used to authenticate Alertmanager with endpoint receivers. For example, you can use secrets to: - Provide certificates for TLS authentication with receivers that require private CA certificates - Store credentials for Basic HTTP authentication with receivers that require password-based auth - Store any other authentication credentials needed by your alert receivers\n\nThis field is optional. Maximum length for this list is 10. Minimum length for this list is 1. Entries in this list must be unique.", + "tolerations": "tolerations defines tolerations for the pods.
tolerations is optional.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. Defaults are empty/unset. Maximum length for this list is 10. Minimum length for this list is 1.", + "topologySpreadConstraints": "topologySpreadConstraints defines rules for how Alertmanager Pods should be distributed across topology domains such as zones, nodes, or other user-defined labels. topologySpreadConstraints is optional. This helps improve high availability and resource efficiency by avoiding placing too many replicas in the same failure domain.\n\nWhen omitted, this means no opinion and the platform is left to choose a default, which is subject to change over time. This field maps directly to the `topologySpreadConstraints` field in the Pod spec. Default is empty list. Maximum length for this list is 10. Minimum length for this list is 1. Entries must have unique topologyKey and whenUnsatisfiable pairs.", + "volumeClaimTemplate": "volumeClaimTemplate defines persistent storage for Alertmanager. Use this setting to configure the persistent volume claim, including storage class, volume size, and name. If omitted, the Pod uses ephemeral storage and alert data will not persist across restarts. This field is optional.", +} + +func (AlertmanagerCustomConfig) SwaggerDoc() map[string]string { + return map_AlertmanagerCustomConfig +} + +var map_Audit = map[string]string{ + "": "Audit profile configurations", + "profile": "profile is a required field for configuring the audit log level of the Kubernetes Metrics Server. Allowed values are None, Metadata, Request, or RequestResponse. When set to None, audit logging is disabled and no audit events are recorded. When set to Metadata, only request metadata (such as requesting user, timestamp, resource, verb, etc.) is logged, but not the request or response body. When set to Request, event metadata and the request body are logged, but not the response body. When set to RequestResponse, event metadata, request body, and response body are all logged, providing the most detailed audit information.\n\nSee: https://kubernetes.io/docs/tasks/debug-application-cluster/audit/#audit-policy for more information about auditing and log levels.", +} + +func (Audit) SwaggerDoc() map[string]string { + return map_Audit +} + var map_ClusterMonitoring = map[string]string{ "": "ClusterMonitoring is the Custom Resource object which holds the current status of Cluster Monitoring Operator. CMO is a central component of the monitoring stack.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. ClusterMonitoring is the Schema for the Cluster Monitoring Operators API", "metadata": "metadata is the standard object metadata.", @@ -140,8 +174,10 @@ func (ClusterMonitoringList) SwaggerDoc() map[string]string { } var map_ClusterMonitoringSpec = map[string]string{ - "": "ClusterMonitoringSpec defines the desired state of Cluster Monitoring Operator", - "userDefined": "userDefined set the deployment mode for user-defined monitoring in addition to the default platform monitoring.", + "": "ClusterMonitoringSpec defines the desired state of Cluster Monitoring Operator", + "userDefined": "userDefined sets the deployment mode for user-defined monitoring in addition to the default platform monitoring. userDefined is optional.
When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default value is `Disabled`.", + "alertmanagerConfig": "alertmanagerConfig allows users to configure how the default Alertmanager instance should be deployed in the `openshift-monitoring` namespace. alertmanagerConfig is optional. When omitted, this means no opinion and the platform is left to choose a reasonable default, that is subject to change over time. The current default value is `DefaultConfig`.", + "metricsServerConfig": "metricsServerConfig is an optional field that can be used to configure the Kubernetes Metrics Server that runs in the openshift-monitoring namespace. Specifically, it can configure how the Metrics Server instance is deployed, pod scheduling, its audit policy and log verbosity. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time.", } func (ClusterMonitoringSpec) SwaggerDoc() map[string]string { @@ -149,16 +185,41 @@ func (ClusterMonitoringSpec) SwaggerDoc() map[string]string { } var map_ClusterMonitoringStatus = map[string]string{ - "": "MonitoringOperatorStatus defines the observed state of MonitoringOperator", + "": "ClusterMonitoringStatus defines the observed state of ClusterMonitoring", } func (ClusterMonitoringStatus) SwaggerDoc() map[string]string { return map_ClusterMonitoringStatus } +var map_ContainerResource = map[string]string{ + "": "ContainerResource defines a single resource requirement for a container.", + "name": "name of the resource (e.g. \"cpu\", \"memory\", \"hugepages-2Mi\"). This field is required. name must consist only of alphanumeric characters, `-`, `_` and `.` and must start and end with an alphanumeric character.", + "request": "request is the minimum amount of the resource required (e.g. \"2Mi\", \"1Gi\"). This field is optional. When limit is specified, request cannot be greater than limit.", + "limit": "limit is the maximum amount of the resource allowed (e.g. \"2Mi\", \"1Gi\"). This field is optional. When request is specified, limit cannot be less than request. The value must be greater than 0 when specified.", +} + +func (ContainerResource) SwaggerDoc() map[string]string { + return map_ContainerResource +} + +var map_MetricsServerConfig = map[string]string{ + "": "MetricsServerConfig provides configuration options for the Metrics Server instance that runs in the `openshift-monitoring` namespace. Use this configuration to control how the Metrics Server instance is deployed, how it logs, and how its pods are scheduled.", + "audit": "audit defines the audit configuration used by the Metrics Server instance. audit is optional. When omitted, this means no opinion and the platform is left to choose a reasonable default, that is subject to change over time. The current default sets audit.profile to Metadata.", + "nodeSelector": "nodeSelector defines the nodes on which the Pods are scheduled. nodeSelector is optional.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. The current default value is `kubernetes.io/os: linux`.", + "tolerations": "tolerations defines tolerations for the pods. tolerations is optional.\n\nWhen omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. Defaults are empty/unset.
Maximum length for this list is 10. Minimum length for this list is 1.", + "verbosity": "verbosity defines the verbosity of log messages for Metrics Server. Valid values are Errors, Info, Trace, TraceAll and omitted. When set to Errors, only critical messages and errors are logged. When set to Info, only basic information messages are logged. When set to Trace, information useful for general debugging is logged. When set to TraceAll, detailed information about metric scraping is logged. When omitted, this means no opinion and the platform is left to choose a reasonable default, that is subject to change over time. The current default value is `Errors`.", + "resources": "resources defines the compute resource requests and limits for the Metrics Server container. This includes CPU, memory and HugePages constraints to help control scheduling and resource usage. When not specified, defaults are used by the platform. Requests cannot exceed limits. This field is optional. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ This is a simplified API that maps to Kubernetes ResourceRequirements. The current default values are:\n resources:\n - name: cpu\n request: 4m\n limit: null\n - name: memory\n request: 40Mi\n limit: null\nMaximum length for this list is 10. Minimum length for this list is 1.", + "topologySpreadConstraints": "topologySpreadConstraints defines rules for how Metrics Server Pods should be distributed across topology domains such as zones, nodes, or other user-defined labels. topologySpreadConstraints is optional. This helps improve high availability and resource efficiency by avoiding placing too many replicas in the same failure domain.\n\nWhen omitted, this means no opinion and the platform is left to choose a default, which is subject to change over time. This field maps directly to the `topologySpreadConstraints` field in the Pod spec. Default is empty list. Maximum length for this list is 10. Minimum length for this list is 1. Entries must have unique topologyKey and whenUnsatisfiable pairs.", +} + +func (MetricsServerConfig) SwaggerDoc() map[string]string { + return map_MetricsServerConfig +} + var map_UserDefinedMonitoring = map[string]string{ "": "UserDefinedMonitoring config for user-defined projects.", - "mode": "mode defines the different configurations of UserDefinedMonitoring Valid values are Disabled and NamespaceIsolated Disabled disables monitoring for user-defined projects. This restricts the default monitoring stack, installed in the openshift-monitoring project, to monitor only platform namespaces, which prevents any custom monitoring configurations or resources from being applied to user-defined namespaces. NamespaceIsolated enables monitoring for user-defined projects with namespace-scoped tenancy. This ensures that metrics, alerts, and monitoring data are isolated at the namespace level.", + "mode": "mode defines the different configurations of UserDefinedMonitoring. Valid values are Disabled and NamespaceIsolated. Disabled disables monitoring for user-defined projects. This restricts the default monitoring stack, installed in the openshift-monitoring project, to monitor only platform namespaces, which prevents any custom monitoring configurations or resources from being applied to user-defined namespaces. NamespaceIsolated enables monitoring for user-defined projects with namespace-scoped tenancy. This ensures that metrics, alerts, and monitoring data are isolated at the namespace level.
The current default value is `Disabled`.", } func (UserDefinedMonitoring) SwaggerDoc() map[string]string { @@ -285,7 +346,7 @@ func (PolicyMatchRemapIdentity) SwaggerDoc() map[string]string { var map_PolicyRootOfTrust = map[string]string{ "": "PolicyRootOfTrust defines the root of trust based on the selected policyType.", - "policyType": "policyType serves as the union's discriminator. Users are required to assign a value to this field, choosing one of the policy types that define the root of trust. \"PublicKey\" indicates that the policy relies on a sigstore publicKey and may optionally use a Rekor verification. \"FulcioCAWithRekor\" indicates that the policy is based on the Fulcio certification and incorporates a Rekor verification. \"PKI\" is a DevPreview feature that indicates that the policy is based on the certificates from Bring Your Own Public Key Infrastructure (BYOPKI). This value is enabled by turning on the SigstoreImageVerificationPKI feature gate.", + "policyType": "policyType serves as the union's discriminator. Users are required to assign a value to this field, choosing one of the policy types that define the root of trust. \"PublicKey\" indicates that the policy relies on a sigstore publicKey and may optionally use a Rekor verification. \"FulcioCAWithRekor\" indicates that the policy is based on the Fulcio certification and incorporates a Rekor verification. \"PKI\" indicates that the policy is based on the certificates from Bring Your Own Public Key Infrastructure (BYOPKI). This value is enabled by turning on the SigstoreImageVerificationPKI feature gate.", "publicKey": "publicKey defines the root of trust based on a sigstore public key.", "fulcioCAWithRekor": "fulcioCAWithRekor defines the root of trust based on the Fulcio certificate and the Rekor public key. For more information about Fulcio and Rekor, please refer to the document at: https://github.com/sigstore/fulcio and https://github.com/sigstore/rekor", "pki": "pki defines the root of trust based on Bring Your Own Public Key Infrastructure (BYOPKI) Root CA(s) and corresponding intermediate certificates.", diff --git a/vendor/github.com/openshift/api/config/v1alpha2/Makefile b/vendor/github.com/openshift/api/config/v1alpha2/Makefile new file mode 100644 index 0000000000..933e5dd439 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1alpha2/Makefile @@ -0,0 +1,3 @@ +.PHONY: test +test: + make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="config.openshift.io/v1alpha2" diff --git a/vendor/github.com/openshift/api/config/v1alpha2/doc.go b/vendor/github.com/openshift/api/config/v1alpha2/doc.go new file mode 100644 index 0000000000..15ac6b4972 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1alpha2/doc.go @@ -0,0 +1,7 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=config.openshift.io +// Package v1alpha2 is the v1alpha2 version of the API. 
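+// +// A minimal sketch (consumer-side usage assumed; error handling elided) of making this group's types available on a scheme via the Install helper defined in register.go: +// +// scheme := runtime.NewScheme() +// _ = v1alpha2.Install(scheme)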
+package v1alpha2 diff --git a/vendor/github.com/openshift/api/config/v1alpha2/register.go b/vendor/github.com/openshift/api/config/v1alpha2/register.go new file mode 100644 index 0000000000..dfd9e6f0e1 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1alpha2/register.go @@ -0,0 +1,38 @@ +package v1alpha2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "config.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha2"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &InsightsDataGather{}, + &InsightsDataGatherList{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/vendor/github.com/openshift/api/config/v1alpha2/types_insights.go b/vendor/github.com/openshift/api/config/v1alpha2/types_insights.go new file mode 100644 index 0000000000..d59f5920b1 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1alpha2/types_insights.go @@ -0,0 +1,220 @@ +package v1alpha2 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +// InsightsDataGather provides data gather configuration options for the Insights Operator. +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=insightsdatagathers,scope=Cluster +// +kubebuilder:subresource:status +// +openshift:api-approved.openshift.io=https://github.com/openshift/api/pull/2195 +// +openshift:file-pattern=cvoRunLevel=0000_10,operatorName=config-operator,operatorOrdering=01 +// +openshift:enable:FeatureGate=InsightsConfig +// +openshift:compatibility-gen:level=4 +type InsightsDataGather struct { + metav1.TypeMeta `json:",inline"` + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + // spec holds user settable values for configuration + // +required + Spec InsightsDataGatherSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status InsightsDataGatherStatus `json:"status"` +} + +type InsightsDataGatherSpec struct { + // gatherConfig is an optional spec attribute that includes all the configuration options related to gathering of the Insights data and its uploading to the ingress.
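+ // An illustrative spec fragment using this field (a sketch, not upstream-documented; the gatherers block and its mode values are defined later in this file): + // + // gatherConfig: + // gatherers: + // mode: All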
+ // +optional + GatherConfig GatherConfig `json:"gatherConfig"` +} + +type InsightsDataGatherStatus struct{} + +// gatherConfig provides data gathering configuration options. +type GatherConfig struct { + // dataPolicy is an optional list of DataPolicyOptions that allows the user to enable additional obfuscation of the Insights archive data. + // It may not exceed 2 items and must not contain duplicates. + // Valid values are ObfuscateNetworking and WorkloadNames. + // When set to ObfuscateNetworking the IP addresses and the cluster domain name are obfuscated. + // When set to WorkloadNames, the gathered data about cluster resources will not contain the workload names for your deployments. Resource UIDs will be used instead. + // When omitted no obfuscation is applied. + // +kubebuilder:validation:MaxItems=2 + // +kubebuilder:validation:XValidation:rule="self.all(x, self.exists_one(y, x == y))",message="dataPolicy items must be unique" + // +listType=atomic + // +optional + DataPolicy []DataPolicyOption `json:"dataPolicy"` + // gatherers is a required field that specifies the configuration of the gatherers. + // +required + Gatherers Gatherers `json:"gatherers"` + // storage is an optional field that allows the user to define persistent storage for gathering jobs to store the Insights data archive. + // If omitted, the gathering job will use ephemeral storage. + // +optional + Storage *Storage `json:"storage,omitempty"` +} + +// +kubebuilder:validation:XValidation:rule="has(self.mode) && self.mode == 'Custom' ? has(self.custom) : !has(self.custom)",message="custom is required when mode is Custom, and forbidden otherwise" +type Gatherers struct { + // mode is a required field that specifies the mode for gatherers. Allowed values are All, None, and Custom. + // When set to All, all gatherers will run and gather data. + // When set to None, all gatherers will be disabled and no data will be gathered. + // When set to Custom, the custom configuration from the custom field will be applied. + // +required + Mode GatheringMode `json:"mode"` + // custom provides gathering configuration. + // It is required when mode is Custom, and forbidden otherwise. + // Custom configuration allows the user to disable only a subset of gatherers. + // Gatherers that are not explicitly disabled in custom configuration will run. + // +optional + Custom *Custom `json:"custom,omitempty"` +} + +// custom provides the custom configuration of gatherers +type Custom struct { + // configs is a required list of gatherers configurations that can be used to enable or disable specific gatherers. + // It may not exceed 100 items and each gatherer can be present only once. + // It is possible to disable an entire set of gatherers while allowing a specific function within that set. + // The particular gatherers IDs can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md. + // Run the following command to get the names of last active gatherers: + // "oc get insightsoperators.operator.openshift.io cluster -o json | jq '.status.gatherStatus.gatherers[].name'" + // +kubebuilder:validation:MaxItems=100 + // +listType=map + // +listMapKey=name + // +required + Configs []GathererConfig `json:"configs"` +} + +// gatheringMode defines the valid gathering modes.
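+// As an illustrative sketch (the gatherer name below is hypothetical), Custom mode can disable a single gatherer while all others keep running: +// +// gatherers: +// mode: Custom +// custom: +// configs: +// - name: example_gatherer/example_function +// state: Disabled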
+// +kubebuilder:validation:Enum=All;None;Custom +type GatheringMode string + +const ( + // GatheringModeAll enables all gatherers + GatheringModeAll GatheringMode = "All" + // GatheringModeNone disables all gatherers + GatheringModeNone GatheringMode = "None" + // GatheringModeCustom applies the configuration from the custom field. + GatheringModeCustom GatheringMode = "Custom" +) + +// dataPolicyOption declares valid data policy options +// +kubebuilder:validation:Enum=ObfuscateNetworking;WorkloadNames +type DataPolicyOption string + +const ( + // DataPolicyOptionObfuscateNetworking obfuscates IP addresses and the cluster domain name + DataPolicyOptionObfuscateNetworking DataPolicyOption = "ObfuscateNetworking" + // DataPolicyOptionObfuscateWorkloadNames obfuscates data from the Deployment Validation Operator + DataPolicyOptionObfuscateWorkloadNames DataPolicyOption = "WorkloadNames" +) + +// storage provides persistent storage configuration options for gathering jobs. +// If the type is set to PersistentVolume, then the PersistentVolume must be defined. +// If the type is set to Ephemeral, then the PersistentVolume must not be defined. +// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'PersistentVolume' ? has(self.persistentVolume) : !has(self.persistentVolume)",message="persistentVolume is required when type is PersistentVolume, and forbidden otherwise" +type Storage struct { + // type is a required field that specifies the type of storage that will be used to store the Insights data archive. + // Valid values are "PersistentVolume" and "Ephemeral". + // When set to Ephemeral, the Insights data archive is stored in the ephemeral storage of the gathering job. + // When set to PersistentVolume, the Insights data archive is stored in the PersistentVolume that is defined by the persistentVolume field. + // +required + Type StorageType `json:"type"` + // persistentVolume is an optional field that specifies the PersistentVolume that will be used to store the Insights data archive. + // The PersistentVolume must be created in the openshift-insights namespace. + // +optional + PersistentVolume *PersistentVolumeConfig `json:"persistentVolume,omitempty"` +} + +// storageType declares valid storage types +// +kubebuilder:validation:Enum=PersistentVolume;Ephemeral +type StorageType string + +const ( + // StorageTypePersistentVolume storage type + StorageTypePersistentVolume StorageType = "PersistentVolume" + // StorageTypeEphemeral storage type + StorageTypeEphemeral StorageType = "Ephemeral" +) + +// persistentVolumeConfig provides configuration options for PersistentVolume storage. +type PersistentVolumeConfig struct { + // claim is a required field that specifies the configuration of the PersistentVolumeClaim that will be used to store the Insights data archive. + // The PersistentVolumeClaim must be created in the openshift-insights namespace. + // +required + Claim PersistentVolumeClaimReference `json:"claim"` + // mountPath is an optional field specifying the directory where the PVC will be mounted inside the Insights data gathering Pod. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. + // The current default mount path is /var/lib/insights-operator + // The path may not exceed 1024 characters and must not contain a colon.
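+ // For example (illustrative values only), "/var/lib/insights-archive" would be a valid mountPath, while "/var/lib:insights" would be rejected by the colon rule enforced below.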
+ // +kubebuilder:validation:MaxLength=1024 + // +kubebuilder:validation:XValidation:rule="!self.contains(':')",message="mountPath must not contain a colon" + // +optional + MountPath string `json:"mountPath,omitempty"` +} + +// persistentVolumeClaimReference is a reference to a PersistentVolumeClaim. +type PersistentVolumeClaimReference struct { + // name is a string that follows the DNS1123 subdomain format. + // It must be at most 253 characters in length, and must consist only of lower case alphanumeric characters, '-' and '.', and must start and end with an alphanumeric character. + // +kubebuilder:validation:XValidation:rule="!format.dns1123Subdomain().validate(self).hasValue()",message="a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character." + // +kubebuilder:validation:MaxLength:=253 + // +required + Name string `json:"name"` +} + +// gathererConfig allows configuring specific gatherers +type GathererConfig struct { + // name is the required name of a specific gatherer + // It may not exceed 256 characters. + // The format for a gatherer name is: {gatherer}/{function} where the function is optional. + // Gatherer consists of lowercase letters only that may include underscores (_). + // Function consists of lowercase letters only that may include underscores (_) and is separated from the gatherer by a forward slash (/). + // The particular gatherers can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md. + // Run the following command to get the names of last active gatherers: + // "oc get insightsoperators.operator.openshift.io cluster -o json | jq '.status.gatherStatus.gatherers[].name'" + // +kubebuilder:validation:MaxLength=256 + // +kubebuilder:validation:XValidation:rule=`self.matches("^[a-z]+[_a-z]*[a-z]([/a-z][_a-z]*)?[a-z]$")`,message=`gatherer name must be in the format of {gatherer}/{function} where the gatherer and function are lowercase letters only that may include underscores (_) and are separated by a forward slash (/) if the function is provided` + // +required + Name string `json:"name"` + // state is a required field that allows you to configure a specific gatherer. Valid values are "Enabled" and "Disabled". + // When set to Enabled the gatherer will run. + // When set to Disabled the gatherer will not run. + // +required + State GathererState `json:"state"` +} + +// state declares valid gatherer state types. +// +kubebuilder:validation:Enum=Enabled;Disabled +type GathererState string + +const ( + // GathererStateEnabled gatherer state, which means that the gatherer will run. + GathererStateEnabled GathererState = "Enabled" + // GathererStateDisabled gatherer state, which means that the gatherer will not run. + GathererStateDisabled GathererState = "Disabled" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// InsightsDataGatherList is a collection of items +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +type InsightsDataGatherList struct { + metav1.TypeMeta `json:",inline"` + // metadata is the required standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +required + metav1.ListMeta `json:"metadata"` + // items is the required list of InsightsDataGather objects + // it may not exceed 100 items + // +kubebuilder:validation:MaxItems=100 + // +required + Items []InsightsDataGather `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1alpha2/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1alpha2/zz_generated.deepcopy.go new file mode 100644 index 0000000000..27586f99b7 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1alpha2/zz_generated.deepcopy.go @@ -0,0 +1,243 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by codegen. DO NOT EDIT. + +package v1alpha2 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Custom) DeepCopyInto(out *Custom) { + *out = *in + if in.Configs != nil { + in, out := &in.Configs, &out.Configs + *out = make([]GathererConfig, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Custom. +func (in *Custom) DeepCopy() *Custom { + if in == nil { + return nil + } + out := new(Custom) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatherConfig) DeepCopyInto(out *GatherConfig) { + *out = *in + if in.DataPolicy != nil { + in, out := &in.DataPolicy, &out.DataPolicy + *out = make([]DataPolicyOption, len(*in)) + copy(*out, *in) + } + in.Gatherers.DeepCopyInto(&out.Gatherers) + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + *out = new(Storage) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatherConfig. +func (in *GatherConfig) DeepCopy() *GatherConfig { + if in == nil { + return nil + } + out := new(GatherConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GathererConfig) DeepCopyInto(out *GathererConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GathererConfig. +func (in *GathererConfig) DeepCopy() *GathererConfig { + if in == nil { + return nil + } + out := new(GathererConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Gatherers) DeepCopyInto(out *Gatherers) { + *out = *in + if in.Custom != nil { + in, out := &in.Custom, &out.Custom + *out = new(Custom) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Gatherers. +func (in *Gatherers) DeepCopy() *Gatherers { + if in == nil { + return nil + } + out := new(Gatherers) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InsightsDataGather) DeepCopyInto(out *InsightsDataGather) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsDataGather. +func (in *InsightsDataGather) DeepCopy() *InsightsDataGather { + if in == nil { + return nil + } + out := new(InsightsDataGather) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InsightsDataGather) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InsightsDataGatherList) DeepCopyInto(out *InsightsDataGatherList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]InsightsDataGather, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsDataGatherList. +func (in *InsightsDataGatherList) DeepCopy() *InsightsDataGatherList { + if in == nil { + return nil + } + out := new(InsightsDataGatherList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InsightsDataGatherList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InsightsDataGatherSpec) DeepCopyInto(out *InsightsDataGatherSpec) { + *out = *in + in.GatherConfig.DeepCopyInto(&out.GatherConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsDataGatherSpec. +func (in *InsightsDataGatherSpec) DeepCopy() *InsightsDataGatherSpec { + if in == nil { + return nil + } + out := new(InsightsDataGatherSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InsightsDataGatherStatus) DeepCopyInto(out *InsightsDataGatherStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsDataGatherStatus. +func (in *InsightsDataGatherStatus) DeepCopy() *InsightsDataGatherStatus { + if in == nil { + return nil + } + out := new(InsightsDataGatherStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolumeClaimReference) DeepCopyInto(out *PersistentVolumeClaimReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimReference. +func (in *PersistentVolumeClaimReference) DeepCopy() *PersistentVolumeClaimReference { + if in == nil { + return nil + } + out := new(PersistentVolumeClaimReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PersistentVolumeConfig) DeepCopyInto(out *PersistentVolumeConfig) { + *out = *in + out.Claim = in.Claim + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeConfig. +func (in *PersistentVolumeConfig) DeepCopy() *PersistentVolumeConfig { + if in == nil { + return nil + } + out := new(PersistentVolumeConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Storage) DeepCopyInto(out *Storage) { + *out = *in + if in.PersistentVolume != nil { + in, out := &in.PersistentVolume, &out.PersistentVolume + *out = new(PersistentVolumeConfig) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Storage. +func (in *Storage) DeepCopy() *Storage { + if in == nil { + return nil + } + out := new(Storage) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/config/v1alpha2/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/config/v1alpha2/zz_generated.featuregated-crd-manifests.yaml new file mode 100644 index 0000000000..99fe308ef8 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1alpha2/zz_generated.featuregated-crd-manifests.yaml @@ -0,0 +1,23 @@ +insightsdatagathers.config.openshift.io: + Annotations: {} + ApprovedPRNumber: https://github.com/openshift/api/pull/2195 + CRDName: insightsdatagathers.config.openshift.io + Capability: "" + Category: "" + FeatureGates: + - InsightsConfig + FilenameOperatorName: config-operator + FilenameOperatorOrdering: "01" + FilenameRunLevel: "0000_10" + GroupName: config.openshift.io + HasStatus: true + KindName: InsightsDataGather + Labels: {} + PluralName: insightsdatagathers + PrinterColumns: [] + Scope: Cluster + ShortNames: null + TopLevelFeatureGates: + - InsightsConfig + Version: v1alpha2 + diff --git a/vendor/github.com/openshift/api/config/v1alpha2/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1alpha2/zz_generated.swagger_doc_generated.go new file mode 100644 index 0000000000..695c0c70a4 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1alpha2/zz_generated.swagger_doc_generated.go @@ -0,0 +1,111 @@ +package v1alpha2 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_Custom = map[string]string{ + "": "custom provides the custom configuration of gatherers", + "configs": "configs is a required list of gatherers configurations that can be used to enable or disable specific gatherers. It may not exceed 100 items and each gatherer can be present only once. It is possible to disable an entire set of gatherers while allowing a specific function within that set. The particular gatherers IDs can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md. 
Run the following command to get the names of last active gatherers: \"oc get insightsoperators.operator.openshift.io cluster -o json | jq '.status.gatherStatus.gatherers[].name'\"", +} + +func (Custom) SwaggerDoc() map[string]string { + return map_Custom +} + +var map_GatherConfig = map[string]string{ + "": "gatherConfig provides data gathering configuration options.", + "dataPolicy": "dataPolicy is an optional list of DataPolicyOptions that allows the user to enable additional obfuscation of the Insights archive data. It may not exceed 2 items and must not contain duplicates. Valid values are ObfuscateNetworking and WorkloadNames. When set to ObfuscateNetworking the IP addresses and the cluster domain name are obfuscated. When set to WorkloadNames, the gathered data about cluster resources will not contain the workload names for your deployments. Resource UIDs will be used instead. When omitted no obfuscation is applied.", + "gatherers": "gatherers is a required field that specifies the configuration of the gatherers.", + "storage": "storage is an optional field that allows the user to define persistent storage for gathering jobs to store the Insights data archive. If omitted, the gathering job will use ephemeral storage.", +} + +func (GatherConfig) SwaggerDoc() map[string]string { + return map_GatherConfig +} + +var map_GathererConfig = map[string]string{ + "": "gathererConfig allows configuring specific gatherers", + "name": "name is the required name of a specific gatherer. It may not exceed 256 characters. The format for a gatherer name is: {gatherer}/{function} where the function is optional. Gatherer consists of lowercase letters only that may include underscores (_). Function consists of lowercase letters only that may include underscores (_) and is separated from the gatherer by a forward slash (/). The particular gatherers can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md. Run the following command to get the names of last active gatherers: \"oc get insightsoperators.operator.openshift.io cluster -o json | jq '.status.gatherStatus.gatherers[].name'\"", + "state": "state is a required field that allows you to configure a specific gatherer. Valid values are \"Enabled\" and \"Disabled\". When set to Enabled the gatherer will run. When set to Disabled the gatherer will not run.", +} + +func (GathererConfig) SwaggerDoc() map[string]string { + return map_GathererConfig +} + +var map_Gatherers = map[string]string{ + "mode": "mode is a required field that specifies the mode for gatherers. Allowed values are All, None, and Custom. When set to All, all gatherers will run and gather data. When set to None, all gatherers will be disabled and no data will be gathered. When set to Custom, the custom configuration from the custom field will be applied.", + "custom": "custom provides gathering configuration. It is required when mode is Custom, and forbidden otherwise. Custom configuration allows the user to disable only a subset of gatherers. Gatherers that are not explicitly disabled in custom configuration will run.", +} + +func (Gatherers) SwaggerDoc() map[string]string { + return map_Gatherers +} + +var map_InsightsDataGather = map[string]string{ + "": "\n\nInsightsDataGather provides data gather configuration options for the Insights Operator.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason.
These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (InsightsDataGather) SwaggerDoc() map[string]string { + return map_InsightsDataGather +} + +var map_InsightsDataGatherList = map[string]string{ + "": "InsightsDataGatherList is a collection of items. Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the required standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the required list of InsightsDataGather objects. It may not exceed 100 items.", +} + +func (InsightsDataGatherList) SwaggerDoc() map[string]string { + return map_InsightsDataGatherList +} + +var map_InsightsDataGatherSpec = map[string]string{ + "gatherConfig": "gatherConfig is an optional spec attribute that includes all the configuration options related to gathering of the Insights data and its uploading to the ingress.", +} + +func (InsightsDataGatherSpec) SwaggerDoc() map[string]string { + return map_InsightsDataGatherSpec +} + +var map_PersistentVolumeClaimReference = map[string]string{ + "": "persistentVolumeClaimReference is a reference to a PersistentVolumeClaim.", + "name": "name is a string that follows the DNS1123 subdomain format. It must be at most 253 characters in length, and must consist only of lower case alphanumeric characters, '-' and '.', and must start and end with an alphanumeric character.", +} + +func (PersistentVolumeClaimReference) SwaggerDoc() map[string]string { + return map_PersistentVolumeClaimReference +} + +var map_PersistentVolumeConfig = map[string]string{ + "": "persistentVolumeConfig provides configuration options for PersistentVolume storage.", + "claim": "claim is a required field that specifies the configuration of the PersistentVolumeClaim that will be used to store the Insights data archive. The PersistentVolumeClaim must be created in the openshift-insights namespace.", + "mountPath": "mountPath is an optional field specifying the directory where the PVC will be mounted inside the Insights data gathering Pod. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default mount path is /var/lib/insights-operator. The path may not exceed 1024 characters and must not contain a colon.", +} + +func (PersistentVolumeConfig) SwaggerDoc() map[string]string { + return map_PersistentVolumeConfig +} + +var map_Storage = map[string]string{ + "": "storage provides persistent storage configuration options for gathering jobs. If the type is set to PersistentVolume, then the PersistentVolume must be defined. If the type is set to Ephemeral, then the PersistentVolume must not be defined.", + "type": "type is a required field that specifies the type of storage that will be used to store the Insights data archive. Valid values are \"PersistentVolume\" and \"Ephemeral\". When set to Ephemeral, the Insights data archive is stored in the ephemeral storage of the gathering job.
When set to PersistentVolume, the Insights data archive is stored in the PersistentVolume that is defined by the persistentVolume field.", + "persistentVolume": "persistentVolume is an optional field that specifies the PersistentVolume that will be used to store the Insights data archive. The PersistentVolume must be created in the openshift-insights namespace.", +} + +func (Storage) SwaggerDoc() map[string]string { + return map_Storage +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/console/v1/types_console_plugin.go b/vendor/github.com/openshift/api/console/v1/types_console_plugin.go index bf5e3cb99f..0160a4a242 100644 --- a/vendor/github.com/openshift/api/console/v1/types_console_plugin.go +++ b/vendor/github.com/openshift/api/console/v1/types_console_plugin.go @@ -339,7 +339,7 @@ type ConsolePluginBackend struct { // Service serving certificate. The console backend will proxy the // plugins assets from the Service using the service CA bundle. // +optional - Service *ConsolePluginService `json:"service"` + Service *ConsolePluginService `json:"service,omitempty"` } // ConsolePluginService holds information on Service that is serving @@ -368,7 +368,7 @@ type ConsolePluginService struct { // +kubebuilder:validation:Pattern=`^[a-zA-Z0-9.\-_~!$&'()*+,;=:@\/]*$` // +kubebuilder:default:="/" // +optional - BasePath string `json:"basePath"` + BasePath string `json:"basePath,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/github.com/openshift/api/console/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/console/v1/zz_generated.deepcopy.go index d4fefaa37c..7bb91394ef 100644 --- a/vendor/github.com/openshift/api/console/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/console/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ //go:build !ignore_autogenerated // +build !ignore_autogenerated -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by codegen. DO NOT EDIT. 
package v1 diff --git a/vendor/github.com/openshift/api/envtest-releases.yaml b/vendor/github.com/openshift/api/envtest-releases.yaml index e3a8c94cfa..e8688e2b06 100644 --- a/vendor/github.com/openshift/api/envtest-releases.yaml +++ b/vendor/github.com/openshift/api/envtest-releases.yaml @@ -51,3 +51,16 @@ releases: envtest-v1.32.1-linux-arm64.tar.gz: hash: 0bc52e6344ae0753715bc39c2878696c72a3129356df484835586165238361c109ad3e1ebd354af8ecdf1026c3a2b98ed225ad0c6dd348cb3ff128a7cfdcc2f8 selfLink: https://storage.googleapis.com/openshift-kubebuilder-tools/envtest-v1.32.1-linux-arm64.tar.gz + v1.33.2: + envtest-v1.33.2-darwin-amd64.tar.gz: + hash: d032acdc6f365059e20e22de6f78dd298e4cc48b379639c777032a97b7fe6a1d0c933f2a48879506231d57e4be9407ffc56b46485958023a766c4db3f1efb89e + selfLink: https://storage.googleapis.com/openshift-kubebuilder-tools/envtest-v1.33.2-darwin-amd64.tar.gz + envtest-v1.33.2-darwin-arm64.tar.gz: + hash: 51aa29ab015a1d1825e7155137fe30a2aafd311faf63486118d0dd026a9163c65d61e004a42b8e3ff624529734610b4dafdc12220d11d6c159ca9b3622f64497 + selfLink: https://storage.googleapis.com/openshift-kubebuilder-tools/envtest-v1.33.2-darwin-arm64.tar.gz + envtest-v1.33.2-linux-amd64.tar.gz: + hash: 4f2749f668c49ac67651ca2c284252eea722aa37389c3808957a0e1a7783adf1c183aa07ff9623357b761b96022828bbd09dff019527032a474d923676835faf + selfLink: https://storage.googleapis.com/openshift-kubebuilder-tools/envtest-v1.33.2-linux-amd64.tar.gz + envtest-v1.33.2-linux-arm64.tar.gz: + hash: 9936eba66fd0170808268da4c0609b7e7d4d1b0de8607b0d3a9091539b4ec881041a9e08e7b4839708b11139bcc850acd34dfc0305ed955cc61fc3fae9da58f5 + selfLink: https://storage.googleapis.com/openshift-kubebuilder-tools/envtest-v1.33.2-linux-arm64.tar.gz diff --git a/vendor/github.com/openshift/api/features.md b/vendor/github.com/openshift/api/features.md index 2ebfc0b829..0517294220 100644 --- a/vendor/github.com/openshift/api/features.md +++ b/vendor/github.com/openshift/api/features.md @@ -1,100 +1,110 @@ | FeatureGate | Default on Hypershift | Default on SelfManagedHA | DevPreviewNoUpgrade on Hypershift | DevPreviewNoUpgrade on SelfManagedHA | TechPreviewNoUpgrade on Hypershift | TechPreviewNoUpgrade on SelfManagedHA | | ------ | --- | --- | --- | --- | --- | --- | | ClusterAPIInstall| | | | | | | -| ClusterAPIInstallIBMCloud| | | | | | | | EventedPLEG| | | | | | | -| MachineAPIMigration| | | | | | | | MachineAPIOperatorDisableMachineHealthCheckController| | | | | | | | MultiArchInstallAzure| | | | | | | -| ClusterVersionOperatorConfiguration| | | Enabled | Enabled | | | +| ShortCertRotation| | | | | | | +| BootImageSkewEnforcement| | | Enabled | Enabled | | | +| ClusterAPIMachineManagementVSphere| | | Enabled | Enabled | | | | Example2| | | Enabled | Enabled | | | +| ExternalSnapshotMetadata| | | Enabled | Enabled | | | | NewOLMCatalogdAPIV1Metas| | | | Enabled | | Enabled | -| SELinuxChangePolicy| | | Enabled | Enabled | | | -| SELinuxMount| | | Enabled | Enabled | | | -| ShortCertRotation| | | Enabled | Enabled | | | -| SigstoreImageVerificationPKI| | | Enabled | Enabled | | | +| NewOLMOwnSingleNamespace| | | | Enabled | | Enabled | +| NewOLMPreflightPermissionChecks| | | | Enabled | | Enabled | +| NewOLMWebhookProviderOpenshiftServiceCA| | | | Enabled | | Enabled | +| NoRegistryClusterOperations| | | | Enabled | | Enabled | +| VSphereMixedNodeEnv| | | Enabled | Enabled | | | | NewOLM| | Enabled | | Enabled | | Enabled | | AWSClusterHostedDNS| | | Enabled | Enabled | Enabled | Enabled | +| AWSClusterHostedDNSInstall| | | Enabled | 
Enabled | Enabled | Enabled | +| AWSDedicatedHosts| | | Enabled | Enabled | Enabled | Enabled | +| AWSDualStackInstall| | | Enabled | Enabled | Enabled | Enabled | +| AWSServiceLBNetworkSecurityGroup| | | Enabled | Enabled | Enabled | Enabled | | AutomatedEtcdBackup| | | Enabled | Enabled | Enabled | Enabled | +| AzureClusterHostedDNSInstall| | | Enabled | Enabled | Enabled | Enabled | +| AzureDedicatedHosts| | | Enabled | Enabled | Enabled | Enabled | +| AzureDualStackInstall| | | Enabled | Enabled | Enabled | Enabled | +| AzureMultiDisk| | | Enabled | Enabled | Enabled | Enabled | | BootcNodeManagement| | | Enabled | Enabled | Enabled | Enabled | -| CPMSMachineNamePrefix| | | Enabled | Enabled | Enabled | Enabled | +| CBORServingAndStorage| | | Enabled | Enabled | Enabled | Enabled | +| ClientsAllowCBOR| | | Enabled | Enabled | Enabled | Enabled | +| ClientsPreferCBOR| | | Enabled | Enabled | Enabled | Enabled | +| ClusterAPIInstallIBMCloud| | | Enabled | Enabled | Enabled | Enabled | | ClusterMonitoringConfig| | | Enabled | Enabled | Enabled | Enabled | -| ConsolePluginContentSecurityPolicy| | | Enabled | Enabled | Enabled | Enabled | +| ClusterVersionOperatorConfiguration| | | Enabled | Enabled | Enabled | Enabled | | DNSNameResolver| | | Enabled | Enabled | Enabled | Enabled | | DualReplica| | | Enabled | Enabled | Enabled | Enabled | | DyanmicServiceEndpointIBMCloud| | | Enabled | Enabled | Enabled | Enabled | | DynamicResourceAllocation| | | Enabled | Enabled | Enabled | Enabled | | EtcdBackendQuota| | | Enabled | Enabled | Enabled | Enabled | | Example| | | Enabled | Enabled | Enabled | Enabled | +| ExternalOIDCWithUIDAndExtraClaimMappings| | | Enabled | Enabled | Enabled | Enabled | | GCPClusterHostedDNS| | | Enabled | Enabled | Enabled | Enabled | | GCPCustomAPIEndpoints| | | Enabled | Enabled | Enabled | Enabled | -| GatewayAPI| | | Enabled | Enabled | Enabled | Enabled | -| GatewayAPIController| | | Enabled | Enabled | Enabled | Enabled | -| HighlyAvailableArbiter| | | Enabled | Enabled | Enabled | Enabled | +| GCPCustomAPIEndpointsInstall| | | Enabled | Enabled | Enabled | Enabled | +| GCPDualStackInstall| | | Enabled | Enabled | Enabled | Enabled | +| ImageModeStatusReporting| | | Enabled | Enabled | Enabled | Enabled | | ImageStreamImportMode| | | Enabled | Enabled | Enabled | Enabled | | IngressControllerDynamicConfigurationManager| | | Enabled | Enabled | Enabled | Enabled | | InsightsConfig| | | Enabled | Enabled | Enabled | Enabled | | InsightsConfigAPI| | | Enabled | Enabled | Enabled | Enabled | | InsightsOnDemandDataGather| | | Enabled | Enabled | Enabled | Enabled | -| InsightsRuntimeExtractor| | | Enabled | Enabled | Enabled | Enabled | +| IrreconcilableMachineConfig| | | Enabled | Enabled | Enabled | Enabled | | KMSEncryptionProvider| | | Enabled | Enabled | Enabled | Enabled | -| MachineAPIProviderOpenStack| | | Enabled | Enabled | Enabled | Enabled | -| MachineConfigNodes| | | Enabled | Enabled | Enabled | Enabled | +| MachineAPIMigration| | | Enabled | Enabled | Enabled | Enabled | +| ManagedBootImagesAzure| | | Enabled | Enabled | Enabled | Enabled | +| ManagedBootImagesvSphere| | | Enabled | Enabled | Enabled | Enabled | | MaxUnavailableStatefulSet| | | Enabled | Enabled | Enabled | Enabled | -| MetricsCollectionProfiles| | | Enabled | Enabled | Enabled | Enabled | | MinimumKubeletVersion| | | Enabled | Enabled | Enabled | Enabled | | MixedCPUsAllocation| | | Enabled | Enabled | Enabled | Enabled | +| MultiDiskSetup| | | Enabled | Enabled | Enabled | Enabled | 
+| MutatingAdmissionPolicy| | | Enabled | Enabled | Enabled | Enabled |
 | NodeSwap| | | Enabled | Enabled | Enabled | Enabled |
 | NutanixMultiSubnets| | | Enabled | Enabled | Enabled | Enabled |
 | OVNObservability| | | Enabled | Enabled | Enabled | Enabled |
-| OnClusterBuild| | | Enabled | Enabled | Enabled | Enabled |
-| PinnedImages| | | Enabled | Enabled | Enabled | Enabled |
-| PlatformOperators| | | Enabled | Enabled | Enabled | Enabled |
-| ProcMountType| | | Enabled | Enabled | Enabled | Enabled |
-| RouteAdvertisements| | | Enabled | Enabled | Enabled | Enabled |
-| RouteExternalCertificate| | | Enabled | Enabled | Enabled | Enabled |
-| ServiceAccountTokenNodeBinding| | | Enabled | Enabled | Enabled | Enabled |
+| PreconfiguredUDNAddresses| | | Enabled | Enabled | Enabled | Enabled |
+| SELinuxMount| | | Enabled | Enabled | Enabled | Enabled |
 | SignatureStores| | | Enabled | Enabled | Enabled | Enabled |
-| SigstoreImageVerification| | | Enabled | Enabled | Enabled | Enabled |
+| SigstoreImageVerificationPKI| | | Enabled | Enabled | Enabled | Enabled |
 | TranslateStreamCloseWebsocketRequests| | | Enabled | Enabled | Enabled | Enabled |
-| UpgradeStatus| | | Enabled | Enabled | Enabled | Enabled |
-| UserNamespacesPodSecurityStandards| | | Enabled | Enabled | Enabled | Enabled |
-| UserNamespacesSupport| | | Enabled | Enabled | Enabled | Enabled |
 | VSphereConfigurableMaxAllowedBlockVolumesPerNode| | | Enabled | Enabled | Enabled | Enabled |
 | VSphereHostVMGroupZonal| | | Enabled | Enabled | Enabled | Enabled |
-| VSphereMultiDisk| | | Enabled | Enabled | Enabled | Enabled |
-| VSphereMultiNetworks| | | Enabled | Enabled | Enabled | Enabled |
 | VolumeAttributesClass| | | Enabled | Enabled | Enabled | Enabled |
 | VolumeGroupSnapshot| | | Enabled | Enabled | Enabled | Enabled |
 | ExternalOIDC| Enabled | | Enabled | Enabled | Enabled | Enabled |
-| AWSEFSDriverVolumeMetrics| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
 | AdditionalRoutingCapabilities| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
 | AdminNetworkPolicy| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
 | AlibabaPlatform| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
 | AzureWorkloadIdentity| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
-| BareMetalLoadBalancer| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
 | BuildCSIVolumes| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
-| ChunkSizeMiB| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
-| CloudDualStackNodeIPs| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
-| DisableKubeletCloudCredentialProviders| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
-| GCPLabelsTags| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
-| HardwareSpeed| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
+| CPMSMachineNamePrefix| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
+| ConsolePluginContentSecurityPolicy| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
+| GCPClusterHostedDNSInstall| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
+| GatewayAPI| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
+| GatewayAPIController| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
+| HighlyAvailableArbiter| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
+| ImageVolume| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
 | IngressControllerLBSubnetsAWS| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
 | KMSv1| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
+| MachineConfigNodes| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
 | ManagedBootImages| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
 | ManagedBootImagesAWS| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
-| MultiArchInstallAWS| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
-| MultiArchInstallGCP| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
+| MetricsCollectionProfiles| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
 | NetworkDiagnosticsConfig| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
 | NetworkLiveMigration| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
 | NetworkSegmentation| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
-| NodeDisruptionPolicy| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
 | OpenShiftPodSecurityAdmission| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
-| PersistentIPsForVirtualization| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
-| PrivateHostedZoneAWS| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
+| PinnedImages| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
+| ProcMountType| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
+| RouteAdvertisements| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
+| RouteExternalCertificate| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
+| ServiceAccountTokenNodeBinding| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
 | SetEIPForNLBIngressController| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
-| VSphereControlPlaneMachineSet| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
-| VSphereDriverConfiguration| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
-| VSphereMultiVCenters| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
-| VSphereStaticIPs| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
-| ValidatingAdmissionPolicy| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
+| SigstoreImageVerification| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
+| StoragePerformantSecurityPolicy| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
+| UpgradeStatus| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
+| UserNamespacesPodSecurityStandards| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
+| UserNamespacesSupport| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
+| VSphereMultiDisk| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
+| VSphereMultiNetworks| Enabled | Enabled | Enabled | Enabled | Enabled | Enabled |
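The table above is the generated snapshot of which gates each feature set enables: one row per gate, and the six value columns appear to track the Default, DevPreviewNoUpgrade, and TechPreviewNoUpgrade feature sets across the two cluster profiles (the column headers sit outside this hunk). A minimal Go sketch of that mapping is below; the feature-set names mirror configv1, but the gate lists are a small hand-picked subset taken from the rows above, not the full table.

    package main

    import "fmt"

    // FeatureSet stands in for configv1.FeatureSet; the gate lists are an
    // illustrative subset copied from the table rows, not the full data.
    type FeatureSet string

    const (
        Default              FeatureSet = ""
        TechPreviewNoUpgrade FeatureSet = "TechPreviewNoUpgrade"
    )

    var enabledGates = map[FeatureSet][]string{
        Default:              {"GatewayAPI", "PinnedImages", "SigstoreImageVerification"},
        TechPreviewNoUpgrade: {"GatewayAPI", "PinnedImages", "SigstoreImageVerification", "MutatingAdmissionPolicy", "NodeSwap"},
    }

    // isEnabled reports whether a gate is on for the given feature set.
    func isEnabled(fs FeatureSet, gate string) bool {
        for _, g := range enabledGates[fs] {
            if g == gate {
                return true
            }
        }
        return false
    }

    func main() {
        fmt.Println(isEnabled(Default, "MutatingAdmissionPolicy"))              // false: TechPreview-only per the table
        fmt.Println(isEnabled(TechPreviewNoUpgrade, "MutatingAdmissionPolicy")) // true
    }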
diff --git a/vendor/github.com/openshift/api/features/features.go b/vendor/github.com/openshift/api/features/features.go
index 8f30373e92..231bad62b6 100644
--- a/vendor/github.com/openshift/api/features/features.go
+++ b/vendor/github.com/openshift/api/features/features.go
@@ -40,24 +40,24 @@ var (
 		reportProblemsToJiraComponent("Management Console").
 		contactPerson("jhadvig").
 		productScope(ocpSpecific).
-		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+		enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
 		enhancementPR("https://github.com/openshift/enhancements/pull/1706").
 		mustRegister()

 	FeatureGateServiceAccountTokenNodeBinding = newFeatureGate("ServiceAccountTokenNodeBinding").
 		reportProblemsToJiraComponent("apiserver-auth").
-		contactPerson("stlaz").
+		contactPerson("ibihim").
 		productScope(kubernetes).
 		enhancementPR("https://github.com/kubernetes/enhancements/issues/4193").
-		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+		enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
 		mustRegister()

-	FeatureGateValidatingAdmissionPolicy = newFeatureGate("ValidatingAdmissionPolicy").
+	FeatureGateMutatingAdmissionPolicy = newFeatureGate("MutatingAdmissionPolicy").
 		reportProblemsToJiraComponent("kube-apiserver").
 		contactPerson("benluddy").
 		productScope(kubernetes).
-		enhancementPR("https://github.com/kubernetes/enhancements/issues/3488").
-		enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+		enhancementPR("https://github.com/kubernetes/enhancements/issues/3962").
+		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
 		mustRegister()

 	FeatureGateGatewayAPI = newFeatureGate("GatewayAPI").
@@ -65,7 +65,7 @@ var (
 		contactPerson("miciah").
 		productScope(ocpSpecific).
 		enhancementPR(legacyFeatureGateWithoutEnhancement).
-		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+		enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
 		mustRegister()

 	FeatureGateSetEIPForNLBIngressController = newFeatureGate("SetEIPForNLBIngressController").
@@ -92,22 +92,16 @@ var (
 		enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
 		mustRegister()

+	// OpenShift and Node Team will keep this turned off until evictions and
+	// disk provisioning are fixed even though upstream will take this GA.
 	FeatureGateNodeSwap = newFeatureGate("NodeSwap").
 		reportProblemsToJiraComponent("node").
-		contactPerson("ehashman").
+		contactPerson("haircommander").
 		productScope(kubernetes).
 		enhancementPR("https://github.com/kubernetes/enhancements/issues/2400").
 		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
 		mustRegister()

-	FeatureGateMachineAPIProviderOpenStack = newFeatureGate("MachineAPIProviderOpenStack").
-		reportProblemsToJiraComponent("openstack").
-		contactPerson("egarcia").
-		productScope(ocpSpecific).
-		enhancementPR(legacyFeatureGateWithoutEnhancement).
-		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
-		mustRegister()
-
 	FeatureGateInsightsConfigAPI = newFeatureGate("InsightsConfigAPI").
 		reportProblemsToJiraComponent("insights").
 		contactPerson("tremes").
@@ -116,14 +110,6 @@ var (
 		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
 		mustRegister()

-	FeatureGateInsightsRuntimeExtractor = newFeatureGate("InsightsRuntimeExtractor").
-		reportProblemsToJiraComponent("insights").
-		contactPerson("jmesnil").
-		productScope(ocpSpecific).
-		enhancementPR(legacyFeatureGateWithoutEnhancement).
-		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
-		mustRegister()
-
 	FeatureGateDynamicResourceAllocation = newFeatureGate("DynamicResourceAllocation").
 		reportProblemsToJiraComponent("scheduling").
 		contactPerson("jchaloup").
@@ -140,6 +126,14 @@ var (
 		enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
 		mustRegister()

+	FeatureGateAzureDedicatedHosts = newFeatureGate("AzureDedicatedHosts").
+		reportProblemsToJiraComponent("installer").
+		contactPerson("rvanderp3").
+		productScope(ocpSpecific).
+		enhancementPR("https://github.com/openshift/enhancements/pull/1783").
+		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+		mustRegister()
+
 	FeatureGateMaxUnavailableStatefulSet = newFeatureGate("MaxUnavailableStatefulSet").
 		reportProblemsToJiraComponent("apps").
 		contactPerson("atiratree").
@@ -155,20 +149,12 @@ var (
 		enhancementPR("https://github.com/kubernetes/enhancements/issues/3386").
 		mustRegister()

-	FeatureGatePrivateHostedZoneAWS = newFeatureGate("PrivateHostedZoneAWS").
-		reportProblemsToJiraComponent("Routing").
-		contactPerson("miciah").
-		productScope(ocpSpecific).
-		enhancementPR(legacyFeatureGateWithoutEnhancement).
-		enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
-		mustRegister()
-
 	FeatureGateSigstoreImageVerification = newFeatureGate("SigstoreImageVerification").
 		reportProblemsToJiraComponent("node").
 		contactPerson("sgrunert").
 		productScope(ocpSpecific).
 		enhancementPR(legacyFeatureGateWithoutEnhancement).
-		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default).
 		mustRegister()

 	FeatureGateSigstoreImageVerificationPKI = newFeatureGate("SigstoreImageVerificationPKI").
@@ -176,17 +162,9 @@ var (
 		contactPerson("QiWang").
 		productScope(ocpSpecific).
 		enhancementPR("https://github.com/openshift/enhancements/pull/1658").
-		enableIn(configv1.DevPreviewNoUpgrade).
+		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
 		mustRegister()

-	FeatureGateGCPLabelsTags = newFeatureGate("GCPLabelsTags").
-		reportProblemsToJiraComponent("Installer").
-		contactPerson("bhb").
-		productScope(ocpSpecific).
-		enhancementPR(legacyFeatureGateWithoutEnhancement).
-		enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
-		mustRegister()
-
 	FeatureGateAlibabaPlatform = newFeatureGate("AlibabaPlatform").
 		reportProblemsToJiraComponent("cloud-provider").
 		contactPerson("jspeed").
@@ -195,14 +173,6 @@ var (
 		enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
 		mustRegister()

-	FeatureGateCloudDualStackNodeIPs = newFeatureGate("CloudDualStackNodeIPs").
-		reportProblemsToJiraComponent("machine-config-operator/platform-baremetal").
-		contactPerson("mkowalsk").
-		productScope(kubernetes).
-		enhancementPR("https://github.com/kubernetes/enhancements/issues/3705").
-		enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
-		mustRegister()
-
 	FeatureGateVSphereHostVMGroupZonal = newFeatureGate("VSphereHostVMGroupZonal").
 		reportProblemsToJiraComponent("splat").
 		contactPerson("jcpowermac").
@@ -216,31 +186,15 @@ var (
 		contactPerson("vr4manta").
 		productScope(ocpSpecific).
 		enhancementPR("https://github.com/openshift/enhancements/pull/1709").
-		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
-		mustRegister()
-
-	FeatureGateVSphereMultiVCenters = newFeatureGate("VSphereMultiVCenters").
-		reportProblemsToJiraComponent("splat").
-		contactPerson("vr4manta").
-		productScope(ocpSpecific).
-		enhancementPR(legacyFeatureGateWithoutEnhancement).
-		enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
-		mustRegister()
-
-	FeatureGateVSphereStaticIPs = newFeatureGate("VSphereStaticIPs").
-		reportProblemsToJiraComponent("splat").
-		contactPerson("rvanderp3").
-		productScope(ocpSpecific).
-		enhancementPR(legacyFeatureGateWithoutEnhancement).
 		enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
 		mustRegister()

 	FeatureGateRouteExternalCertificate = newFeatureGate("RouteExternalCertificate").
 		reportProblemsToJiraComponent("router").
-		contactPerson("thejasn").
+		contactPerson("chiragkyal").
 		productScope(ocpSpecific).
 		enhancementPR(legacyFeatureGateWithoutEnhancement).
-		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+		enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
 		mustRegister()

 	FeatureGateCPMSMachineNamePrefix = newFeatureGate("CPMSMachineNamePrefix").
@@ -248,7 +202,7 @@ var (
 		contactPerson("chiragkyal").
 		productScope(ocpSpecific).
 		enhancementPR("https://github.com/openshift/enhancements/pull/1714").
-		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+		enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
 		mustRegister()

 	FeatureGateAdminNetworkPolicy = newFeatureGate("AdminNetworkPolicy").
@@ -280,7 +234,7 @@ var (
 		contactPerson("jcaamano").
 		productScope(ocpSpecific).
 		enhancementPR(legacyFeatureGateWithoutEnhancement).
-		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+		enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
 		mustRegister()

 	FeatureGateNetworkLiveMigration = newFeatureGate("NetworkLiveMigration").
@@ -307,14 +261,6 @@ var (
 		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
 		mustRegister()

-	FeatureGateHardwareSpeed = newFeatureGate("HardwareSpeed").
-		reportProblemsToJiraComponent("etcd").
-		contactPerson("hasbro17").
-		productScope(ocpSpecific).
-		enhancementPR(legacyFeatureGateWithoutEnhancement).
-		enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
-		mustRegister()
-
 	FeatureGateBackendQuotaGiB = newFeatureGate("EtcdBackendQuota").
 		reportProblemsToJiraComponent("etcd").
 		contactPerson("hasbro17").
@@ -346,22 +292,22 @@ var (
 		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
 		mustRegister()

-	FeatureGateVSphereControlPlaneMachineset = newFeatureGate("VSphereControlPlaneMachineSet").
-		reportProblemsToJiraComponent("splat").
-		contactPerson("rvanderp3").
-		productScope(ocpSpecific).
-		enhancementPR(legacyFeatureGateWithoutEnhancement).
-		enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
-		mustRegister()
-
 	FeatureGateMachineConfigNodes = newFeatureGate("MachineConfigNodes").
 		reportProblemsToJiraComponent("MachineConfigOperator").
-		contactPerson("cdoern").
+		contactPerson("ijanssen").
 		productScope(ocpSpecific).
-		enhancementPR(legacyFeatureGateWithoutEnhancement).
-		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+		enhancementPR("https://github.com/openshift/enhancements/pull/1765").
+		enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
 		mustRegister()

+	FeatureGateImageModeStatusReporting = newFeatureGate("ImageModeStatusReporting").
+		reportProblemsToJiraComponent("MachineConfigOperator").
+		contactPerson("ijanssen").
+		productScope(ocpSpecific).
+		enhancementPR("https://github.com/openshift/enhancements/pull/1809").
+		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+		mustRegister()
+
 	FeatureGateClusterAPIInstall = newFeatureGate("ClusterAPIInstall").
 		reportProblemsToJiraComponent("Installer").
 		contactPerson("vincepri").
@@ -385,6 +331,14 @@ var (
 		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
 		mustRegister()

+	FeatureGateAzureClusterHostedDNSInstall = newFeatureGate("AzureClusterHostedDNSInstall").
+		reportProblemsToJiraComponent("Installer").
+		contactPerson("sadasu").
+		productScope(ocpSpecific).
+		enhancementPR("https://github.com/openshift/enhancements/pull/1468").
+		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+		mustRegister()
+
 	FeatureGateMixedCPUsAllocation = newFeatureGate("MixedCPUsAllocation").
 		reportProblemsToJiraComponent("NodeTuningOperator").
 		contactPerson("titzhak").
@@ -409,21 +363,29 @@ var (
 		enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
 		mustRegister()

-	FeatureGateDisableKubeletCloudCredentialProviders = newFeatureGate("DisableKubeletCloudCredentialProviders").
-		reportProblemsToJiraComponent("cloud-provider").
-		contactPerson("jspeed").
-		productScope(kubernetes).
-		enhancementPR("https://github.com/kubernetes/enhancements/issues/2395").
-		enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
-		mustRegister()
+	FeatureGateManagedBootImagesvSphere = newFeatureGate("ManagedBootImagesvSphere").
+		reportProblemsToJiraComponent("MachineConfigOperator").
+		contactPerson("rsaini").
+		productScope(ocpSpecific).
+		enhancementPR("https://github.com/openshift/enhancements/pull/1496").
+		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+		mustRegister()

-	FeatureGateOnClusterBuild = newFeatureGate("OnClusterBuild").
-		reportProblemsToJiraComponent("MachineConfigOperator").
-		contactPerson("dkhater").
-		productScope(ocpSpecific).
-		enhancementPR(legacyFeatureGateWithoutEnhancement).
-		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
-		mustRegister()
+	FeatureGateManagedBootImagesAzure = newFeatureGate("ManagedBootImagesAzure").
+		reportProblemsToJiraComponent("MachineConfigOperator").
+		contactPerson("djoshy").
+		productScope(ocpSpecific).
+		enhancementPR("https://github.com/openshift/enhancements/pull/1761").
+		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+		mustRegister()
+
+	FeatureGateBootImageSkewEnforcement = newFeatureGate("BootImageSkewEnforcement").
+		reportProblemsToJiraComponent("MachineConfigOperator").
+		contactPerson("djoshy").
+		productScope(ocpSpecific).
+		enhancementPR("https://github.com/openshift/enhancements/pull/1761").
+		enableIn(configv1.DevPreviewNoUpgrade).
+		mustRegister()

 	FeatureGateBootcNodeManagement = newFeatureGate("BootcNodeManagement").
 		reportProblemsToJiraComponent("MachineConfigOperator").
@@ -451,10 +413,10 @@ var (
 	FeatureGatePinnedImages = newFeatureGate("PinnedImages").
 		reportProblemsToJiraComponent("MachineConfigOperator").
-		contactPerson("jhernand").
+		contactPerson("RishabhSaini").
 		productScope(ocpSpecific).
 		enhancementPR(legacyFeatureGateWithoutEnhancement).
-		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+		enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
 		mustRegister()

 	FeatureGateUpgradeStatus = newFeatureGate("UpgradeStatus").
@@ -462,7 +424,7 @@ var (
 		contactPerson("pmuller").
 		productScope(ocpSpecific).
 		enhancementPR(legacyFeatureGateWithoutEnhancement).
-		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default).
 		mustRegister()

 	FeatureGateTranslateStreamCloseWebsocketRequests = newFeatureGate("TranslateStreamCloseWebsocketRequests").
@@ -489,6 +451,14 @@ var (
 		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
 		mustRegister()

+	FeatureGateExternalSnapshotMetadata = newFeatureGate("ExternalSnapshotMetadata").
+		reportProblemsToJiraComponent("Storage / Kubernetes External Components").
+		contactPerson("jdobson").
+		productScope(kubernetes).
+		enhancementPR("https://github.com/kubernetes/enhancements/issues/3314").
+		enableIn(configv1.DevPreviewNoUpgrade).
+		mustRegister()
+
 	FeatureGateExternalOIDC = newFeatureGate("ExternalOIDC").
 		reportProblemsToJiraComponent("authentication").
 		contactPerson("liouk").
@@ -498,6 +468,15 @@ var (
 		enableForClusterProfile(Hypershift, configv1.Default, configv1.TechPreviewNoUpgrade).
 		mustRegister()

+	FeatureGateExternalOIDCWithAdditionalClaimMappings = newFeatureGate("ExternalOIDCWithUIDAndExtraClaimMappings").
+		reportProblemsToJiraComponent("authentication").
+		contactPerson("bpalmer").
+		productScope(ocpSpecific).
+		enhancementPR("https://github.com/openshift/enhancements/pull/1777").
+		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+		enableForClusterProfile(Hypershift, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+		mustRegister()
+
 	FeatureGateExample = newFeatureGate("Example").
 		reportProblemsToJiraComponent("cluster-config").
 		contactPerson("deads").
@@ -514,14 +493,6 @@ var (
 		enableIn(configv1.DevPreviewNoUpgrade).
 		mustRegister()

-	FeatureGatePlatformOperators = newFeatureGate("PlatformOperators").
-		reportProblemsToJiraComponent("olm").
-		contactPerson("joe").
-		productScope(ocpSpecific).
-		enhancementPR(legacyFeatureGateWithoutEnhancement).
-		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
-		mustRegister()
-
 	FeatureGateNewOLM = newFeatureGate("NewOLM").
 		reportProblemsToJiraComponent("olm").
 		contactPerson("joe").
@@ -538,6 +509,30 @@ var (
 		enableForClusterProfile(SelfManaged, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
 		mustRegister()

+	FeatureGateNewOLMPreflightPermissionChecks = newFeatureGate("NewOLMPreflightPermissionChecks").
+		reportProblemsToJiraComponent("olm").
+		contactPerson("tshort").
+		productScope(ocpSpecific).
+		enhancementPR("https://github.com/openshift/enhancements/pull/1768").
+		enableForClusterProfile(SelfManaged, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+		mustRegister()
+
+	FeatureGateNewOLMOwnSingleNamespace = newFeatureGate("NewOLMOwnSingleNamespace").
+		reportProblemsToJiraComponent("olm").
+		contactPerson("nschieder").
+		productScope(ocpSpecific).
+		enhancementPR("https://github.com/openshift/enhancements/pull/1774").
+		enableForClusterProfile(SelfManaged, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+		mustRegister()
+
+	FeatureGateNewOLMWebhookProviderOpenshiftServiceCA = newFeatureGate("NewOLMWebhookProviderOpenshiftServiceCA").
+		reportProblemsToJiraComponent("olm").
+		contactPerson("pegoncal").
+		productScope(ocpSpecific).
+		enhancementPR("https://github.com/openshift/enhancements/pull/1799").
+		enableForClusterProfile(SelfManaged, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+		mustRegister()
+
 	FeatureGateInsightsOnDemandDataGather = newFeatureGate("InsightsOnDemandDataGather").
 		reportProblemsToJiraComponent("insights").
 		contactPerson("tremes").
@@ -546,14 +541,6 @@ var (
 		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
 		mustRegister()

-	FeatureGateBareMetalLoadBalancer = newFeatureGate("BareMetalLoadBalancer").
-		reportProblemsToJiraComponent("metal").
-		contactPerson("EmilienM").
-		productScope(ocpSpecific).
-		enhancementPR(legacyFeatureGateWithoutEnhancement).
-		enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
-		mustRegister()
-
 	FeatureGateInsightsConfig = newFeatureGate("InsightsConfig").
 		reportProblemsToJiraComponent("insights").
 		contactPerson("tremes").
@@ -562,27 +549,11 @@ var (
 		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
 		mustRegister()

-	FeatureGateNodeDisruptionPolicy = newFeatureGate("NodeDisruptionPolicy").
-		reportProblemsToJiraComponent("MachineConfigOperator").
-		contactPerson("jerzhang").
-		productScope(ocpSpecific).
-		enhancementPR(legacyFeatureGateWithoutEnhancement).
-		enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
-		mustRegister()
-
 	FeatureGateMetricsCollectionProfiles = newFeatureGate("MetricsCollectionProfiles").
 		reportProblemsToJiraComponent("Monitoring").
 		contactPerson("rexagod").
 		productScope(ocpSpecific).
 		enhancementPR(legacyFeatureGateWithoutEnhancement).
-		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
-		mustRegister()
-
-	FeatureGateVSphereDriverConfiguration = newFeatureGate("VSphereDriverConfiguration").
-		reportProblemsToJiraComponent("Storage / Kubernetes External Components").
-		contactPerson("rbednar").
-		productScope(ocpSpecific).
-		enhancementPR(legacyFeatureGateWithoutEnhancement).
 		enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
 		mustRegister()

@@ -591,29 +562,23 @@ var (
 		contactPerson("cjschaef").
 		productScope(ocpSpecific).
 		enhancementPR(legacyFeatureGateWithoutEnhancement).
+		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
 		mustRegister()

-	FeatureGateChunkSizeMiB = newFeatureGate("ChunkSizeMiB").
-		reportProblemsToJiraComponent("Image Registry").
-		contactPerson("flavianmissi").
-		productScope(ocpSpecific).
-		enhancementPR(legacyFeatureGateWithoutEnhancement).
-		enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
-		mustRegister()
-
 	FeatureGateMachineAPIMigration = newFeatureGate("MachineAPIMigration").
 		reportProblemsToJiraComponent("OCPCLOUD").
 		contactPerson("jspeed").
 		productScope(ocpSpecific).
 		enhancementPR(legacyFeatureGateWithoutEnhancement).
+		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
 		mustRegister()

-	FeatureGatePersistentIPsForVirtualization = newFeatureGate("PersistentIPsForVirtualization").
-		reportProblemsToJiraComponent("CNV Network").
-		contactPerson("mduarted").
+	FeatureGateClusterAPIMachineManagementVSphere = newFeatureGate("ClusterAPIMachineManagementVSphere").
+		reportProblemsToJiraComponent("SPLAT").
+		contactPerson("jcpowermac").
 		productScope(ocpSpecific).
-		enhancementPR(legacyFeatureGateWithoutEnhancement).
-		enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+		enhancementPR("https://github.com/openshift/enhancements/pull/1465").
+		enableIn(configv1.DevPreviewNoUpgrade).
 		mustRegister()

 	FeatureGateClusterMonitoringConfig = newFeatureGate("ClusterMonitoringConfig").
@@ -624,14 +589,6 @@ var (
 		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
 		mustRegister()

-	FeatureGateMultiArchInstallAWS = newFeatureGate("MultiArchInstallAWS").
-		reportProblemsToJiraComponent("Installer").
-		contactPerson("r4f4").
-		productScope(ocpSpecific).
-		enhancementPR(legacyFeatureGateWithoutEnhancement).
-		enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
-		mustRegister()
-
 	FeatureGateMultiArchInstallAzure = newFeatureGate("MultiArchInstallAzure").
 		reportProblemsToJiraComponent("Installer").
 		contactPerson("r4f4").
@@ -639,14 +596,6 @@ var (
 		enhancementPR(legacyFeatureGateWithoutEnhancement).
 		mustRegister()

-	FeatureGateMultiArchInstallGCP = newFeatureGate("MultiArchInstallGCP").
-		reportProblemsToJiraComponent("Installer").
-		contactPerson("r4f4").
-		productScope(ocpSpecific).
-		enhancementPR(legacyFeatureGateWithoutEnhancement).
-		enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
-		mustRegister()
-
 	FeatureGateIngressControllerLBSubnetsAWS = newFeatureGate("IngressControllerLBSubnetsAWS").
 		reportProblemsToJiraComponent("Routing").
 		contactPerson("miciah").
@@ -655,14 +604,6 @@ var (
 		enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
 		mustRegister()

-	FeatureGateAWSEFSDriverVolumeMetrics = newFeatureGate("AWSEFSDriverVolumeMetrics").
-		reportProblemsToJiraComponent("Storage / Kubernetes External Components").
-		contactPerson("fbertina").
-		productScope(ocpSpecific).
-		enhancementPR(legacyFeatureGateWithoutEnhancement).
-		enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
-		mustRegister()
-
 	FeatureGateImageStreamImportMode = newFeatureGate("ImageStreamImportMode").
 		reportProblemsToJiraComponent("Multi-Arch").
 		contactPerson("psundara").
@@ -676,15 +617,18 @@ var (
 		contactPerson("haircommander").
 		productScope(kubernetes).
 		enhancementPR("https://github.com/kubernetes/enhancements/issues/127").
-		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default).
 		mustRegister()

+	// Note: this feature is perma-alpha, but it is safe and desirable to enable.
+	// It was an oversight in upstream to not remove the feature gate after the version skew became safe in 1.33.
+	// See https://github.com/kubernetes/enhancements/tree/d4226c42/keps/sig-node/127-user-namespaces#pod-security-standards-pss-integration
 	FeatureGateUserNamespacesPodSecurityStandards = newFeatureGate("UserNamespacesPodSecurityStandards").
 		reportProblemsToJiraComponent("Node").
 		contactPerson("haircommander").
 		productScope(kubernetes).
 		enhancementPR("https://github.com/kubernetes/enhancements/issues/127").
-		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default).
 		mustRegister()

 	FeatureGateProcMountType = newFeatureGate("ProcMountType").
@@ -692,7 +636,7 @@ var (
 		contactPerson("haircommander").
 		productScope(kubernetes).
 		enhancementPR("https://github.com/kubernetes/enhancements/issues/4265").
-		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default).
 		mustRegister()

 	FeatureGateVSphereMultiNetworks = newFeatureGate("VSphereMultiNetworks").
@@ -700,7 +644,7 @@ var (
 		contactPerson("rvanderp").
 		productScope(ocpSpecific).
 		enhancementPR(legacyFeatureGateWithoutEnhancement).
-		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+		enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
 		mustRegister()

 	FeatureGateIngressControllerDynamicConfigurationManager = newFeatureGate("IngressControllerDynamicConfigurationManager").
@@ -740,18 +684,15 @@ var (
 		contactPerson("eggfoobar").
 		productScope(ocpSpecific).
 		enhancementPR("https://github.com/openshift/enhancements/pull/1674").
-		// TODO: Do not go GA until jira issue is resolved: https://issues.redhat.com/browse/OCPEDGE-1637
-		// Annotations must correctly handle either DualReplica or HighlyAvailableArbiter going GA with
-		// the other still in TechPreview.
-		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
-		mustRegister()
+		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default).
+		mustRegister()

 	FeatureGateCVOConfiguration = newFeatureGate("ClusterVersionOperatorConfiguration").
 		reportProblemsToJiraComponent("Cluster Version Operator").
 		contactPerson("dhurta").
 		productScope(ocpSpecific).
 		enhancementPR("https://github.com/openshift/enhancements/pull/1492").
-		enableIn(configv1.DevPreviewNoUpgrade).
+		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
 		mustRegister()

 	FeatureGateGCPCustomAPIEndpoints = newFeatureGate("GCPCustomAPIEndpoints").
@@ -770,20 +711,12 @@ var (
 		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
 		mustRegister()

-	FeatureGateSELinuxChangePolicy = newFeatureGate("SELinuxChangePolicy").
-		reportProblemsToJiraComponent("Storage / Kubernetes").
-		contactPerson("jsafrane").
-		productScope(kubernetes).
-		enhancementPR("https://github.com/kubernetes/enhancements/issues/1710").
-		enableIn(configv1.DevPreviewNoUpgrade).
-		mustRegister()
-
 	FeatureGateSELinuxMount = newFeatureGate("SELinuxMount").
 		reportProblemsToJiraComponent("Storage / Kubernetes").
 		contactPerson("jsafrane").
 		productScope(kubernetes).
 		enhancementPR("https://github.com/kubernetes/enhancements/issues/1710").
-		enableIn(configv1.DevPreviewNoUpgrade).
+		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
 		mustRegister()

 	FeatureGateDualReplica = newFeatureGate("DualReplica").
@@ -791,11 +724,8 @@ var (
 		contactPerson("jaypoulz").
 		productScope(ocpSpecific).
 		enhancementPR("https://github.com/openshift/enhancements/pull/1675").
-		// TODO: Do not go GA until jira issue is resolved: https://issues.redhat.com/browse/OCPEDGE-1637
-		// Annotations must correctly handle either DualReplica or HighlyAvailableArbiter going GA with
-		// the other still in TechPreview.
-		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
-		mustRegister()
+		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+		mustRegister()

 	FeatureGateGatewayAPIController = newFeatureGate("GatewayAPIController").
 		reportProblemsToJiraComponent("Routing").
@@ -807,14 +737,13 @@ var (
 		// A dedicated feature gate now controls the Gateway Controller to distinguish
 		// its production readiness from that of the CRDs.
 		enhancementPR("https://github.com/openshift/enhancements/pull/1756").
-		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+		enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
 		mustRegister()

 	FeatureShortCertRotation = newFeatureGate("ShortCertRotation").
 		reportProblemsToJiraComponent("kube-apiserver").
 		contactPerson("vrutkovs").
 		productScope(ocpSpecific).
-		enableIn(configv1.DevPreviewNoUpgrade).
 		enhancementPR("https://github.com/openshift/enhancements/pull/1670").
 		mustRegister()

@@ -825,4 +754,155 @@ var (
 		enhancementPR("https://github.com/openshift/enhancements/pull/1748").
 		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
 		mustRegister()
+
+	FeatureGateAzureMultiDisk = newFeatureGate("AzureMultiDisk").
+		reportProblemsToJiraComponent("splat").
+		contactPerson("jcpowermac").
+		productScope(ocpSpecific).
+		enhancementPR("https://github.com/openshift/enhancements/pull/1779").
+		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+		mustRegister()
+
+	FeatureGateStoragePerformantSecurityPolicy = newFeatureGate("StoragePerformantSecurityPolicy").
+		reportProblemsToJiraComponent("Storage / Kubernetes External Components").
+		contactPerson("hekumar").
+		productScope(ocpSpecific).
+		enhancementPR("https://github.com/openshift/enhancements/pull/1804").
+		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default).
+		mustRegister()
+
+	FeatureGateMultiDiskSetup = newFeatureGate("MultiDiskSetup").
+		reportProblemsToJiraComponent("splat").
+		contactPerson("jcpowermac").
+		productScope(ocpSpecific).
+		enhancementPR("https://github.com/openshift/enhancements/pull/1805").
+		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+		mustRegister()
+
+	FeatureGateAWSDedicatedHosts = newFeatureGate("AWSDedicatedHosts").
+		reportProblemsToJiraComponent("Installer").
+		contactPerson("faermanj").
+		productScope(ocpSpecific).
+		enhancementPR("https://github.com/openshift/enhancements/pull/1781").
+		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+		mustRegister()
+
+	FeatureGateVSphereMixedNodeEnv = newFeatureGate("VSphereMixedNodeEnv").
+		reportProblemsToJiraComponent("splat").
+		contactPerson("vr4manta").
+		productScope(ocpSpecific).
+		enhancementPR("https://github.com/openshift/enhancements/pull/1772").
+		enableIn(configv1.DevPreviewNoUpgrade).
+		mustRegister()
+
+	FeatureGatePreconfiguredUDNAddresses = newFeatureGate("PreconfiguredUDNAddresses").
+		reportProblemsToJiraComponent("Networking/ovn-kubernetes").
+		contactPerson("kyrtapz").
+		productScope(ocpSpecific).
+		enhancementPR("https://github.com/openshift/enhancements/pull/1793").
+		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+		mustRegister()
+
+	FeatureGateAWSServiceLBNetworkSecurityGroup = newFeatureGate("AWSServiceLBNetworkSecurityGroup").
+		reportProblemsToJiraComponent("Cloud Compute / Cloud Controller Manager").
+		contactPerson("mtulio").
+		productScope(ocpSpecific).
+		enhancementPR("https://github.com/openshift/enhancements/pull/1802").
+		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+		mustRegister()
+
+	FeatureGateImageVolume = newFeatureGate("ImageVolume").
+		reportProblemsToJiraComponent("Node").
+		contactPerson("haircommander").
+		productScope(kubernetes).
+		enhancementPR("https://github.com/openshift/enhancements/pull/1792").
+		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade, configv1.Default).
+		mustRegister()
+
+	FeatureGateNoRegistryClusterOperations = newFeatureGate("NoRegistryClusterOperations").
+		reportProblemsToJiraComponent("Installer / Agent based installation").
+		contactPerson("andfasano").
+		productScope(ocpSpecific).
+		enhancementPR("https://github.com/openshift/enhancements/pull/1821").
+		enableForClusterProfile(SelfManaged, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+		mustRegister()
+
+	FeatureGateGCPClusterHostedDNSInstall = newFeatureGate("GCPClusterHostedDNSInstall").
+		reportProblemsToJiraComponent("Installer").
+		contactPerson("barbacbd").
+		productScope(ocpSpecific).
+		enhancementPR("https://github.com/openshift/enhancements/pull/1468").
+		enableIn(configv1.Default, configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+		mustRegister()
+
+	FeatureGateAWSClusterHostedDNSInstall = newFeatureGate("AWSClusterHostedDNSInstall").
+		reportProblemsToJiraComponent("Installer").
+		contactPerson("barbacbd").
+		productScope(ocpSpecific).
+		enhancementPR("https://github.com/openshift/enhancements/pull/1468").
+		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+		mustRegister()
+
+	FeatureGateGCPCustomAPIEndpointsInstall = newFeatureGate("GCPCustomAPIEndpointsInstall").
+		reportProblemsToJiraComponent("Installer").
+		contactPerson("barbacbd").
+		productScope(ocpSpecific).
+		enhancementPR("https://github.com/openshift/enhancements/pull/1492").
+		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+		mustRegister()
+
+	FeatureGateIrreconcilableMachineConfig = newFeatureGate("IrreconcilableMachineConfig").
+		reportProblemsToJiraComponent("MachineConfigOperator").
+		contactPerson("pabrodri").
+		productScope(ocpSpecific).
+		enhancementPR("https://github.com/openshift/enhancements/pull/1785").
+		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+		mustRegister()
+	FeatureGateAWSDualStackInstall = newFeatureGate("AWSDualStackInstall").
+		reportProblemsToJiraComponent("Installer").
+		contactPerson("sadasu").
+		productScope(ocpSpecific).
+		enhancementPR("https://github.com/openshift/enhancements/pull/1806").
+		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+		mustRegister()
+
+	FeatureGateAzureDualStackInstall = newFeatureGate("AzureDualStackInstall").
+		reportProblemsToJiraComponent("Installer").
+		contactPerson("jhixson74").
+		productScope(ocpSpecific).
+		enhancementPR("https://github.com/openshift/enhancements/pull/1806").
+		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+		mustRegister()
+
+	FeatureGateGCPDualStackInstall = newFeatureGate("GCPDualStackInstall").
+		reportProblemsToJiraComponent("Installer").
+		contactPerson("barbacbd").
+		productScope(ocpSpecific).
+		enhancementPR("https://github.com/openshift/enhancements/pull/1806").
+		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+		mustRegister()
+
+	FeatureCBORServingAndStorage = newFeatureGate("CBORServingAndStorage").
+		reportProblemsToJiraComponent("kube-apiserver").
+		contactPerson("benluddy").
+		productScope(kubernetes).
+		enhancementPR("https://github.com/kubernetes/enhancements/issues/4222").
+		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+		mustRegister()
+
+	FeatureCBORClientsAllowCBOR = newFeatureGate("ClientsAllowCBOR").
+		reportProblemsToJiraComponent("kube-apiserver").
+		contactPerson("benluddy").
+		productScope(kubernetes).
+		enhancementPR("https://github.com/kubernetes/enhancements/issues/4222").
+		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+		mustRegister()
+
+	FeatureClientsPreferCBOR = newFeatureGate("ClientsPreferCBOR").
+		reportProblemsToJiraComponent("kube-apiserver").
+		contactPerson("benluddy").
+		productScope(kubernetes).
+		enhancementPR("https://github.com/kubernetes/enhancements/issues/4222").
+		enableIn(configv1.DevPreviewNoUpgrade, configv1.TechPreviewNoUpgrade).
+		mustRegister()
 )
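Every declaration in features.go above follows the same chained-builder shape. Below is a simplified, self-contained sketch of how such a builder could work — the names are hypothetical stand-ins, not the actual openshift/api implementation — illustrating why each chain must end in mustRegister(): mistakes surface as panics at package initialization rather than at runtime.

    package main

    import "fmt"

    type FeatureSet string

    const (
        Default              FeatureSet = ""
        DevPreviewNoUpgrade  FeatureSet = "DevPreviewNoUpgrade"
        TechPreviewNoUpgrade FeatureSet = "TechPreviewNoUpgrade"
    )

    // registry collects every gate declared through the builder.
    var registry = map[string][]FeatureSet{}

    type featureGateBuilder struct {
        name      string
        component string
        contact   string
        enabledIn []FeatureSet
    }

    func newFeatureGate(name string) *featureGateBuilder { return &featureGateBuilder{name: name} }

    func (b *featureGateBuilder) reportProblemsToJiraComponent(c string) *featureGateBuilder {
        b.component = c
        return b
    }

    func (b *featureGateBuilder) contactPerson(p string) *featureGateBuilder {
        b.contact = p
        return b
    }

    func (b *featureGateBuilder) enableIn(sets ...FeatureSet) *featureGateBuilder {
        b.enabledIn = append(b.enabledIn, sets...)
        return b
    }

    // mustRegister panics on incomplete or duplicate declarations, so every
    // chain ends with it and errors fail fast at init time.
    func (b *featureGateBuilder) mustRegister() string {
        if b.name == "" || b.component == "" {
            panic("incomplete feature gate declaration: " + b.name)
        }
        if _, dup := registry[b.name]; dup {
            panic("feature gate declared twice: " + b.name)
        }
        registry[b.name] = b.enabledIn
        return b.name
    }

    func main() {
        gate := newFeatureGate("ExampleGate").
            reportProblemsToJiraComponent("cluster-config").
            contactPerson("deads").
            enableIn(DevPreviewNoUpgrade, TechPreviewNoUpgrade).
            mustRegister()
        fmt.Println(gate, registry[gate])
    }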
diff --git a/vendor/github.com/openshift/api/features/legacyfeaturegates.go b/vendor/github.com/openshift/api/features/legacyfeaturegates.go
index 132a3dacb3..dd11fdf663 100644
--- a/vendor/github.com/openshift/api/features/legacyfeaturegates.go
+++ b/vendor/github.com/openshift/api/features/legacyfeaturegates.go
@@ -17,14 +17,10 @@ var legacyFeatureGates = sets.New(
 	// never add to this list, if you think you have an exception ask @deads2k
 	"AzureWorkloadIdentity",
 	// never add to this list, if you think you have an exception ask @deads2k
-	"BareMetalLoadBalancer",
-	// never add to this list, if you think you have an exception ask @deads2k
 	"BootcNodeManagement",
 	// never add to this list, if you think you have an exception ask @deads2k
 	"BuildCSIVolumes",
 	// never add to this list, if you think you have an exception ask @deads2k
-	"ChunkSizeMiB",
-	// never add to this list, if you think you have an exception ask @deads2k
 	"ClusterAPIInstall",
 	// never add to this list, if you think you have an exception ask @deads2k
 	"ClusterAPIInstallIBMCloud",
@@ -41,8 +37,6 @@ var legacyFeatureGates = sets.New(
 	// never add to this list, if you think you have an exception ask @deads2k
 	"GCPClusterHostedDNS",
 	// never add to this list, if you think you have an exception ask @deads2k
-	"GCPLabelsTags",
-	// never add to this list, if you think you have an exception ask @deads2k
 	"GatewayAPI",
 	// never add to this list, if you think you have an exception ask @deads2k
 	"HardwareSpeed",
@@ -93,18 +87,12 @@ var legacyFeatureGates = sets.New(
 	// never add to this list, if you think you have an exception ask @deads2k
 	"NewOLM",
 	// never add to this list, if you think you have an exception ask @deads2k
-	"NodeDisruptionPolicy",
-	// never add to this list, if you think you have an exception ask @deads2k
 	"OVNObservability",
 	// never add to this list, if you think you have an exception ask @deads2k
-	"OnClusterBuild",
-	// never add to this list, if you think you have an exception ask @deads2k
 	"PersistentIPsForVirtualization",
 	// never add to this list, if you think you have an exception ask @deads2k
 	"PinnedImages",
 	// never add to this list, if you think you have an exception ask @deads2k
-	"PlatformOperators",
-	// never add to this list, if you think you have an exception ask @deads2k
 	"PrivateHostedZoneAWS",
 	// never add to this list, if you think you have an exception ask @deads2k
 	"RouteAdvertisements",
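As the hunk header shows, legacyFeatureGates is a plain string set built with sets.New — presumably the generic set type from k8s.io/apimachinery/pkg/util/sets — so pruning retired gates is just deleting elements. A small sketch of how a reviewer might check membership after the pruning; the surviving entries are copied from the context lines above, and the loop is a hypothetical verification, not code from the repo:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/util/sets"
    )

    func main() {
        // A few surviving entries, copied from the hunk's context lines.
        legacyFeatureGates := sets.New(
            "AzureWorkloadIdentity",
            "BootcNodeManagement",
            "BuildCSIVolumes",
            "GatewayAPI",
            "PinnedImages",
        )

        // Gates deleted by this patch should no longer be members.
        for _, removed := range []string{"BareMetalLoadBalancer", "ChunkSizeMiB", "GCPLabelsTags"} {
            fmt.Printf("%s still listed as legacy: %v\n", removed, legacyFeatureGates.Has(removed))
        }
    }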
diff --git a/vendor/github.com/openshift/api/helm/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/helm/v1beta1/zz_generated.deepcopy.go
index da33cc3efb..278e681108 100644
--- a/vendor/github.com/openshift/api/helm/v1beta1/zz_generated.deepcopy.go
+++ b/vendor/github.com/openshift/api/helm/v1beta1/zz_generated.deepcopy.go
@@ -1,7 +1,7 @@
 //go:build !ignore_autogenerated
 // +build !ignore_autogenerated

-// Code generated by deepcopy-gen. DO NOT EDIT.
+// Code generated by codegen. DO NOT EDIT.

 package v1beta1

diff --git a/vendor/github.com/openshift/api/image/docker10/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/image/docker10/zz_generated.deepcopy.go
index 2ce8330b2c..be828dbe45 100644
--- a/vendor/github.com/openshift/api/image/docker10/zz_generated.deepcopy.go
+++ b/vendor/github.com/openshift/api/image/docker10/zz_generated.deepcopy.go
@@ -1,7 +1,7 @@
 //go:build !ignore_autogenerated
 // +build !ignore_autogenerated

-// Code generated by deepcopy-gen. DO NOT EDIT.
+// Code generated by codegen. DO NOT EDIT.

 package docker10

diff --git a/vendor/github.com/openshift/api/image/dockerpre012/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/image/dockerpre012/zz_generated.deepcopy.go
index 0e8ecb20d5..c3c9b29d20 100644
--- a/vendor/github.com/openshift/api/image/dockerpre012/zz_generated.deepcopy.go
+++ b/vendor/github.com/openshift/api/image/dockerpre012/zz_generated.deepcopy.go
@@ -1,7 +1,7 @@
 //go:build !ignore_autogenerated
 // +build !ignore_autogenerated

-// Code generated by deepcopy-gen. DO NOT EDIT.
+// Code generated by codegen. DO NOT EDIT.

 package dockerpre012

diff --git a/vendor/github.com/openshift/api/image/v1/generated.proto b/vendor/github.com/openshift/api/image/v1/generated.proto
index dabdc6d84a..74d8d66c47 100644
--- a/vendor/github.com/openshift/api/image/v1/generated.proto
+++ b/vendor/github.com/openshift/api/image/v1/generated.proto
@@ -31,11 +31,11 @@ message DockerImageReference {
   optional string iD = 5;
 }

-// Image is an immutable representation of a container image and metadata at a point in time.
+// Image is an immutable representation of a container image and its metadata at a point in time.
 // Images are named by taking a hash of their contents (metadata and content) and any change
 // in format, content, or metadata results in a new name. The images resource is primarily
 // for use by cluster administrators and integrations like the cluster image registry - end
-// users instead access images via the imagestreamtags or imagestreamimages resources. While
+// users, instead, access images via the imagestreamtags or imagestreamimages resources. While
 // image metadata is stored in the API, any integration that implements the container image
 // registry API must provide its own storage for the raw manifest data, image config, and
 // layer contents.
@@ -353,12 +353,15 @@ message ImageStreamImportSpec {
 // ImageStreamImportStatus contains information about the status of an image stream import.
 message ImageStreamImportStatus {
   // import is the image stream that was successfully updated or created when 'to' was set.
+  // +optional
   optional ImageStream import = 1;

   // repository is set if spec.repository was set to the outcome of the import
+  // +optional
   optional RepositoryImportStatus repository = 2;

   // images is set with the result of importing spec.images
+  // +optional
   repeated ImageImportStatus images = 3;
 }

@@ -436,17 +439,20 @@ message ImageStreamSpec {
 message ImageStreamStatus {
   // dockerImageRepository represents the effective location this stream may be accessed at.
   // May be empty until the server determines where the repository is located
+  // +optional
   optional string dockerImageRepository = 1;

   // publicDockerImageRepository represents the public location from where the image can
   // be pulled outside the cluster. This field may be empty if the administrator
   // has not exposed the integrated registry externally.
+  // +optional
   optional string publicDockerImageRepository = 3;

   // tags are a historical record of images associated with each tag. The first entry in the
   // TagEvent array is the currently tagged image.
   // +patchMergeKey=tag
   // +patchStrategy=merge
+  // +optional
   repeated NamedTagEventList tags = 2;
 }

@@ -504,7 +510,7 @@ message ImageStreamTagList {
 // the status history, and the currently referenced image (if any) of the provided
 // tag. This type replaces the ImageStreamTag by providing a full view of the tag.
 // ImageTags are returned for every spec or status tag present on the image stream.
-// If no tag exists in either form a not found error will be returned by the API.
+// If no tag exists in either form, a not found error will be returned by the API.
 // A create operation will succeed if no spec tag has already been defined and the
 // spec field is set. Delete will remove both spec and status elements from the
 // image stream.
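The `+optional` markers added in the proto above (and mirrored in the Go types that follow) tell the OpenAPI/CRD generators the fields may be absent; in Go they pair with pointer or slice fields carrying `omitempty` JSON tags, so an empty status round-trips as `{}`. A minimal sketch with hypothetical stand-in types, not the vendored ones:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // ImageStream is a reduced stand-in for the real type.
    type ImageStream struct {
        Name string `json:"name,omitempty"`
    }

    // ImageStreamImportStatus sketch: optional fields are pointers or slices
    // with omitempty, so unset fields disappear from the serialized form.
    type ImageStreamImportStatus struct {
        Import *ImageStream `json:"import,omitempty"`
        Images []string     `json:"images,omitempty"`
    }

    func main() {
        empty, _ := json.Marshal(ImageStreamImportStatus{})
        fmt.Println(string(empty)) // {}

        full, _ := json.Marshal(ImageStreamImportStatus{Import: &ImageStream{Name: "app"}})
        fmt.Println(string(full)) // {"import":{"name":"app"}}
    }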
diff --git a/vendor/github.com/openshift/api/image/v1/types.go b/vendor/github.com/openshift/api/image/v1/types.go
index d4ee4bff69..e45d6b10da 100644
--- a/vendor/github.com/openshift/api/image/v1/types.go
+++ b/vendor/github.com/openshift/api/image/v1/types.go
@@ -27,11 +27,11 @@ type ImageList struct {
 // +genclient:nonNamespaced
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

-// Image is an immutable representation of a container image and metadata at a point in time.
+// Image is an immutable representation of a container image and its metadata at a point in time.
 // Images are named by taking a hash of their contents (metadata and content) and any change
 // in format, content, or metadata results in a new name. The images resource is primarily
 // for use by cluster administrators and integrations like the cluster image registry - end
-// users instead access images via the imagestreamtags or imagestreamimages resources. While
+// users, instead, access images via the imagestreamtags or imagestreamimages resources. While
 // image metadata is stored in the API, any integration that implements the container image
 // registry API must provide its own storage for the raw manifest data, image config, and
 // layer contents.
@@ -357,15 +357,18 @@ type TagReferencePolicy struct {
 type ImageStreamStatus struct {
 	// dockerImageRepository represents the effective location this stream may be accessed at.
 	// May be empty until the server determines where the repository is located
+	// +optional
 	DockerImageRepository string `json:"dockerImageRepository" protobuf:"bytes,1,opt,name=dockerImageRepository"`
 	// publicDockerImageRepository represents the public location from where the image can
 	// be pulled outside the cluster. This field may be empty if the administrator
 	// has not exposed the integrated registry externally.
+	// +optional
 	PublicDockerImageRepository string `json:"publicDockerImageRepository,omitempty" protobuf:"bytes,3,opt,name=publicDockerImageRepository"`
 	// tags are a historical record of images associated with each tag. The first entry in the
 	// TagEvent array is the currently tagged image.
 	// +patchMergeKey=tag
 	// +patchStrategy=merge
+	// +optional
 	Tags []NamedTagEventList `json:"tags,omitempty" patchStrategy:"merge" patchMergeKey:"tag" protobuf:"bytes,2,rep,name=tags"`
 }

@@ -512,7 +515,7 @@ type ImageStreamTagList struct {
 // the status history, and the currently referenced image (if any) of the provided
 // tag. This type replaces the ImageStreamTag by providing a full view of the tag.
 // ImageTags are returned for every spec or status tag present on the image stream.
-// If no tag exists in either form a not found error will be returned by the API.
+// If no tag exists in either form, a not found error will be returned by the API.
 // A create operation will succeed if no spec tag has already been defined and the
 // spec field is set. Delete will remove both spec and status elements from the
 // image stream.
@@ -701,10 +704,13 @@ type ImageStreamImportSpec struct {
 // ImageStreamImportStatus contains information about the status of an image stream import.
 type ImageStreamImportStatus struct {
 	// import is the image stream that was successfully updated or created when 'to' was set.
+	// +optional
 	Import *ImageStream `json:"import,omitempty" protobuf:"bytes,1,opt,name=import"`
 	// repository is set if spec.repository was set to the outcome of the import
+	// +optional
 	Repository *RepositoryImportStatus `json:"repository,omitempty" protobuf:"bytes,2,opt,name=repository"`
 	// images is set with the result of importing spec.images
+	// +optional
 	Images []ImageImportStatus `json:"images,omitempty" protobuf:"bytes,3,rep,name=images"`
 }

diff --git a/vendor/github.com/openshift/api/image/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/image/v1/zz_generated.deepcopy.go
index 953f70263c..af6eee05c5 100644
--- a/vendor/github.com/openshift/api/image/v1/zz_generated.deepcopy.go
+++ b/vendor/github.com/openshift/api/image/v1/zz_generated.deepcopy.go
@@ -1,7 +1,7 @@
 //go:build !ignore_autogenerated
 // +build !ignore_autogenerated

-// Code generated by deepcopy-gen. DO NOT EDIT.
+// Code generated by codegen. DO NOT EDIT.

 package v1

diff --git a/vendor/github.com/openshift/api/image/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/image/v1/zz_generated.swagger_doc_generated.go
index e0720bec77..9671768f6e 100644
--- a/vendor/github.com/openshift/api/image/v1/zz_generated.swagger_doc_generated.go
+++ b/vendor/github.com/openshift/api/image/v1/zz_generated.swagger_doc_generated.go
@@ -25,7 +25,7 @@ func (DockerImageReference) SwaggerDoc() map[string]string {
 }

 var map_Image = map[string]string{
-	"": "Image is an immutable representation of a container image and metadata at a point in time. Images are named by taking a hash of their contents (metadata and content) and any change in format, content, or metadata results in a new name. The images resource is primarily for use by cluster administrators and integrations like the cluster image registry - end users instead access images via the imagestreamtags or imagestreamimages resources. While image metadata is stored in the API, any integration that implements the container image registry API must provide its own storage for the raw manifest data, image config, and layer contents.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"": "Image is an immutable representation of a container image and its metadata at a point in time. Images are named by taking a hash of their contents (metadata and content) and any change in format, content, or metadata results in a new name. The images resource is primarily for use by cluster administrators and integrations like the cluster image registry - end users, instead, access images via the imagestreamtags or imagestreamimages resources. While image metadata is stored in the API, any integration that implements the container image registry API must provide its own storage for the raw manifest data, image config, and layer contents.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
 	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
 	"dockerImageReference": "dockerImageReference is the string that can be used to pull this image.",
 	"dockerImageMetadata": "dockerImageMetadata contains metadata about this image",
@@ -284,7 +284,7 @@ func (ImageStreamTagList) SwaggerDoc() map[string]string {
 }

 var map_ImageTag = map[string]string{
-	"": "ImageTag represents a single tag within an image stream and includes the spec, the status history, and the currently referenced image (if any) of the provided tag. This type replaces the ImageStreamTag by providing a full view of the tag. ImageTags are returned for every spec or status tag present on the image stream. If no tag exists in either form a not found error will be returned by the API. A create operation will succeed if no spec tag has already been defined and the spec field is set. Delete will remove both spec and status elements from the image stream.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"": "ImageTag represents a single tag within an image stream and includes the spec, the status history, and the currently referenced image (if any) of the provided tag. This type replaces the ImageStreamTag by providing a full view of the tag. ImageTags are returned for every spec or status tag present on the image stream. If no tag exists in either form, a not found error will be returned by the API. A create operation will succeed if no spec tag has already been defined and the spec field is set. Delete will remove both spec and status elements from the image stream.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
 	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
 	"spec": "spec is the spec tag associated with this image stream tag, and it may be null if only pushes have occurred to this image stream.",
 	"status": "status is the status tag details associated with this image stream tag, and it may be null if no push or import has been performed.",
diff --git a/vendor/github.com/openshift/api/imageregistry/v1/types.go b/vendor/github.com/openshift/api/imageregistry/v1/types.go
index 3dde9d4c3c..4fea205401 100644
--- a/vendor/github.com/openshift/api/imageregistry/v1/types.go
+++ b/vendor/github.com/openshift/api/imageregistry/v1/types.go
@@ -129,9 +129,11 @@ type ImageRegistryStatus struct {
 	operatorv1.OperatorStatus `json:",inline"`

 	// storageManaged is deprecated, please refer to Storage.managementState
+	// +required
 	StorageManaged bool `json:"storageManaged"`
 	// storage indicates the current applied storage configuration of the
 	// registry.
+	// +required
 	Storage ImageRegistryConfigStorage `json:"storage"`
 }

@@ -213,7 +215,6 @@ type ImageRegistryConfigStorageS3 struct {
 	// The minimum value is 5 and the maximum value is 5120 (5 GiB).
// +kubebuilder:validation:Minimum=5 // +kubebuilder:validation:Maximum=5120 - // +openshift:enable:FeatureGate=ChunkSizeMiB // +optional ChunkSizeMiB int32 `json:"chunkSizeMiB,omitempty"` // encrypt specifies whether the registry stores the image in encrypted @@ -483,7 +484,7 @@ type EncryptionAlibaba struct { // +kubebuilder:validation:Enum="KMS";"AES256" // +kubebuilder:default="AES256" // +optional - Method AlibabaEncryptionMethod `json:"method"` + Method AlibabaEncryptionMethod `json:"method,omitempty"` // kms (key management service) is an encryption type that holds the struct for KMS KeyID // +optional diff --git a/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.deepcopy.go index 63f25fc19e..335bd24216 100644 --- a/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ //go:build !ignore_autogenerated // +build !ignore_autogenerated -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by codegen. DO NOT EDIT. package v1 diff --git a/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.featuregated-crd-manifests.yaml index a6ccc2262a..95613c7ae2 100644 --- a/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.featuregated-crd-manifests.yaml @@ -4,8 +4,7 @@ configs.imageregistry.operator.openshift.io: CRDName: configs.imageregistry.operator.openshift.io Capability: "" Category: "" - FeatureGates: - - ChunkSizeMiB + FeatureGates: [] FilenameOperatorName: "" FilenameOperatorOrdering: "00" FilenameRunLevel: "" diff --git a/vendor/github.com/openshift/api/kubecontrolplane/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/kubecontrolplane/v1/zz_generated.deepcopy.go index e4378aa527..2502e1fe37 100644 --- a/vendor/github.com/openshift/api/kubecontrolplane/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/kubecontrolplane/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ //go:build !ignore_autogenerated // +build !ignore_autogenerated -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by codegen. DO NOT EDIT. package v1 diff --git a/vendor/github.com/openshift/api/legacyconfig/v1/types.go b/vendor/github.com/openshift/api/legacyconfig/v1/types.go index c0e03c233a..f2db8e9ccb 100644 --- a/vendor/github.com/openshift/api/legacyconfig/v1/types.go +++ b/vendor/github.com/openshift/api/legacyconfig/v1/types.go @@ -801,7 +801,7 @@ type TokenConfig struct { // accessTokenInactivityTimeoutSeconds defined the default token // inactivity timeout for tokens granted by any client. // Setting it to nil means the feature is completely disabled (default) - // The default setting can be overriden on OAuthClient basis. + // The default setting can be overridden on a per-OAuthClient basis. // The value represents the maximum amount of time that can occur between // consecutive uses of the token. Tokens become invalid if they are not // used within this temporal window.
The user will need to acquire a new diff --git a/vendor/github.com/openshift/api/legacyconfig/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/legacyconfig/v1/zz_generated.deepcopy.go index 17d717ea45..842210c24e 100644 --- a/vendor/github.com/openshift/api/legacyconfig/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/legacyconfig/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ //go:build !ignore_autogenerated // +build !ignore_autogenerated -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by codegen. DO NOT EDIT. package v1 diff --git a/vendor/github.com/openshift/api/legacyconfig/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/legacyconfig/v1/zz_generated.swagger_doc_generated.go index 42444e8aee..a915c00425 100644 --- a/vendor/github.com/openshift/api/legacyconfig/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/legacyconfig/v1/zz_generated.swagger_doc_generated.go @@ -927,7 +927,7 @@ var map_TokenConfig = map[string]string{ "": "TokenConfig holds the necessary configuration options for authorization and access tokens", "authorizeTokenMaxAgeSeconds": "authorizeTokenMaxAgeSeconds defines the maximum age of authorize tokens", "accessTokenMaxAgeSeconds": "accessTokenMaxAgeSeconds defines the maximum age of access tokens", - "accessTokenInactivityTimeoutSeconds": "accessTokenInactivityTimeoutSeconds defined the default token inactivity timeout for tokens granted by any client. Setting it to nil means the feature is completely disabled (default) The default setting can be overriden on OAuthClient basis. The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. Valid values are: - 0: Tokens never time out - X: Tokens time out if there is no activity for X seconds The current minimum allowed value for X is 300 (5 minutes)", + "accessTokenInactivityTimeoutSeconds": "accessTokenInactivityTimeoutSeconds defines the default token inactivity timeout for tokens granted by any client. Setting it to nil means the feature is completely disabled (default) The default setting can be overridden on a per-OAuthClient basis. The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. Valid values are: - 0: Tokens never time out - X: Tokens time out if there is no activity for X seconds The current minimum allowed value for X is 300 (5 minutes)", } func (TokenConfig) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/machine/v1/types_controlplanemachineset.go b/vendor/github.com/openshift/api/machine/v1/types_controlplanemachineset.go index ead8b20771..d7661cf389 100644 --- a/vendor/github.com/openshift/api/machine/v1/types_controlplanemachineset.go +++ b/vendor/github.com/openshift/api/machine/v1/types_controlplanemachineset.go @@ -135,7 +135,7 @@ type ControlPlaneMachineSetTemplate struct { // Currently, the only valid value is machines_v1beta1_machine_openshift_io.
// +unionDiscriminator // +required - MachineType ControlPlaneMachineSetMachineType `json:"machineType,omitempty"` + MachineType ControlPlaneMachineSetMachineType `json:"machineType"` // OpenShiftMachineV1Beta1Machine defines the template for creating Machines // from the v1beta1.machine.openshift.io API group. @@ -174,7 +174,7 @@ type OpenShiftMachineV1Beta1MachineTemplate struct { // The ProviderSpec within contains platform specific details // for creating the Control Plane Machines. // The ProviderSe should be complete apart from the platform specific - // failure domain field. This will be overriden when the Machines + // failure domain field. This will be overridden when the Machines // are created based on the FailureDomains field. // +required Spec machinev1beta1.MachineSpec `json:"spec"` @@ -286,7 +286,6 @@ type FailureDomains struct { VSphere []VSphereFailureDomain `json:"vsphere,omitempty"` // openstack configures failure domain information for the OpenStack platform. - // +optional // // + --- // + Unlike other platforms, OpenStack failure domains can be empty. diff --git a/vendor/github.com/openshift/api/machine/v1/types_nutanixprovider.go b/vendor/github.com/openshift/api/machine/v1/types_nutanixprovider.go index e2ddde2ad7..446082efb3 100644 --- a/vendor/github.com/openshift/api/machine/v1/types_nutanixprovider.go +++ b/vendor/github.com/openshift/api/machine/v1/types_nutanixprovider.go @@ -238,7 +238,7 @@ type NutanixVMStorageConfig struct { // storageContainer refers to the storage_container used by the VM disk. // +optional - StorageContainer *NutanixStorageResourceIdentifier `json:"storageContainer"` + StorageContainer *NutanixStorageResourceIdentifier `json:"storageContainer,omitempty"` } // NutanixDiskDeviceType is the VM disk device type. @@ -286,7 +286,7 @@ type NutanixVMDiskDeviceProperties struct { // If the deviceType is "Disk", the valid adapterType can be "SCSI", "IDE", "PCI", "SATA" or "SPAPR". // If the deviceType is "CDRom", the valid adapterType can be "IDE" or "SATA". // +required - AdapterType NutanixDiskAdapterType `json:"adapterType,omitempty"` + AdapterType NutanixDiskAdapterType `json:"adapterType"` // deviceIndex is the index of the disk address. The valid values are non-negative integers, with the default value 0. // For a Machine VM, the deviceIndex for the disks with the same deviceType.adapterType combination should diff --git a/vendor/github.com/openshift/api/machine/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/machine/v1/zz_generated.deepcopy.go index f30514381b..61294ef58f 100644 --- a/vendor/github.com/openshift/api/machine/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/machine/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ //go:build !ignore_autogenerated // +build !ignore_autogenerated -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by codegen. DO NOT EDIT. 
package v1 diff --git a/vendor/github.com/openshift/api/machine/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/machine/v1/zz_generated.swagger_doc_generated.go index c0b8c4ce42..2e35df7e23 100644 --- a/vendor/github.com/openshift/api/machine/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/machine/v1/zz_generated.swagger_doc_generated.go @@ -280,7 +280,7 @@ var map_OpenShiftMachineV1Beta1MachineTemplate = map[string]string{ "": "OpenShiftMachineV1Beta1MachineTemplate is a template for the ControlPlaneMachineSet to create Machines from the v1beta1.machine.openshift.io API group.", "failureDomains": "failureDomains is the list of failure domains (sometimes called availability zones) in which the ControlPlaneMachineSet should balance the Control Plane Machines. This will be merged into the ProviderSpec given in the template. This field is optional on platforms that do not require placement information.", "metadata": "ObjectMeta is the standard object metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata Labels are required to match the ControlPlaneMachineSet selector.", - "spec": "spec contains the desired configuration of the Control Plane Machines. The ProviderSpec within contains platform specific details for creating the Control Plane Machines. The ProviderSe should be complete apart from the platform specific failure domain field. This will be overriden when the Machines are created based on the FailureDomains field.", + "spec": "spec contains the desired configuration of the Control Plane Machines. The ProviderSpec within contains platform specific details for creating the Control Plane Machines. The ProviderSpec should be complete apart from the platform specific failure domain field. This will be overridden when the Machines are created based on the FailureDomains field.", } func (OpenShiftMachineV1Beta1MachineTemplate) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/machine/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/machine/v1alpha1/zz_generated.deepcopy.go index f61b35ab44..179a34778e 100644 --- a/vendor/github.com/openshift/api/machine/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/machine/v1alpha1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ //go:build !ignore_autogenerated // +build !ignore_autogenerated -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by codegen. DO NOT EDIT. package v1alpha1 diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go index d69bcd0233..db15df2cc4 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go @@ -140,8 +140,10 @@ type BlockDeviceMappingSpec struct { // https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EbsBlockDevice type EBSBlockDeviceSpec struct { // Indicates whether the EBS volume is deleted on machine termination. + // + // Deprecated: setting this field has no effect. // +optional - DeleteOnTermination *bool `json:"deleteOnTermination,omitempty"` + DeprecatedDeleteOnTermination *bool `json:"deleteOnTermination,omitempty"` // Indicates whether the EBS volume is encrypted. Encrypted Amazon EBS volumes // may only be attached to machines that support Amazon EBS encryption.
// +optional diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_azureprovider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_azureprovider.go index 760360bd57..2cb43d4313 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_azureprovider.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_azureprovider.go @@ -368,7 +368,7 @@ type DataDisk struct { // +kubebuilder:validation:Minimum=0 // +kubebuilder:validation:Maximum=63 // +required - Lun int32 `json:"lun,omitempty"` + Lun int32 `json:"lun"` // cachingType specifies the caching requirements. // Empty value means no opinion and the platform chooses a default, which is subject to change over time. // Currently the default is CachingTypeNone. @@ -518,7 +518,7 @@ type SecuritySettings struct { // +kubebuilder:validation:Enum=ConfidentialVM;TrustedLaunch // +required // +unionDiscriminator - SecurityType SecurityTypes `json:"securityType,omitempty"` + SecurityType SecurityTypes `json:"securityType"` // confidentialVM specifies the security configuration of the virtual machine. // For more information regarding Confidential VMs, please refer to: // https://learn.microsoft.com/azure/confidential-computing/confidential-vm-overview @@ -535,14 +535,14 @@ type SecuritySettings struct { type ConfidentialVM struct { // uefiSettings specifies the security settings like secure boot and vTPM used while creating the virtual machine. // +required - UEFISettings UEFISettings `json:"uefiSettings,omitempty"` + UEFISettings UEFISettings `json:"uefiSettings"` } // TrustedLaunch defines the UEFI settings for the virtual machine. type TrustedLaunch struct { // uefiSettings specifies the security settings like secure boot and vTPM used while creating the virtual machine. // +required - UEFISettings UEFISettings `json:"uefiSettings,omitempty"` + UEFISettings UEFISettings `json:"uefiSettings"` } // UEFISettings specifies the security settings like secure boot and vTPM used while creating the diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_machine.go b/vendor/github.com/openshift/api/machine/v1beta1/types_machine.go index 9bd3bdd60b..33f472f926 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_machine.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_machine.go @@ -392,6 +392,7 @@ type MachineStatus struct { // conditions defines the current state of the Machine // +listType=map // +listMapKey=type + // +optional Conditions []Condition `json:"conditions,omitempty"` // authoritativeAPI is the API that is authoritative for this resource. diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_machinehealthcheck.go b/vendor/github.com/openshift/api/machine/v1beta1/types_machinehealthcheck.go index 76c79acb0b..f80d716a0d 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_machinehealthcheck.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_machinehealthcheck.go @@ -76,6 +76,7 @@ type MachineHealthCheckSpec struct { // Expects either a postive integer value or a percentage value. // Percentage values must be positive whole numbers and are capped at 100%. // Both 0 and 0% are valid and will block all remediation. + // Defaults to 100% if not set. 
// +kubebuilder:default:="100%" // +kubebuilder:validation:XIntOrString // +kubebuilder:validation:Pattern="^((100|[0-9]{1,2})%|[0-9]+)$" @@ -89,7 +90,6 @@ type MachineHealthCheckSpec struct { // Expects an unsigned duration string of decimal numbers each with optional // fraction and a unit suffix, eg "300ms", "1.5h" or "2h45m". // Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". - // +optional // +kubebuilder:default:="10m" // +kubebuilder:validation:Pattern="^0|([0-9]+(\\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$" // +kubebuilder:validation:Type:=string @@ -130,10 +130,12 @@ type UnhealthyCondition struct { type MachineHealthCheckStatus struct { // total number of machines counted by this machine health check // +kubebuilder:validation:Minimum=0 + // +optional ExpectedMachines *int `json:"expectedMachines"` // total number of machines counted by this machine health check // +kubebuilder:validation:Minimum=0 + // +optional CurrentHealthy *int `json:"currentHealthy"` // remediationsAllowed is the number of further remediations allowed by this machine health check before diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_machineset.go b/vendor/github.com/openshift/api/machine/v1beta1/types_machineset.go index a29977f347..a2343dc398 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_machineset.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_machineset.go @@ -114,6 +114,7 @@ type MachineTemplateSpec struct { // +openshift:validation:FeatureGateAwareXValidation:featureGate=MachineAPIMigration,rule="!has(oldSelf.synchronizedGeneration) || (has(self.synchronizedGeneration) && self.synchronizedGeneration >= oldSelf.synchronizedGeneration) || (oldSelf.authoritativeAPI == 'Migrating' && self.authoritativeAPI != 'Migrating')",message="synchronizedGeneration must not decrease unless authoritativeAPI is transitioning from Migrating to another value" type MachineSetStatus struct { // replicas is the most recently observed number of replicas. + // +optional Replicas int32 `json:"replicas"` // The number of replicas that have labels matching the labels of the machine template of the MachineSet. // +optional @@ -153,6 +154,7 @@ type MachineSetStatus struct { // conditions defines the current state of the MachineSet // +listType=map // +listMapKey=type + // +optional Conditions []Condition `json:"conditions,omitempty"` // authoritativeAPI is the API that is authoritative for this resource. diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_provider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_provider.go index 812358e89f..b67cb32432 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/types_provider.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_provider.go @@ -213,7 +213,7 @@ type Condition struct { // This should be when the underlying condition changed. If that is not known, then using the time when // the API field changed is acceptable. // +required - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` + LastTransitionTime metav1.Time `json:"lastTransitionTime"` // The reason for the condition's last transition in CamelCase. // The specific API may choose whether or not this field is considered a guaranteed API. 
diff --git a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go index ba9aae81e2..7763435a9e 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ //go:build !ignore_autogenerated // +build !ignore_autogenerated -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by codegen. DO NOT EDIT. package v1beta1 @@ -518,8 +518,8 @@ func (in *DiskSettings) DeepCopy() *DiskSettings { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EBSBlockDeviceSpec) DeepCopyInto(out *EBSBlockDeviceSpec) { *out = *in - if in.DeleteOnTermination != nil { - in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + if in.DeprecatedDeleteOnTermination != nil { + in, out := &in.DeprecatedDeleteOnTermination, &out.DeprecatedDeleteOnTermination *out = new(bool) **out = **in } diff --git a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go index b2e55376fe..e40d744f60 100644 --- a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go @@ -84,7 +84,7 @@ func (BlockDeviceMappingSpec) SwaggerDoc() map[string]string { var map_EBSBlockDeviceSpec = map[string]string{ "": "EBSBlockDeviceSpec describes a block device for an EBS volume. https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EbsBlockDevice", - "deleteOnTermination": "Indicates whether the EBS volume is deleted on machine termination.", + "deleteOnTermination": "Indicates whether the EBS volume is deleted on machine termination.\n\nDeprecated: setting this field has no effect.", "encrypted": "Indicates whether the EBS volume is encrypted. Encrypted Amazon EBS volumes may only be attached to machines that support Amazon EBS encryption.", "kmsKey": "Indicates the KMS key that should be used to encrypt the Amazon EBS volume.", "iops": "The number of I/O operations per second (IOPS) that the volume supports. For io1, this represents the number of IOPS that are provisioned for the volume. For gp2, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. For more information about General Purpose SSD baseline performance, I/O credits, and bursting, see Amazon EBS Volume Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) in the Amazon Elastic Compute Cloud User Guide.\n\nMinimal and maximal IOPS for io1 and gp2 are constrained. Please, check https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html for precise boundaries for individual volumes.\n\nCondition: This parameter is required for requests to create io1 volumes; it is not used in requests to create gp2, st1, sc1, or standard volumes.", @@ -625,7 +625,7 @@ var map_MachineHealthCheckSpec = map[string]string{ "": "MachineHealthCheckSpec defines the desired state of MachineHealthCheck", "selector": "Label selector to match machines whose health will be exercised. Note: An empty selector will match all machines.", "unhealthyConditions": "unhealthyConditions contains a list of the conditions that determine whether a node is considered unhealthy. 
The conditions are combined in a logical OR, i.e. if any of the conditions is met, the node is unhealthy.", - "maxUnhealthy": "Any farther remediation is only allowed if at most \"MaxUnhealthy\" machines selected by \"selector\" are not healthy. Expects either a postive integer value or a percentage value. Percentage values must be positive whole numbers and are capped at 100%. Both 0 and 0% are valid and will block all remediation.", + "maxUnhealthy": "Any further remediation is only allowed if at most \"MaxUnhealthy\" machines selected by \"selector\" are not healthy. Expects either a positive integer value or a percentage value. Percentage values must be positive whole numbers and are capped at 100%. Both 0 and 0% are valid and will block all remediation. Defaults to 100% if not set.", "nodeStartupTimeout": "Machines older than this duration without a node will be considered to have failed and will be remediated. To prevent Machines without Nodes from being removed, disable startup checks by setting this value explicitly to \"0\". Expects an unsigned duration string of decimal numbers each with optional fraction and a unit suffix, eg \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\".", "remediationTemplate": "remediationTemplate is a reference to a remediation template provided by an infrastructure provider.\n\nThis field is completely optional, when filled, the MachineHealthCheck controller creates a new object from the template referenced and hands off remediation of the machine to a controller that lives outside of Machine API Operator.", } diff --git a/vendor/github.com/openshift/api/monitoring/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/monitoring/v1/zz_generated.deepcopy.go index 67af0e0828..bc572dc5bf 100644 --- a/vendor/github.com/openshift/api/monitoring/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/monitoring/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ //go:build !ignore_autogenerated // +build !ignore_autogenerated -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by codegen. DO NOT EDIT. package v1 diff --git a/vendor/github.com/openshift/api/network/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/network/v1/zz_generated.deepcopy.go index ab6eb72aae..c24667442d 100644 --- a/vendor/github.com/openshift/api/network/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/network/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ //go:build !ignore_autogenerated // +build !ignore_autogenerated -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by codegen. DO NOT EDIT. package v1 diff --git a/vendor/github.com/openshift/api/network/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/network/v1alpha1/zz_generated.deepcopy.go index b8308c3f83..e04794e15c 100644 --- a/vendor/github.com/openshift/api/network/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/network/v1alpha1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ //go:build !ignore_autogenerated // +build !ignore_autogenerated -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by codegen. DO NOT EDIT.
package v1alpha1 diff --git a/vendor/github.com/openshift/api/networkoperator/v1/generated.proto b/vendor/github.com/openshift/api/networkoperator/v1/generated.proto index 1999f71e8d..c83a2fb174 100644 --- a/vendor/github.com/openshift/api/networkoperator/v1/generated.proto +++ b/vendor/github.com/openshift/api/networkoperator/v1/generated.proto @@ -51,7 +51,6 @@ message EgressRouter { } // EgressRouterAddress contains a pair of IP CIDR and gateway to be configured on the router's interface -// +kubebuilder:validation:Required message EgressRouterAddress { // ip is the address to configure on the router's interface. Can be IPv4 or IPv6. // +required @@ -84,7 +83,6 @@ message EgressRouterList { // Mode, networkInterface and addresses fields must be specified along with exactly one "Config" that matches the mode. // Each config consists of parameters specific to that mode. // +k8s:openapi-gen=true -// +kubebuilder:validation:Required message EgressRouterSpec { // mode depicts the mode that is used for the egress router. The default mode is "Redirect" and is the only supported mode currently. // +required diff --git a/vendor/github.com/openshift/api/networkoperator/v1/types_egressrouter.go b/vendor/github.com/openshift/api/networkoperator/v1/types_egressrouter.go index 541c3b5597..d5327ffaf6 100644 --- a/vendor/github.com/openshift/api/networkoperator/v1/types_egressrouter.go +++ b/vendor/github.com/openshift/api/networkoperator/v1/types_egressrouter.go @@ -49,7 +49,6 @@ type EgressRouter struct { // Mode, networkInterface and addresses fields must be specified along with exactly one "Config" that matches the mode. // Each config consists of parameters specific to that mode. // +k8s:openapi-gen=true -// +kubebuilder:validation:Required type EgressRouterSpec struct { // mode depicts the mode that is used for the egress router. The default mode is "Redirect" and is the only supported mode currently. // +required @@ -176,7 +175,6 @@ type MacvlanConfig struct { } // EgressRouterAddress contains a pair of IP CIDR and gateway to be configured on the router's interface -// +kubebuilder:validation:Required type EgressRouterAddress struct { // ip is the address to configure on the router's interface. Can be IPv4 or IPv6. // +required @@ -249,7 +247,7 @@ type EgressRouterStatus struct { // +required // +listType=map // +listMapKey=type - Conditions []EgressRouterStatusCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` + Conditions []EgressRouterStatusCondition `json:"conditions" protobuf:"bytes,1,rep,name=conditions"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/github.com/openshift/api/networkoperator/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/networkoperator/v1/zz_generated.deepcopy.go index e58d3dfaa0..27b39ffdc7 100644 --- a/vendor/github.com/openshift/api/networkoperator/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/networkoperator/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ //go:build !ignore_autogenerated // +build !ignore_autogenerated -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by codegen. DO NOT EDIT. 
package v1 diff --git a/vendor/github.com/openshift/api/oauth/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/oauth/v1/zz_generated.deepcopy.go index f1af9dc5f0..73191b6160 100644 --- a/vendor/github.com/openshift/api/oauth/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/oauth/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ //go:build !ignore_autogenerated // +build !ignore_autogenerated -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by codegen. DO NOT EDIT. package v1 diff --git a/vendor/github.com/openshift/api/openshiftcontrolplane/v1/types.go b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/types.go index 498f78df6d..0d71e3f8b0 100644 --- a/vendor/github.com/openshift/api/openshiftcontrolplane/v1/types.go +++ b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/types.go @@ -258,7 +258,10 @@ const ( type OpenShiftControllerManagerConfig struct { metav1.TypeMeta `json:",inline"` - KubeClientConfig configv1.KubeClientConfig `json:"kubeClientConfig"` + // KubeClientConfig is no longer being used. + // The field is being ignored by OCM. + // + // KubeClientConfig configv1.KubeClientConfig `json:"kubeClientConfig"` // servingInfo describes how to start serving ServingInfo *configv1.HTTPServingInfo `json:"servingInfo"` diff --git a/vendor/github.com/openshift/api/openshiftcontrolplane/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/zz_generated.deepcopy.go index 62de55ed49..4d7774caa4 100644 --- a/vendor/github.com/openshift/api/openshiftcontrolplane/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ //go:build !ignore_autogenerated // +build !ignore_autogenerated -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by codegen. DO NOT EDIT. 
package v1 @@ -459,7 +459,6 @@ func (in *OpenShiftAPIServerConfig) DeepCopyObject() runtime.Object { func (in *OpenShiftControllerManagerConfig) DeepCopyInto(out *OpenShiftControllerManagerConfig) { *out = *in out.TypeMeta = in.TypeMeta - out.KubeClientConfig = in.KubeClientConfig if in.ServingInfo != nil { in, out := &in.ServingInfo, &out.ServingInfo *out = new(configv1.HTTPServingInfo) diff --git a/vendor/github.com/openshift/api/operator/v1/types.go b/vendor/github.com/openshift/api/operator/v1/types.go index 4b0c48a103..3a2141abb9 100644 --- a/vendor/github.com/openshift/api/operator/v1/types.go +++ b/vendor/github.com/openshift/api/operator/v1/types.go @@ -124,6 +124,7 @@ type OperatorStatus struct { Version string `json:"version,omitempty"` // readyReplicas indicates how many replicas are ready and at the desired state + // +optional ReadyReplicas int32 `json:"readyReplicas"` // latestAvailableRevision is the deploymentID of the most recent deployment @@ -207,7 +208,7 @@ type OperatorCondition struct { // +required // +kubebuilder:validation:Type=string // +kubebuilder:validation:Format=date-time - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"` + LastTransitionTime metav1.Time `json:"lastTransitionTime"` Reason string `json:"reason,omitempty"` diff --git a/vendor/github.com/openshift/api/operator/v1/types_authentication.go b/vendor/github.com/openshift/api/operator/v1/types_authentication.go index bf103f19bb..7cc22d1e4e 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_authentication.go +++ b/vendor/github.com/openshift/api/operator/v1/types_authentication.go @@ -26,7 +26,7 @@ type Authentication struct { metav1.ObjectMeta `json:"metadata,omitempty"` // +required - Spec AuthenticationSpec `json:"spec,omitempty"` + Spec AuthenticationSpec `json:"spec"` // +optional Status AuthenticationStatus `json:"status,omitempty"` } diff --git a/vendor/github.com/openshift/api/operator/v1/types_console.go b/vendor/github.com/openshift/api/operator/v1/types_console.go index c2f25e4e64..e030a65c86 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_console.go +++ b/vendor/github.com/openshift/api/operator/v1/types_console.go @@ -27,7 +27,7 @@ type Console struct { metav1.ObjectMeta `json:"metadata,omitempty"` // +required - Spec ConsoleSpec `json:"spec,omitempty"` + Spec ConsoleSpec `json:"spec"` // +optional Status ConsoleStatus `json:"status,omitempty"` } @@ -203,6 +203,7 @@ type ConfigMapFileReference struct { // +required Key string `json:"key"` } + // FileReferenceSource is used by the console to locate the specified file containing a custom logo. // +kubebuilder:validation:XValidation:rule="has(self.from) && self.from == 'ConfigMap' ? has(self.configMap) : !has(self.configMap)",message="configMap is required when from is 'ConfigMap', and forbidden otherwise." type FileReferenceSource struct { @@ -479,7 +480,6 @@ type PerspectiveVisibility struct { // Perspective defines a perspective that cluster admins want to show/hide in the perspective switcher dropdown // +kubebuilder:validation:XValidation:rule="has(self.id) && self.id != 'dev'? !has(self.pinnedResources) : true",message="pinnedResources is allowed only for dev and forbidden for other perspectives" -// +optional type Perspective struct { // id defines the id of the perspective. // Example: "dev", "admin". 
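The `+optional` marker added to readyReplicas in the hunk above is likewise contract-only: it is consumed by the CRD schema generator, while the JSON tag, which keeps no omitempty, still serializes the zero value. A minimal sketch, with a stand-in type invented for illustration rather than the real OperatorStatus:

package main

import (
        "encoding/json"
        "fmt"
)

// operatorStatus mimics an int32 field tagged without omitempty but marked
// +optional for schema generation.
type operatorStatus struct {
        ReadyReplicas int32 `json:"readyReplicas"`
}

func main() {
        out, _ := json.Marshal(operatorStatus{})
        // The zero value still appears on the wire; "+optional" only keeps the
        // field out of the generated OpenAPI required list.
        fmt.Println(string(out)) // {"readyReplicas":0}
}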
diff --git a/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go b/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go index b25133a42f..2799904482 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go +++ b/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go @@ -169,7 +169,6 @@ type AWSCSIDriverConfigSpec struct { KMSKeyARN string `json:"kmsKeyARN,omitempty"` // efsVolumeMetrics sets the configuration for collecting metrics from EFS volumes used by the EFS CSI Driver. - // +openshift:enable:FeatureGate=AWSEFSDriverVolumeMetrics // +optional EFSVolumeMetrics *AWSEFSVolumeMetrics `json:"efsVolumeMetrics,omitempty"` } @@ -348,7 +347,6 @@ type VSphereCSIDriverConfigSpec struct { // Volume snapshot documentation: https://docs.vmware.com/en/VMware-vSphere-Container-Storage-Plug-in/3.0/vmware-vsphere-csp-getting-started/GUID-E0B41C69-7EEB-450F-A73D-5FD2FF39E891.html // +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=32 - // +openshift:enable:FeatureGate=VSphereDriverConfiguration // +optional GlobalMaxSnapshotsPerBlockVolume *uint32 `json:"globalMaxSnapshotsPerBlockVolume,omitempty"` @@ -357,7 +355,6 @@ type VSphereCSIDriverConfigSpec struct { // Snapshots for VSAN can not be disabled using this parameter. // +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=32 - // +openshift:enable:FeatureGate=VSphereDriverConfiguration // +optional GranularMaxSnapshotsPerBlockVolumeInVSAN *uint32 `json:"granularMaxSnapshotsPerBlockVolumeInVSAN,omitempty"` @@ -366,7 +363,6 @@ type VSphereCSIDriverConfigSpec struct { // Snapshots for VVOL can not be disabled using this parameter. // +kubebuilder:validation:Minimum=1 // +kubebuilder:validation:Maximum=32 - // +openshift:enable:FeatureGate=VSphereDriverConfiguration // +optional GranularMaxSnapshotsPerBlockVolumeInVVOL *uint32 `json:"granularMaxSnapshotsPerBlockVolumeInVVOL,omitempty"` diff --git a/vendor/github.com/openshift/api/operator/v1/types_etcd.go b/vendor/github.com/openshift/api/operator/v1/types_etcd.go index 375ec5fb7f..252f3b3990 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_etcd.go +++ b/vendor/github.com/openshift/api/operator/v1/types_etcd.go @@ -38,7 +38,6 @@ type EtcdSpec struct { // Valid values are "", "Standard" and "Slower". // "" means no opinion and the platform is left to choose a reasonable default // which is subject to change without notice. - // +openshift:enable:FeatureGate=HardwareSpeed // +optional HardwareSpeed ControlPlaneHardwareSpeed `json:"controlPlaneHardwareSpeed"` @@ -57,7 +56,8 @@ type EtcdSpec struct { type EtcdStatus struct { StaticPodOperatorStatus `json:",inline"` - HardwareSpeed ControlPlaneHardwareSpeed `json:"controlPlaneHardwareSpeed"` + // +optional + HardwareSpeed ControlPlaneHardwareSpeed `json:"controlPlaneHardwareSpeed"` } const ( diff --git a/vendor/github.com/openshift/api/operator/v1/types_ingress.go b/vendor/github.com/openshift/api/operator/v1/types_ingress.go index 240ab12c77..35b50a8fbd 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_ingress.go +++ b/vendor/github.com/openshift/api/operator/v1/types_ingress.go @@ -1401,7 +1401,7 @@ type IngressControllerCaptureHTTPCookieUnion struct { // // +unionDiscriminator // +required - MatchType CookieMatchType `json:"matchType,omitempty"` + MatchType CookieMatchType `json:"matchType"` // name specifies a cookie name. Its value must be a valid HTTP cookie // name as defined in RFC 6265 section 4.1. 
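For string-typed union discriminators such as matchType above, dropping `omitempty` does change the wire format, unlike the struct-typed cases earlier: an empty string is something omitempty actually omits, so the old tag let an unset discriminator vanish from the payload rather than surface as an invalid empty value. A standalone sketch with invented stand-in types:

package main

import (
        "encoding/json"
        "fmt"
)

type cookieMatchType string

type captureCookieOld struct {
        MatchType cookieMatchType `json:"matchType,omitempty"`
}

type captureCookieNew struct {
        MatchType cookieMatchType `json:"matchType"`
}

func main() {
        // An empty string counts as "empty" for omitempty, so the discriminator
        // disappears entirely under the old tag; the new tag serializes "".
        oldJSON, _ := json.Marshal(captureCookieOld{})
        newJSON, _ := json.Marshal(captureCookieNew{})
        fmt.Println(string(oldJSON)) // {}
        fmt.Println(string(newJSON)) // {"matchType":""}
}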
@@ -1554,7 +1554,6 @@ type IngressControllerHTTPUniqueIdHeaderPolicy struct { // (for example, "X-Forwarded-For") in the desired capitalization. The value // must be a valid HTTP header name as defined in RFC 2616 section 4.2. // -// +optional // +kubebuilder:validation:Pattern="^$|^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$" // +kubebuilder:validation:MinLength=0 // +kubebuilder:validation:MaxLength=1024 @@ -2021,17 +2020,21 @@ var ( type IngressControllerStatus struct { // availableReplicas is number of observed available replicas according to the // ingress controller deployment. + // +optional AvailableReplicas int32 `json:"availableReplicas"` // selector is a label selector, in string format, for ingress controller pods // corresponding to the IngressController. The number of matching pods should // equal the value of availableReplicas. + // +optional Selector string `json:"selector"` // domain is the actual domain in use. + // +optional Domain string `json:"domain"` // endpointPublishingStrategy is the actual strategy in use. + // +optional EndpointPublishingStrategy *EndpointPublishingStrategy `json:"endpointPublishingStrategy,omitempty"` // conditions is a list of conditions and their status. @@ -2068,6 +2071,7 @@ type IngressControllerStatus struct { // - False if any of those conditions are unsatisfied. // +listType=map // +listMapKey=type + // +optional Conditions []OperatorCondition `json:"conditions,omitempty"` // tlsProfile is the TLS connection configuration that is in effect. diff --git a/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go b/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go index ce00b4b62c..7d468755a1 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go +++ b/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go @@ -17,7 +17,6 @@ import ( // // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 -// +openshift:compatibility-gen:level=1 type KubeAPIServer struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go b/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go index 4c53734d86..8f779db2a6 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go +++ b/vendor/github.com/openshift/api/operator/v1/types_machineconfiguration.go @@ -53,9 +53,18 @@ type MachineConfigurationSpec struct { // MachineConfig-based updates, such as drains, service reloads, etc. Specifying this will allow // for less downtime when doing small configuration updates to the cluster. This configuration // has no effect on cluster upgrades which will still incur node disruption where required. - // +openshift:enable:FeatureGate=NodeDisruptionPolicy // +optional NodeDisruptionPolicy NodeDisruptionPolicyConfig `json:"nodeDisruptionPolicy"` + + // irreconcilableValidationOverrides is an optional field that can be used to make changes to a MachineConfig that + // cannot be applied to existing nodes. + // When specified, the fields configured with validation overrides will no longer reject changes to those + // respective fields due to them not being able to be applied to existing nodes. + // Only newly provisioned nodes will have these configurations applied. + // Existing nodes will report observed configuration differences in their MachineConfigNode status.
+ // +openshift:enable:FeatureGate=IrreconcilableMachineConfig + // +optional + IrreconcilableValidationOverrides IrreconcilableValidationOverrides `json:"irreconcilableValidationOverrides,omitempty,omitzero"` } type MachineConfigurationStatus struct { @@ -94,7 +103,6 @@ type MachineConfigurationStatus struct { // nodeDisruptionPolicyStatus status reflects what the latest cluster-validated policies are, // and will be used by the Machine Config Daemon during future node updates. - // +openshift:enable:FeatureGate=NodeDisruptionPolicy // +optional NodeDisruptionPolicyStatus NodeDisruptionPolicyStatus `json:"nodeDisruptionPolicyStatus"` @@ -122,6 +130,40 @@ type MachineConfigurationList struct { Items []MachineConfiguration `json:"items"` } +// IrreconcilableValidationOverridesStorage defines available storage irreconcilable overrides. +// +kubebuilder:validation:Enum=Disks;FileSystems;Raid +type IrreconcilableValidationOverridesStorage string + +const ( + // Disks enables changes to the `spec.config.storage.disks` section of MachineConfig CRs. + IrreconcilableValidationOverridesStorageDisks IrreconcilableValidationOverridesStorage = "Disks" + + // FileSystems enables changes to the `spec.config.storage.filesystems` section of MachineConfig CRs. + IrreconcilableValidationOverridesStorageFileSystems IrreconcilableValidationOverridesStorage = "FileSystems" + + // Raid enables changes to the `spec.config.storage.raid` section of MachineConfig CRs. + IrreconcilableValidationOverridesStorageRaid IrreconcilableValidationOverridesStorage = "Raid" +) + +// IrreconcilableValidationOverrides holds the irreconcilable validation overrides to be applied on each rendered +// MachineConfig generation. +// +kubebuilder:validation:MinProperties=1 +type IrreconcilableValidationOverrides struct { + // storage can be used to allow making irreconcilable changes to the selected sections under the + // `spec.config.storage` field of MachineConfig CRs. + // It must have at least one item, may not exceed 3 items and must not contain duplicates. + // Allowed element values are "Disks", "FileSystems" and "Raid"; the list may also be omitted entirely. + // When it contains "Disks", changes to the `spec.config.storage.disks` section of MachineConfig CRs are allowed. + // When it contains "FileSystems", changes to the `spec.config.storage.filesystems` section of MachineConfig CRs are allowed. + // When it contains "Raid", changes to the `spec.config.storage.raid` section of MachineConfig CRs are allowed. + // When omitted, changes to the `spec.config.storage` section are forbidden. + // +optional + // +listType=set + // +kubebuilder:validation:MinItems=1 + // +kubebuilder:validation:MaxItems=3 + Storage []IrreconcilableValidationOverridesStorage `json:"storage,omitempty,omitzero"` +} + type ManagedBootImages struct { // machineManagers can be used to register machine management resources for boot image updates. The Machine Config Operator // will watch for changes to this list. Only one entry is permitted per type of machine management resource. diff --git a/vendor/github.com/openshift/api/operator/v1/types_network.go b/vendor/github.com/openshift/api/operator/v1/types_network.go index 713939ddbb..111240eecf 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_network.go +++ b/vendor/github.com/openshift/api/operator/v1/types_network.go @@ -431,16 +431,14 @@ type OVNKubernetesConfig struct { // v4InternalSubnet is a v4 subnet used internally by ovn-kubernetes in case the // default one is being already used by something else.
It must not overlap with // any other subnet being used by OpenShift or by the node network. The size of the - // subnet must be larger than the number of nodes. The value cannot be changed - // after installation. + // subnet must be larger than the number of nodes. // Default is 100.64.0.0/16 // +optional V4InternalSubnet string `json:"v4InternalSubnet,omitempty"` // v6InternalSubnet is a v6 subnet used internally by ovn-kubernetes in case the // default one is being already used by something else. It must not overlap with // any other subnet being used by OpenShift or by the node network. The size of the - // subnet must be larger than the number of nodes. The value cannot be changed - // after installation. + // subnet must be larger than the number of nodes. // Default is fd98::/64 // +optional V6InternalSubnet string `json:"v6InternalSubnet,omitempty"` @@ -478,11 +476,10 @@ type IPv4OVNKubernetesConfig struct { // architecture that connects the cluster routers on each node together to enable // east west traffic. The subnet chosen should not overlap with other networks // specified for OVN-Kubernetes as well as other networks used on the host. - // The value cannot be changed after installation. // When ommitted, this means no opinion and the platform is left to choose a reasonable // default which is subject to change over time. // The current default subnet is 100.88.0.0/16 - // The subnet must be large enough to accomadate one IP per node in your cluster + // The subnet must be large enough to accommodate one IP per node in your cluster // The value must be in proper IPV4 CIDR format // +kubebuilder:validation:MaxLength=18 // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).ip().family() == 4",message="Subnet must be in valid IPV4 CIDR format" @@ -493,10 +490,9 @@ type IPv4OVNKubernetesConfig struct { // internalJoinSubnet is a v4 subnet used internally by ovn-kubernetes in case the // default one is being already used by something else. It must not overlap with // any other subnet being used by OpenShift or by the node network. The size of the - // subnet must be larger than the number of nodes. The value cannot be changed - // after installation. + // subnet must be larger than the number of nodes. // The current default value is 100.64.0.0/16 - // The subnet must be large enough to accomadate one IP per node in your cluster + // The subnet must be large enough to accommodate one IP per node in your cluster // The value must be in proper IPV4 CIDR format // +kubebuilder:validation:MaxLength=18 // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).ip().family() == 4",message="Subnet must be in valid IPV4 CIDR format" @@ -512,10 +508,9 @@ type IPv6OVNKubernetesConfig struct { // architecture that connects the cluster routers on each node together to enable // east west traffic. The subnet chosen should not overlap with other networks // specified for OVN-Kubernetes as well as other networks used on the host. - // The value cannot be changed after installation. // When ommitted, this means no opinion and the platform is left to choose a reasonable // default which is subject to change over time. 
- // The subnet must be large enough to accomadate one IP per node in your cluster + // The subnet must be large enough to accommodate one IP per node in your cluster // The current default subnet is fd97::/64 // The value must be in proper IPV6 CIDR format // Note that IPV6 dual addresses are not permitted @@ -527,9 +522,8 @@ type IPv6OVNKubernetesConfig struct { // internalJoinSubnet is a v6 subnet used internally by ovn-kubernetes in case the // default one is being already used by something else. It must not overlap with // any other subnet being used by OpenShift or by the node network. The size of the - // subnet must be larger than the number of nodes. The value cannot be changed - // after installation. - // The subnet must be large enough to accomadate one IP per node in your cluster + // subnet must be larger than the number of nodes. + // The subnet must be large enough to accommodate one IP per node in your cluster // The current default value is fd98::/64 // The value must be in proper IPV6 CIDR format // Note that IPV6 dual addresses are not permitted @@ -646,7 +640,7 @@ type IPv4GatewayConfig struct { // OVN-Kubernetes as well as other networks used on the host. Additionally the subnet must // be large enough to accommodate 6 IPs (maximum prefix length /29). // When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. - // The current default subnet is 169.254.169.0/29 + // The current default subnet is 169.254.0.0/17 // The value must be in proper IPV4 CIDR format // +kubebuilder:validation:MaxLength=18 // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).ip().family() == 4",message="Subnet must be in valid IPV4 CIDR format" @@ -665,7 +659,7 @@ type IPv6GatewayConfig struct { // OVN-Kubernetes as well as other networks used on the host. Additionally the subnet must // be large enough to accommodate 6 IPs (maximum prefix length /125). // When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. - // The current default subnet is fd69::/125 + // The current default subnet is fd69::/112 // Note that IPV6 dual addresses are not permitted // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).ip().family() == 6",message="Subnet must be in valid IPV6 CIDR format" // +kubebuilder:validation:XValidation:rule="isCIDR(self) && cidr(self).prefixLength() <= 125",message="subnet must be in the range /0 to /125 inclusive" diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go index 1257a66e71..d2e74e6838 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ //go:build !ignore_autogenerated // +build !ignore_autogenerated -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by codegen. DO NOT EDIT. package v1 @@ -2640,6 +2640,27 @@ func (in *InsightsReport) DeepCopy() *InsightsReport { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IrreconcilableValidationOverrides) DeepCopyInto(out *IrreconcilableValidationOverrides) { + *out = *in + if in.Storage != nil { + in, out := &in.Storage, &out.Storage + *out = make([]IrreconcilableValidationOverridesStorage, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IrreconcilableValidationOverrides. +func (in *IrreconcilableValidationOverrides) DeepCopy() *IrreconcilableValidationOverrides { + if in == nil { + return nil + } + out := new(IrreconcilableValidationOverrides) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KubeAPIServer) DeepCopyInto(out *KubeAPIServer) { *out = *in @@ -3221,6 +3242,7 @@ func (in *MachineConfigurationSpec) DeepCopyInto(out *MachineConfigurationSpec) in.StaticPodOperatorSpec.DeepCopyInto(&out.StaticPodOperatorSpec) in.ManagedBootImages.DeepCopyInto(&out.ManagedBootImages) in.NodeDisruptionPolicy.DeepCopyInto(&out.NodeDisruptionPolicy) + in.IrreconcilableValidationOverrides.DeepCopyInto(&out.IrreconcilableValidationOverrides) return } diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml b/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml index 81f2a87a99..a893b0e0f3 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.featuregated-crd-manifests.yaml @@ -69,9 +69,7 @@ clustercsidrivers.operator.openshift.io: Capability: "" Category: "" FeatureGates: - - AWSEFSDriverVolumeMetrics - VSphereConfigurableMaxAllowedBlockVolumesPerNode - - VSphereDriverConfiguration FilenameOperatorName: csi-driver FilenameOperatorOrdering: "01" FilenameRunLevel: "0000_50" @@ -157,7 +155,6 @@ etcds.operator.openshift.io: Category: coreoperators FeatureGates: - EtcdBackendQuota - - HardwareSpeed FilenameOperatorName: etcd FilenameOperatorOrdering: "01" FilenameRunLevel: "0000_12" @@ -308,8 +305,8 @@ machineconfigurations.operator.openshift.io: Capability: "" Category: "" FeatureGates: + - IrreconcilableMachineConfig - ManagedBootImages - - NodeDisruptionPolicy FilenameOperatorName: machine-config FilenameOperatorOrdering: "01" FilenameRunLevel: "0000_80" diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go index a0fa4fe475..206dd98c43 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go @@ -1379,6 +1379,15 @@ func (KubeStorageVersionMigratorList) SwaggerDoc() map[string]string { return map_KubeStorageVersionMigratorList } +var map_IrreconcilableValidationOverrides = map[string]string{ + "": "IrreconcilableValidationOverrides holds the irreconcilable validation overrides to be applied on each rendered MachineConfig generation.", + "storage": "storage can be used to allow making irreconcilable changes to the selected sections under the `spec.config.storage` field of MachineConfig CRs. It must have at least one item, may not exceed 3 items and must not contain duplicates. Allowed element values are \"Disks\", \"FileSystems\" and \"Raid\"; the list may also be omitted entirely. When it contains \"Disks\", changes to the `spec.config.storage.disks` section of MachineConfig CRs are allowed. When it contains \"FileSystems\", changes to the `spec.config.storage.filesystems` section of MachineConfig CRs are allowed. When it contains \"Raid\", changes to the `spec.config.storage.raid` section of MachineConfig CRs are allowed. When omitted, changes to the `spec.config.storage` section are forbidden.", +} + +func (IrreconcilableValidationOverrides) SwaggerDoc() map[string]string { + return map_IrreconcilableValidationOverrides +} + var map_MachineConfiguration = map[string]string{ "": "MachineConfiguration provides information to configure an operator to manage Machine Configuration.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", @@ -1401,8 +1410,9 @@ func (MachineConfigurationList) SwaggerDoc() map[string]string { } var map_MachineConfigurationSpec = map[string]string{ - "managedBootImages": "managedBootImages allows configuration for the management of boot images for machine resources within the cluster. This configuration allows users to select resources that should be updated to the latest boot images during cluster upgrades, ensuring that new machines always boot with the current cluster version's boot image. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The default for each machine manager mode is All for GCP and AWS platforms, and None for all other platforms.", - "nodeDisruptionPolicy": "nodeDisruptionPolicy allows an admin to set granular node disruption actions for MachineConfig-based updates, such as drains, service reloads, etc. Specifying this will allow for less downtime when doing small configuration updates to the cluster. This configuration has no effect on cluster upgrades which will still incur node disruption where required.", + "managedBootImages": "managedBootImages allows configuration for the management of boot images for machine resources within the cluster. This configuration allows users to select resources that should be updated to the latest boot images during cluster upgrades, ensuring that new machines always boot with the current cluster version's boot image. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The default for each machine manager mode is All for GCP and AWS platforms, and None for all other platforms.", + "nodeDisruptionPolicy": "nodeDisruptionPolicy allows an admin to set granular node disruption actions for MachineConfig-based updates, such as drains, service reloads, etc. Specifying this will allow for less downtime when doing small configuration updates to the cluster. This configuration has no effect on cluster upgrades which will still incur node disruption where required.", + "irreconcilableValidationOverrides": "irreconcilableValidationOverrides is an optional field that can be used to make changes to a MachineConfig that cannot be applied to existing nodes. When specified, the fields configured with validation overrides will no longer reject changes to those respective fields due to them not being able to be applied to existing nodes. Only newly provisioned nodes will have these configurations applied. 
Existing nodes will report observed configuration differences in their MachineConfigNode status.", } func (MachineConfigurationSpec) SwaggerDoc() map[string]string { @@ -1712,7 +1722,7 @@ func (IPsecFullModeConfig) SwaggerDoc() map[string]string { var map_IPv4GatewayConfig = map[string]string{ "": "IPV4GatewayConfig holds the configuration paramaters for IPV4 connections in the GatewayConfig for OVN-Kubernetes", - "internalMasqueradeSubnet": "internalMasqueradeSubnet contains the masquerade addresses in IPV4 CIDR format used internally by ovn-kubernetes to enable host to service traffic. Each host in the cluster is configured with these addresses, as well as the shared gateway bridge interface. The values can be changed after installation. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. Additionally the subnet must be large enough to accommodate 6 IPs (maximum prefix length /29). When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The current default subnet is 169.254.169.0/29 The value must be in proper IPV4 CIDR format", + "internalMasqueradeSubnet": "internalMasqueradeSubnet contains the masquerade addresses in IPV4 CIDR format used internally by ovn-kubernetes to enable host to service traffic. Each host in the cluster is configured with these addresses, as well as the shared gateway bridge interface. The values can be changed after installation. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. Additionally the subnet must be large enough to accommodate 6 IPs (maximum prefix length /29). When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The current default subnet is 169.254.0.0/17 The value must be in proper IPV4 CIDR format", } func (IPv4GatewayConfig) SwaggerDoc() map[string]string { @@ -1720,8 +1730,8 @@ func (IPv4GatewayConfig) SwaggerDoc() map[string]string { } var map_IPv4OVNKubernetesConfig = map[string]string{ - "internalTransitSwitchSubnet": "internalTransitSwitchSubnet is a v4 subnet in IPV4 CIDR format used internally by OVN-Kubernetes for the distributed transit switch in the OVN Interconnect architecture that connects the cluster routers on each node together to enable east west traffic. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. The value cannot be changed after installation. When ommitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The current default subnet is 100.88.0.0/16 The subnet must be large enough to accomadate one IP per node in your cluster The value must be in proper IPV4 CIDR format", - "internalJoinSubnet": "internalJoinSubnet is a v4 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. The value cannot be changed after installation. 
The current default value is 100.64.0.0/16 The subnet must be large enough to accomadate one IP per node in your cluster The value must be in proper IPV4 CIDR format",
+	"internalTransitSwitchSubnet": "internalTransitSwitchSubnet is a v4 subnet in IPV4 CIDR format used internally by OVN-Kubernetes for the distributed transit switch in the OVN Interconnect architecture that connects the cluster routers on each node together to enable east west traffic. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The current default subnet is 100.88.0.0/16 The subnet must be large enough to accommodate one IP per node in your cluster The value must be in proper IPV4 CIDR format",
+	"internalJoinSubnet":          "internalJoinSubnet is a v4 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. The current default value is 100.64.0.0/16 The subnet must be large enough to accommodate one IP per node in your cluster The value must be in proper IPV4 CIDR format",
 }
 
 func (IPv4OVNKubernetesConfig) SwaggerDoc() map[string]string {
@@ -1730,7 +1740,7 @@ func (IPv4OVNKubernetesConfig) SwaggerDoc() map[string]string {
 
 var map_IPv6GatewayConfig = map[string]string{
 	"":                         "IPV6GatewayConfig holds the configuration paramaters for IPV6 connections in the GatewayConfig for OVN-Kubernetes",
-	"internalMasqueradeSubnet": "internalMasqueradeSubnet contains the masquerade addresses in IPV6 CIDR format used internally by ovn-kubernetes to enable host to service traffic. Each host in the cluster is configured with these addresses, as well as the shared gateway bridge interface. The values can be changed after installation. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. Additionally the subnet must be large enough to accommodate 6 IPs (maximum prefix length /125). When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The current default subnet is fd69::/125 Note that IPV6 dual addresses are not permitted",
+	"internalMasqueradeSubnet": "internalMasqueradeSubnet contains the masquerade addresses in IPV6 CIDR format used internally by ovn-kubernetes to enable host to service traffic. Each host in the cluster is configured with these addresses, as well as the shared gateway bridge interface. The values can be changed after installation. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. Additionally the subnet must be large enough to accommodate 6 IPs (maximum prefix length /125). When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time.
The current default subnet is fd69::/112 Note that IPV6 dual addresses are not permitted",
 }
 
 func (IPv6GatewayConfig) SwaggerDoc() map[string]string {
@@ -1738,8 +1748,8 @@ func (IPv6GatewayConfig) SwaggerDoc() map[string]string {
 
 var map_IPv6OVNKubernetesConfig = map[string]string{
-	"internalTransitSwitchSubnet": "internalTransitSwitchSubnet is a v4 subnet in IPV4 CIDR format used internally by OVN-Kubernetes for the distributed transit switch in the OVN Interconnect architecture that connects the cluster routers on each node together to enable east west traffic. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. The value cannot be changed after installation. When ommitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The subnet must be large enough to accomadate one IP per node in your cluster The current default subnet is fd97::/64 The value must be in proper IPV6 CIDR format Note that IPV6 dual addresses are not permitted",
-	"internalJoinSubnet":          "internalJoinSubnet is a v6 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. The value cannot be changed after installation. The subnet must be large enough to accomadate one IP per node in your cluster The current default value is fd98::/64 The value must be in proper IPV6 CIDR format Note that IPV6 dual addresses are not permitted",
+	"internalTransitSwitchSubnet": "internalTransitSwitchSubnet is a v6 subnet in IPV6 CIDR format used internally by OVN-Kubernetes for the distributed transit switch in the OVN Interconnect architecture that connects the cluster routers on each node together to enable east west traffic. The subnet chosen should not overlap with other networks specified for OVN-Kubernetes as well as other networks used on the host. When omitted, this means no opinion and the platform is left to choose a reasonable default which is subject to change over time. The subnet must be large enough to accommodate one IP per node in your cluster The current default subnet is fd97::/64 The value must be in proper IPV6 CIDR format Note that IPV6 dual addresses are not permitted",
+	"internalJoinSubnet":          "internalJoinSubnet is a v6 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. The subnet must be large enough to accommodate one IP per node in your cluster The current default value is fd98::/64 The value must be in proper IPV6 CIDR format Note that IPV6 dual addresses are not permitted",
 }
 
 func (IPv6OVNKubernetesConfig) SwaggerDoc() map[string]string {
@@ -1840,8 +1850,8 @@ var map_OVNKubernetesConfig = map[string]string{
 	"ipsecConfig":       "ipsecConfig enables and configures IPsec for pods on the pod network within the cluster.",
 	"policyAuditConfig": "policyAuditConfig is the configuration for network policy audit events.
If unset, reported defaults are used.", "gatewayConfig": "gatewayConfig holds the configuration for node gateway options.", - "v4InternalSubnet": "v4InternalSubnet is a v4 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. The value cannot be changed after installation. Default is 100.64.0.0/16", - "v6InternalSubnet": "v6InternalSubnet is a v6 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. The value cannot be changed after installation. Default is fd98::/64", + "v4InternalSubnet": "v4InternalSubnet is a v4 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. Default is 100.64.0.0/16", + "v6InternalSubnet": "v6InternalSubnet is a v6 subnet used internally by ovn-kubernetes in case the default one is being already used by something else. It must not overlap with any other subnet being used by OpenShift or by the node network. The size of the subnet must be larger than the number of nodes. Default is fd98::/64", "egressIPConfig": "egressIPConfig holds the configuration for EgressIP options.", "ipv4": "ipv4 allows users to configure IP settings for IPv4 connections. When ommitted, this means no opinions and the default configuration is used. Check individual fields within ipv4 for details of default values.", "ipv6": "ipv6 allows users to configure IP settings for IPv6 connections. When ommitted, this means no opinions and the default configuration is used. Check individual fields within ipv4 for details of default values.", diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/types_etcdbackup.go b/vendor/github.com/openshift/api/operator/v1alpha1/types_etcdbackup.go index fe56b0eab2..dc5d81cea1 100644 --- a/vendor/github.com/openshift/api/operator/v1alpha1/types_etcdbackup.go +++ b/vendor/github.com/openshift/api/operator/v1alpha1/types_etcdbackup.go @@ -41,7 +41,6 @@ type EtcdBackupSpec struct { PVCName string `json:"pvcName"` } -// +kubebuilder:validation:Optional type EtcdBackupStatus struct { // conditions provide details on the status of the etcd backup job. // +listType=map diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.deepcopy.go index f8daa0576b..de4c071281 100644 --- a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ //go:build !ignore_autogenerated // +build !ignore_autogenerated -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by codegen. DO NOT EDIT. 
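A minimal sketch, not part of this patch, of how the new irreconcilableValidationOverrides field added in the machineconfiguration hunks above might be populated. Type and field names come from the generated code in this diff; the string-backed element values ("Disks", "FileSystems") follow the documented enum, and the helper function itself is hypothetical:

package example // hypothetical consumer, not part of this patch

import operatorv1 "github.com/openshift/api/operator/v1"

// withStorageOverrides permits irreconcilable edits to the disks and
// filesystems sections of rendered MachineConfigs; raid changes stay
// forbidden because "Raid" is not listed.
func withStorageOverrides(spec *operatorv1.MachineConfigurationSpec) {
	spec.IrreconcilableValidationOverrides = operatorv1.IrreconcilableValidationOverrides{
		Storage: []operatorv1.IrreconcilableValidationOverridesStorage{
			"Disks",
			"FileSystems",
		},
	}
}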
package v1alpha1 diff --git a/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/zz_generated.deepcopy.go index 26431d8c1b..d5276d664c 100644 --- a/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ //go:build !ignore_autogenerated // +build !ignore_autogenerated -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by codegen. DO NOT EDIT. package v1alpha1 diff --git a/vendor/github.com/openshift/api/osin/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/osin/v1/zz_generated.deepcopy.go index cb90b8365d..7d72345345 100644 --- a/vendor/github.com/openshift/api/osin/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/osin/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ //go:build !ignore_autogenerated // +build !ignore_autogenerated -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by codegen. DO NOT EDIT. package v1 diff --git a/vendor/github.com/openshift/api/project/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/project/v1/zz_generated.deepcopy.go index ddbdda971d..b712a48118 100644 --- a/vendor/github.com/openshift/api/project/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/project/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ //go:build !ignore_autogenerated // +build !ignore_autogenerated -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by codegen. DO NOT EDIT. package v1 diff --git a/vendor/github.com/openshift/api/quota/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/quota/v1/zz_generated.deepcopy.go index 72ac882fbd..3ef29ce033 100644 --- a/vendor/github.com/openshift/api/quota/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/quota/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ //go:build !ignore_autogenerated // +build !ignore_autogenerated -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by codegen. DO NOT EDIT. package v1 diff --git a/vendor/github.com/openshift/api/route/v1/generated.proto b/vendor/github.com/openshift/api/route/v1/generated.proto index adfce0d19f..e055eb0d26 100644 --- a/vendor/github.com/openshift/api/route/v1/generated.proto +++ b/vendor/github.com/openshift/api/route/v1/generated.proto @@ -366,6 +366,7 @@ message RouteStatus { // ingress points may contain duplicate Host or RouterName values. Routes // are considered live once they are `Ready` // +listType=atomic + // +optional repeated RouteIngress ingress = 1; } diff --git a/vendor/github.com/openshift/api/route/v1/types.go b/vendor/github.com/openshift/api/route/v1/types.go index 2feb425a2c..5a61f477e7 100644 --- a/vendor/github.com/openshift/api/route/v1/types.go +++ b/vendor/github.com/openshift/api/route/v1/types.go @@ -354,6 +354,7 @@ type RouteStatus struct { // ingress points may contain duplicate Host or RouterName values. 
Routes // are considered live once they are `Ready` // +listType=atomic + // +optional Ingress []RouteIngress `json:"ingress,omitempty" protobuf:"bytes,1,rep,name=ingress"` } diff --git a/vendor/github.com/openshift/api/route/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/route/v1/zz_generated.deepcopy.go index 23a2edd423..155348d361 100644 --- a/vendor/github.com/openshift/api/route/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/route/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ //go:build !ignore_autogenerated // +build !ignore_autogenerated -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by codegen. DO NOT EDIT. package v1 diff --git a/vendor/github.com/openshift/api/samples/v1/generated.proto b/vendor/github.com/openshift/api/samples/v1/generated.proto index eeda6835ae..95220f74b5 100644 --- a/vendor/github.com/openshift/api/samples/v1/generated.proto +++ b/vendor/github.com/openshift/api/samples/v1/generated.proto @@ -133,12 +133,14 @@ message ConfigStatus { // the operator back on (i.e. "Managed") when it was previously "Unmanaged". // +patchMergeKey=type // +patchStrategy=merge + // +optional optional string managementState = 1; // conditions represents the available maintenance status of the sample // imagestreams and templates. // +patchMergeKey=type // +patchStrategy=merge + // +optional repeated ConfigCondition conditions = 2; // samplesRegistry allows for the specification of which registry is accessed @@ -147,12 +149,14 @@ message ConfigStatus { // defaults to registry.redhat.io. // +patchMergeKey=type // +patchStrategy=merge + // +optional optional string samplesRegistry = 3; // architectures determine which hardware architecture(s) to install, where x86_64 and ppc64le are the // supported choices. // +patchMergeKey=type // +patchStrategy=merge + // +optional repeated string architectures = 5; // skippedImagestreams specifies names of image streams that should NOT be @@ -162,6 +166,7 @@ message ConfigStatus { // listed here. // +patchMergeKey=type // +patchStrategy=merge + // +optional repeated string skippedImagestreams = 6; // skippedTemplates specifies names of templates that should NOT be @@ -171,11 +176,13 @@ message ConfigStatus { // listed here. // +patchMergeKey=type // +patchStrategy=merge + // +optional repeated string skippedTemplates = 7; // version is the value of the operator's payload based version indicator when it was last successfully processed // +patchMergeKey=type // +patchStrategy=merge + // +optional optional string version = 8; } diff --git a/vendor/github.com/openshift/api/samples/v1/types_config.go b/vendor/github.com/openshift/api/samples/v1/types_config.go index 320500b0ce..8d2cf7df50 100644 --- a/vendor/github.com/openshift/api/samples/v1/types_config.go +++ b/vendor/github.com/openshift/api/samples/v1/types_config.go @@ -104,11 +104,13 @@ type ConfigStatus struct { // the operator back on (i.e. "Managed") when it was previously "Unmanaged". // +patchMergeKey=type // +patchStrategy=merge + // +optional ManagementState operatorv1.ManagementState `json:"managementState,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=managementState"` // conditions represents the available maintenance status of the sample // imagestreams and templates. 
// +patchMergeKey=type // +patchStrategy=merge + // +optional Conditions []ConfigCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` // samplesRegistry allows for the specification of which registry is accessed @@ -117,12 +119,14 @@ type ConfigStatus struct { // defaults to registry.redhat.io. // +patchMergeKey=type // +patchStrategy=merge + // +optional SamplesRegistry string `json:"samplesRegistry,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,3,rep,name=samplesRegistry"` // architectures determine which hardware architecture(s) to install, where x86_64 and ppc64le are the // supported choices. // +patchMergeKey=type // +patchStrategy=merge + // +optional Architectures []string `json:"architectures,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,5,rep,name=architectures"` // skippedImagestreams specifies names of image streams that should NOT be @@ -132,6 +136,7 @@ type ConfigStatus struct { // listed here. // +patchMergeKey=type // +patchStrategy=merge + // +optional SkippedImagestreams []string `json:"skippedImagestreams,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=skippedImagestreams"` // skippedTemplates specifies names of templates that should NOT be @@ -141,11 +146,13 @@ type ConfigStatus struct { // listed here. // +patchMergeKey=type // +patchStrategy=merge + // +optional SkippedTemplates []string `json:"skippedTemplates,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,7,rep,name=skippedTemplates"` // version is the value of the operator's payload based version indicator when it was last successfully processed // +patchMergeKey=type // +patchStrategy=merge + // +optional Version string `json:"version,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,8,rep,name=version"` } diff --git a/vendor/github.com/openshift/api/samples/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/samples/v1/zz_generated.deepcopy.go index 30979c0486..115712bebd 100644 --- a/vendor/github.com/openshift/api/samples/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/samples/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ //go:build !ignore_autogenerated // +build !ignore_autogenerated -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by codegen. DO NOT EDIT. package v1 diff --git a/vendor/github.com/openshift/api/security/v1/generated.proto b/vendor/github.com/openshift/api/security/v1/generated.proto index 0e6bb094fb..933de5450e 100644 --- a/vendor/github.com/openshift/api/security/v1/generated.proto +++ b/vendor/github.com/openshift/api/security/v1/generated.proto @@ -73,6 +73,7 @@ message PodSecurityPolicyReviewSpec { // PodSecurityPolicyReviewStatus represents the status of PodSecurityPolicyReview. message PodSecurityPolicyReviewStatus { // allowedServiceAccounts returns the list of service accounts in *this* namespace that have the power to create the PodTemplateSpec. + // +optional repeated ServiceAccountPodSecurityPolicyReviewStatus allowedServiceAccounts = 1; } @@ -134,14 +135,17 @@ message PodSecurityPolicySubjectReviewStatus { // allowedBy is a reference to the rule that allows the PodTemplateSpec. // A rule can be a SecurityContextConstraint or a PodSecurityPolicy // A `nil`, indicates that it was denied. + // +optional optional .k8s.io.api.core.v1.ObjectReference allowedBy = 1; // A machine-readable description of why this operation is in the // "Failure" status. 
If this value is empty there // is no information available. + // +optional optional string reason = 2; // template is the PodTemplateSpec after the defaulting is applied. + // +optional optional .k8s.io.api.core.v1.PodTemplateSpec template = 3; } diff --git a/vendor/github.com/openshift/api/security/v1/types.go b/vendor/github.com/openshift/api/security/v1/types.go index 18585e97c0..fb491480d7 100644 --- a/vendor/github.com/openshift/api/security/v1/types.go +++ b/vendor/github.com/openshift/api/security/v1/types.go @@ -386,14 +386,17 @@ type PodSecurityPolicySubjectReviewStatus struct { // allowedBy is a reference to the rule that allows the PodTemplateSpec. // A rule can be a SecurityContextConstraint or a PodSecurityPolicy // A `nil`, indicates that it was denied. + // +optional AllowedBy *corev1.ObjectReference `json:"allowedBy,omitempty" protobuf:"bytes,1,opt,name=allowedBy"` // A machine-readable description of why this operation is in the // "Failure" status. If this value is empty there // is no information available. + // +optional Reason string `json:"reason,omitempty" protobuf:"bytes,2,opt,name=reason"` // template is the PodTemplateSpec after the defaulting is applied. + // +optional Template corev1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"` } @@ -465,6 +468,7 @@ type PodSecurityPolicyReviewSpec struct { // PodSecurityPolicyReviewStatus represents the status of PodSecurityPolicyReview. type PodSecurityPolicyReviewStatus struct { // allowedServiceAccounts returns the list of service accounts in *this* namespace that have the power to create the PodTemplateSpec. + // +optional AllowedServiceAccounts []ServiceAccountPodSecurityPolicyReviewStatus `json:"allowedServiceAccounts" protobuf:"bytes,1,rep,name=allowedServiceAccounts"` } diff --git a/vendor/github.com/openshift/api/security/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/security/v1/zz_generated.deepcopy.go index 66e8b5a21c..d6263fc025 100644 --- a/vendor/github.com/openshift/api/security/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/security/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ //go:build !ignore_autogenerated // +build !ignore_autogenerated -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by codegen. DO NOT EDIT. package v1 diff --git a/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/zz_generated.deepcopy.go index 2661c23aa5..58d9447a1a 100644 --- a/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ //go:build !ignore_autogenerated // +build !ignore_autogenerated -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by codegen. DO NOT EDIT. 
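Several of the surrounding openshift/api hunks add only "+optional" markers. A minimal sketch of what the marker changes, assuming standard controller-gen semantics: the field is left out of the "required" list in the generated OpenAPI schema, so clients may omit it. The type below is hypothetical and not part of this patch:

package example // hypothetical, not part of this patch

// ExampleStatus shows the marker in isolation.
type ExampleStatus struct {
	// reason may be left unset by clients; the +optional marker keeps the
	// field out of the required list in the generated CRD schema.
	// +optional
	Reason string `json:"reason,omitempty"`
}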
package v1alpha1 diff --git a/vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_configmap.go b/vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_configmap.go index 2a4a0d1b6c..51466b0d4e 100644 --- a/vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_configmap.go +++ b/vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_configmap.go @@ -47,7 +47,7 @@ type SharedConfigMap struct { // spec is the specification of the desired shared configmap // +required - Spec SharedConfigMapSpec `json:"spec,omitempty"` + Spec SharedConfigMapSpec `json:"spec"` // status is the observed status of the shared configmap Status SharedConfigMapStatus `json:"status,omitempty"` diff --git a/vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_secret.go b/vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_secret.go index be06f97749..9551fb4a77 100644 --- a/vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_secret.go +++ b/vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_secret.go @@ -46,7 +46,7 @@ type SharedSecret struct { // spec is the specification of the desired shared secret // +required - Spec SharedSecretSpec `json:"spec,omitempty"` + Spec SharedSecretSpec `json:"spec"` // status is the observed status of the shared secret Status SharedSecretStatus `json:"status,omitempty"` diff --git a/vendor/github.com/openshift/api/sharedresource/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/sharedresource/v1alpha1/zz_generated.deepcopy.go index 25ecd38363..d8ed6e9069 100644 --- a/vendor/github.com/openshift/api/sharedresource/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/sharedresource/v1alpha1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ //go:build !ignore_autogenerated // +build !ignore_autogenerated -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by codegen. DO NOT EDIT. package v1alpha1 diff --git a/vendor/github.com/openshift/api/template/v1/generated.proto b/vendor/github.com/openshift/api/template/v1/generated.proto index 8f27eb48a0..e1d7a21d46 100644 --- a/vendor/github.com/openshift/api/template/v1/generated.proto +++ b/vendor/github.com/openshift/api/template/v1/generated.proto @@ -241,9 +241,11 @@ message TemplateInstanceSpec { message TemplateInstanceStatus { // conditions represent the latest available observations of a // TemplateInstance's current state. + // +optional repeated TemplateInstanceCondition conditions = 1; // objects references the objects created by the TemplateInstance. + // +optional repeated TemplateInstanceObject objects = 2; } diff --git a/vendor/github.com/openshift/api/template/v1/types.go b/vendor/github.com/openshift/api/template/v1/types.go index 5510b0f90b..e3dee62fae 100644 --- a/vendor/github.com/openshift/api/template/v1/types.go +++ b/vendor/github.com/openshift/api/template/v1/types.go @@ -179,9 +179,11 @@ func (t ExtraValue) String() string { type TemplateInstanceStatus struct { // conditions represent the latest available observations of a // TemplateInstance's current state. + // +optional Conditions []TemplateInstanceCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` // objects references the objects created by the TemplateInstance. 
+ // +optional Objects []TemplateInstanceObject `json:"objects,omitempty" protobuf:"bytes,2,rep,name=objects"` } diff --git a/vendor/github.com/openshift/api/template/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/template/v1/zz_generated.deepcopy.go index ff14f246bd..7cefe48637 100644 --- a/vendor/github.com/openshift/api/template/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/template/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ //go:build !ignore_autogenerated // +build !ignore_autogenerated -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by codegen. DO NOT EDIT. package v1 diff --git a/vendor/github.com/openshift/api/user/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/user/v1/zz_generated.deepcopy.go index e6b2fb867c..b32e715a60 100644 --- a/vendor/github.com/openshift/api/user/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/user/v1/zz_generated.deepcopy.go @@ -1,7 +1,7 @@ //go:build !ignore_autogenerated // +build !ignore_autogenerated -// Code generated by deepcopy-gen. DO NOT EDIT. +// Code generated by codegen. DO NOT EDIT. package v1 diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go index 4d4b4aad6f..7e19eba090 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go @@ -7,10 +7,13 @@ import ( "time" ) -type CompareType int +// Deprecated: CompareType has only ever been for internal use and has accidentally been published since v1.6.0. Do not use it. +type CompareType = compareResult + +type compareResult int const ( - compareLess CompareType = iota - 1 + compareLess compareResult = iota - 1 compareEqual compareGreater ) @@ -39,7 +42,7 @@ var ( bytesType = reflect.TypeOf([]byte{}) ) -func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { +func compare(obj1, obj2 interface{}, kind reflect.Kind) (compareResult, bool) { obj1Value := reflect.ValueOf(obj1) obj2Value := reflect.ValueOf(obj2) @@ -325,7 +328,13 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { timeObj2 = obj2Value.Convert(timeType).Interface().(time.Time) } - return compare(timeObj1.UnixNano(), timeObj2.UnixNano(), reflect.Int64) + if timeObj1.Before(timeObj2) { + return compareLess, true + } + if timeObj1.Equal(timeObj2) { + return compareEqual, true + } + return compareGreater, true } case reflect.Slice: { @@ -345,7 +354,7 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) { bytesObj2 = obj2Value.Convert(bytesType).Interface().([]byte) } - return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true + return compareResult(bytes.Compare(bytesObj1, bytesObj2)), true } case reflect.Uintptr: { @@ -381,7 +390,7 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) + return compareTwoValues(t, e1, e2, []compareResult{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) 
} // GreaterOrEqual asserts that the first element is greater than or equal to the second @@ -394,7 +403,7 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) + return compareTwoValues(t, e1, e2, []compareResult{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) } // Less asserts that the first element is less than the second @@ -406,7 +415,7 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) + return compareTwoValues(t, e1, e2, []compareResult{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) } // LessOrEqual asserts that the first element is less than or equal to the second @@ -419,7 +428,7 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter if h, ok := t.(tHelper); ok { h.Helper() } - return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) + return compareTwoValues(t, e1, e2, []compareResult{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) } // Positive asserts that the specified element is positive @@ -431,7 +440,7 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { h.Helper() } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs...) + return compareTwoValues(t, e, zero.Interface(), []compareResult{compareGreater}, "\"%v\" is not positive", msgAndArgs...) } // Negative asserts that the specified element is negative @@ -443,10 +452,10 @@ func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool { h.Helper() } zero := reflect.Zero(reflect.TypeOf(e)) - return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs...) + return compareTwoValues(t, e, zero.Interface(), []compareResult{compareLess}, "\"%v\" is not negative", msgAndArgs...) 
} -func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool { +func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() } @@ -469,7 +478,7 @@ func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedCompare return true } -func containsValue(values []CompareType, value CompareType) bool { +func containsValue(values []compareResult, value compareResult) bool { for _, v := range values { if v == value { return true diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go index 3ddab109ad..1906341657 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_format.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go @@ -104,8 +104,8 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, return EqualExportedValues(t, expected, actual, append([]interface{}{msg}, args...)...) } -// EqualValuesf asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValuesf asserts that two objects are equal or convertible to the larger +// type and equal. // // assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool { @@ -186,7 +186,7 @@ func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick // assert.EventuallyWithTf(t, func(c *assert.CollectT, "error message %s", "formatted") { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func EventuallyWithTf(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() @@ -568,6 +568,23 @@ func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, a return NotContains(t, s, contains, append([]interface{}{msg}, args...)...) } +// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// assert.NotElementsMatchf(t, [1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false +// +// assert.NotElementsMatchf(t, [1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true +// +// assert.NotElementsMatchf(t, [1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true +func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...) +} + // NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. 
// @@ -604,7 +621,16 @@ func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg s return NotEqualValues(t, expected, actual, append([]interface{}{msg}, args...)...) } -// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// NotErrorAsf asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func NotErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + return NotErrorAs(t, err, target, append([]interface{}{msg}, args...)...) +} + +// NotErrorIsf asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool { if h, ok := t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go index a84e09bd40..21629087ba 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -186,8 +186,8 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface return EqualExportedValuesf(a.t, expected, actual, msg, args...) } -// EqualValues asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValues asserts that two objects are equal or convertible to the larger +// type and equal. // // a.EqualValues(uint32(123), int32(123)) func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { @@ -197,8 +197,8 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn return EqualValues(a.t, expected, actual, msgAndArgs...) } -// EqualValuesf asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValuesf asserts that two objects are equal or convertible to the larger +// type and equal. 
// // a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { @@ -336,7 +336,7 @@ func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, ti // a.EventuallyWithT(func(c *assert.CollectT) { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func (a *Assertions) EventuallyWithT(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -361,7 +361,7 @@ func (a *Assertions) EventuallyWithT(condition func(collect *CollectT), waitFor // a.EventuallyWithTf(func(c *assert.CollectT, "error message %s", "formatted") { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func (a *Assertions) EventuallyWithTf(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1128,6 +1128,40 @@ func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg strin return NotContainsf(a.t, s, contains, msg, args...) } +// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// a.NotElementsMatch([1, 1, 2, 3], [1, 1, 2, 3]) -> false +// +// a.NotElementsMatch([1, 1, 2, 3], [1, 2, 3]) -> true +// +// a.NotElementsMatch([1, 2, 3], [1, 2, 4]) -> true +func (a *Assertions) NotElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotElementsMatch(a.t, listA, listB, msgAndArgs...) +} + +// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// a.NotElementsMatchf([1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false +// +// a.NotElementsMatchf([1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true +// +// a.NotElementsMatchf([1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true +func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotElementsMatchf(a.t, listA, listB, msg, args...) +} + // NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. 
// @@ -1200,7 +1234,25 @@ func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg str return NotEqualf(a.t, expected, actual, msg, args...) } -// NotErrorIs asserts that at none of the errors in err's chain matches target. +// NotErrorAs asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func (a *Assertions) NotErrorAs(err error, target interface{}, msgAndArgs ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotErrorAs(a.t, err, target, msgAndArgs...) +} + +// NotErrorAsf asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func (a *Assertions) NotErrorAsf(err error, target interface{}, msg string, args ...interface{}) bool { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + return NotErrorAsf(a.t, err, target, msg, args...) +} + +// NotErrorIs asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) bool { if h, ok := a.t.(tHelper); ok { @@ -1209,7 +1261,7 @@ func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface return NotErrorIs(a.t, err, target, msgAndArgs...) } -// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// NotErrorIsf asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) bool { if h, ok := a.t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go index 00df62a059..1d2f71824a 100644 --- a/vendor/github.com/stretchr/testify/assert/assertion_order.go +++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go @@ -6,7 +6,7 @@ import ( ) // isOrdered checks that collection contains orderable elements. -func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool { +func isOrdered(t TestingT, object interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool { objKind := reflect.TypeOf(object).Kind() if objKind != reflect.Slice && objKind != reflect.Array { return false @@ -50,7 +50,7 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareT // assert.IsIncreasing(t, []float{1, 2}) // assert.IsIncreasing(t, []string{"a", "b"}) func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) + return isOrdered(t, object, []compareResult{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) } // IsNonIncreasing asserts that the collection is not increasing @@ -59,7 +59,7 @@ func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo // assert.IsNonIncreasing(t, []float{2, 1}) // assert.IsNonIncreasing(t, []string{"b", "a"}) func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) + return isOrdered(t, object, []compareResult{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) 
} // IsDecreasing asserts that the collection is decreasing @@ -68,7 +68,7 @@ func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) // assert.IsDecreasing(t, []float{2, 1}) // assert.IsDecreasing(t, []string{"b", "a"}) func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) + return isOrdered(t, object, []compareResult{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) } // IsNonDecreasing asserts that the collection is not decreasing @@ -77,5 +77,5 @@ func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) boo // assert.IsNonDecreasing(t, []float{1, 2}) // assert.IsNonDecreasing(t, []string{"a", "b"}) func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) + return isOrdered(t, object, []compareResult{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) } diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index 0b7570f21c..4e91332bb5 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -19,7 +19,9 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/pmezard/go-difflib/difflib" - "gopkg.in/yaml.v3" + + // Wrapper around gopkg.in/yaml.v3 + "github.com/stretchr/testify/assert/yaml" ) //go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl" @@ -45,6 +47,10 @@ type BoolAssertionFunc func(TestingT, bool, ...interface{}) bool // for table driven tests. type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool +// PanicAssertionFunc is a common function prototype when validating a panic value. Can be useful +// for table driven tests. +type PanicAssertionFunc = func(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool + // Comparison is a custom function that returns true on success and false on failure type Comparison func() (success bool) @@ -496,7 +502,13 @@ func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) b h.Helper() } - if !samePointers(expected, actual) { + same, ok := samePointers(expected, actual) + if !ok { + return Fail(t, "Both arguments must be pointers", msgAndArgs...) + } + + if !same { + // both are pointers but not the same type & pointing to the same address return Fail(t, fmt.Sprintf("Not same: \n"+ "expected: %p %#v\n"+ "actual : %p %#v", expected, expected, actual, actual), msgAndArgs...) @@ -516,7 +528,13 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} h.Helper() } - if samePointers(expected, actual) { + same, ok := samePointers(expected, actual) + if !ok { + //fails when the arguments are not pointers + return !(Fail(t, "Both arguments must be pointers", msgAndArgs...)) + } + + if same { return Fail(t, fmt.Sprintf( "Expected and actual point to the same object: %p %#v", expected, expected), msgAndArgs...) 
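The Same/NotSame changes above (together with the two-value samePointers rework in the next hunk) now distinguish "arguments are not pointers" from "pointers differ". A small sketch of the resulting behavior, assuming it matches the code in these hunks; the test is hypothetical:

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestSameSketch(t *testing.T) {
	x := 1
	p := &x
	assert.Same(t, p, p) // passes: identical pointers of the same type

	// With the two-value samePointers, non-pointer arguments are now
	// rejected outright: the call below would fail with
	// "Both arguments must be pointers" instead of silently passing.
	// assert.NotSame(t, 1, 1)
}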
@@ -524,21 +542,23 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{} return true } -// samePointers compares two generic interface objects and returns whether -// they point to the same object -func samePointers(first, second interface{}) bool { +// samePointers checks if two generic interface objects are pointers of the same +// type pointing to the same object. It returns two values: same indicating if +// they are the same type and point to the same object, and ok indicating that +// both inputs are pointers. +func samePointers(first, second interface{}) (same bool, ok bool) { firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second) if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr { - return false + return false, false //not both are pointers } firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second) if firstType != secondType { - return false + return false, true // both are pointers, but of different types } // compare pointer addresses - return first == second + return first == second, true } // formatUnequalValues takes two values of arbitrary types and returns string @@ -572,8 +592,8 @@ func truncatingFormat(data interface{}) string { return value } -// EqualValues asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValues asserts that two objects are equal or convertible to the larger +// type and equal. // // assert.EqualValues(t, uint32(123), int32(123)) func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { @@ -615,21 +635,6 @@ func EqualExportedValues(t TestingT, expected, actual interface{}, msgAndArgs .. return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...) } - if aType.Kind() == reflect.Ptr { - aType = aType.Elem() - } - if bType.Kind() == reflect.Ptr { - bType = bType.Elem() - } - - if aType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...) - } - - if bType.Kind() != reflect.Struct { - return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...) - } - expected = copyExportedFields(expected) actual = copyExportedFields(actual) @@ -1170,6 +1175,39 @@ func formatListDiff(listA, listB interface{}, extraA, extraB []interface{}) stri return msg.String() } +// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// assert.NotElementsMatch(t, [1, 1, 2, 3], [1, 1, 2, 3]) -> false +// +// assert.NotElementsMatch(t, [1, 1, 2, 3], [1, 2, 3]) -> true +// +// assert.NotElementsMatch(t, [1, 2, 3], [1, 2, 4]) -> true +func NotElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface{}) (ok bool) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if isEmpty(listA) && isEmpty(listB) { + return Fail(t, "listA and listB contain the same elements", msgAndArgs) + } + + if !isList(t, listA, msgAndArgs...) { + return Fail(t, "listA is not a list type", msgAndArgs...) + } + if !isList(t, listB, msgAndArgs...) { + return Fail(t, "listB is not a list type", msgAndArgs...) 
+ } + + extraA, extraB := diffLists(listA, listB) + if len(extraA) == 0 && len(extraB) == 0 { + return Fail(t, "listA and listB contain the same elements", msgAndArgs) + } + + return true +} + // Condition uses a Comparison to assert a complex condition. func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { @@ -1488,6 +1526,9 @@ func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAnd if err != nil { return Fail(t, err.Error(), msgAndArgs...) } + if math.IsNaN(actualEpsilon) { + return Fail(t, "relative error is NaN", msgAndArgs...) + } if actualEpsilon > epsilon { return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+ " < %#v (actual)", epsilon, actualEpsilon), msgAndArgs...) @@ -1611,7 +1652,6 @@ func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...in // matchRegexp return true if a specified regexp matches a string. func matchRegexp(rx interface{}, str interface{}) bool { - var r *regexp.Regexp if rr, ok := rx.(*regexp.Regexp); ok { r = rr @@ -1619,7 +1659,14 @@ func matchRegexp(rx interface{}, str interface{}) bool { r = regexp.MustCompile(fmt.Sprint(rx)) } - return (r.FindStringIndex(fmt.Sprint(str)) != nil) + switch v := str.(type) { + case []byte: + return r.Match(v) + case string: + return r.MatchString(v) + default: + return r.MatchString(fmt.Sprint(v)) + } } @@ -1872,7 +1919,7 @@ var spewConfigStringerEnabled = spew.ConfigState{ MaxDepth: 10, } -type tHelper interface { +type tHelper = interface { Helper() } @@ -1911,6 +1958,9 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t // CollectT implements the TestingT interface and collects all errors. type CollectT struct { + // A slice of errors. Non-nil slice denotes a failure. + // If it's non-nil but len(c.errors) == 0, this is also a failure + // obtained by direct c.FailNow() call. errors []error } @@ -1919,9 +1969,10 @@ func (c *CollectT) Errorf(format string, args ...interface{}) { c.errors = append(c.errors, fmt.Errorf(format, args...)) } -// FailNow panics. -func (*CollectT) FailNow() { - panic("Assertion failed") +// FailNow stops execution by calling runtime.Goexit. +func (c *CollectT) FailNow() { + c.fail() + runtime.Goexit() } // Deprecated: That was a method for internal usage that should not have been published. Now just panics. @@ -1934,6 +1985,16 @@ func (*CollectT) Copy(TestingT) { panic("Copy() is deprecated") } +func (c *CollectT) fail() { + if !c.failed() { + c.errors = []error{} // Make it non-nil to mark a failure. + } +} + +func (c *CollectT) failed() bool { + return c.errors != nil +} + // EventuallyWithT asserts that given condition will be met in waitFor time, // periodically checking target function each tick. 
In contrast to Eventually, // it supplies a CollectT to the condition function, so that the condition @@ -1951,14 +2012,14 @@ func (*CollectT) Copy(TestingT) { // assert.EventuallyWithT(t, func(c *assert.CollectT) { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { h.Helper() } var lastFinishedTickErrs []error - ch := make(chan []error, 1) + ch := make(chan *CollectT, 1) timer := time.NewTimer(waitFor) defer timer.Stop() @@ -1978,16 +2039,16 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time go func() { collect := new(CollectT) defer func() { - ch <- collect.errors + ch <- collect }() condition(collect) }() - case errs := <-ch: - if len(errs) == 0 { + case collect := <-ch: + if !collect.failed() { return true } // Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached. - lastFinishedTickErrs = errs + lastFinishedTickErrs = collect.errors tick = ticker.C } } @@ -2049,7 +2110,7 @@ func ErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { ), msgAndArgs...) } -// NotErrorIs asserts that at none of the errors in err's chain matches target. +// NotErrorIs asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func NotErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool { if h, ok := t.(tHelper); ok { @@ -2090,6 +2151,24 @@ func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{ ), msgAndArgs...) } +// NotErrorAs asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func NotErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) bool { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if !errors.As(err, target) { + return true + } + + chain := buildErrorChainString(err) + + return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+ + "found: %q\n"+ + "in chain: %s", target, chain, + ), msgAndArgs...) +} + func buildErrorChainString(err error) string { if err == nil { return "" diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go new file mode 100644 index 0000000000..baa0cc7d7f --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go @@ -0,0 +1,25 @@ +//go:build testify_yaml_custom && !testify_yaml_fail && !testify_yaml_default +// +build testify_yaml_custom,!testify_yaml_fail,!testify_yaml_default + +// Package yaml is an implementation of YAML functions that calls a pluggable implementation. +// +// This implementation is selected with the testify_yaml_custom build tag. +// +// go test -tags testify_yaml_custom +// +// This implementation can be used at build time to replace the default implementation +// to avoid linking with [gopkg.in/yaml.v3]. +// +// In your test package: +// +// import assertYaml "github.com/stretchr/testify/assert/yaml" +// +// func init() { +// assertYaml.Unmarshal = func (in []byte, out interface{}) error { +// // ... 
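+//		// e.g. delegate to another decoder of your choosing; the import
+//		// name k8syaml below is hypothetical, not wired in by this patch:
+//		// return k8syaml.Unmarshal(in, out)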
+// return nil +// } +// } +package yaml + +var Unmarshal func(in []byte, out interface{}) error diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go new file mode 100644 index 0000000000..b83c6cf64c --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go @@ -0,0 +1,37 @@ +//go:build !testify_yaml_fail && !testify_yaml_custom +// +build !testify_yaml_fail,!testify_yaml_custom + +// Package yaml is just an indirection to handle YAML deserialization. +// +// This package is just an indirection that allows the builder to override the +// indirection with an alternative implementation of this package that uses +// another implementation of YAML deserialization. This allows to not either not +// use YAML deserialization at all, or to use another implementation than +// [gopkg.in/yaml.v3] (for example for license compatibility reasons, see [PR #1120]). +// +// Alternative implementations are selected using build tags: +// +// - testify_yaml_fail: [Unmarshal] always fails with an error +// - testify_yaml_custom: [Unmarshal] is a variable. Caller must initialize it +// before calling any of [github.com/stretchr/testify/assert.YAMLEq] or +// [github.com/stretchr/testify/assert.YAMLEqf]. +// +// Usage: +// +// go test -tags testify_yaml_fail +// +// You can check with "go list" which implementation is linked: +// +// go list -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml +// go list -tags testify_yaml_fail -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml +// go list -tags testify_yaml_custom -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml +// +// [PR #1120]: https://github.com/stretchr/testify/pull/1120 +package yaml + +import goyaml "gopkg.in/yaml.v3" + +// Unmarshal is just a wrapper of [gopkg.in/yaml.v3.Unmarshal]. +func Unmarshal(in []byte, out interface{}) error { + return goyaml.Unmarshal(in, out) +} diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go new file mode 100644 index 0000000000..e78f7dfe69 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go @@ -0,0 +1,18 @@ +//go:build testify_yaml_fail && !testify_yaml_custom && !testify_yaml_default +// +build testify_yaml_fail,!testify_yaml_custom,!testify_yaml_default + +// Package yaml is an implementation of YAML functions that always fail. +// +// This implementation can be used at build time to replace the default implementation +// to avoid linking with [gopkg.in/yaml.v3]: +// +// go test -tags testify_yaml_fail +package yaml + +import "errors" + +var errNotImplemented = errors.New("YAML functions are not available (see https://pkg.go.dev/github.com/stretchr/testify/assert/yaml)") + +func Unmarshal([]byte, interface{}) error { + return errNotImplemented +} diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go index 506a82f807..d8921950d7 100644 --- a/vendor/github.com/stretchr/testify/require/require.go +++ b/vendor/github.com/stretchr/testify/require/require.go @@ -34,9 +34,9 @@ func Conditionf(t TestingT, comp assert.Comparison, msg string, args ...interfac // Contains asserts that the specified string, list(array, slice...) or map contains the // specified substring or element. 
// -// assert.Contains(t, "Hello World", "World") -// assert.Contains(t, ["Hello", "World"], "World") -// assert.Contains(t, {"Hello": "World"}, "Hello") +// require.Contains(t, "Hello World", "World") +// require.Contains(t, ["Hello", "World"], "World") +// require.Contains(t, {"Hello": "World"}, "Hello") func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -50,9 +50,9 @@ func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...int // Containsf asserts that the specified string, list(array, slice...) or map contains the // specified substring or element. // -// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted") -// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") -// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") +// require.Containsf(t, "Hello World", "World", "error message %s", "formatted") +// require.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted") +// require.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted") func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -91,7 +91,7 @@ func DirExistsf(t TestingT, path string, msg string, args ...interface{}) { // listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, // the number of appearances of each of them in both lists should match. // -// assert.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2]) +// require.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2]) func ElementsMatch(t TestingT, listA interface{}, listB interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -106,7 +106,7 @@ func ElementsMatch(t TestingT, listA interface{}, listB interface{}, msgAndArgs // listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, // the number of appearances of each of them in both lists should match. // -// assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") +// require.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted") func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -120,7 +120,7 @@ func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string // Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either // a slice or a channel with len == 0. // -// assert.Empty(t, obj) +// require.Empty(t, obj) func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -134,7 +134,7 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { // Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either // a slice or a channel with len == 0. // -// assert.Emptyf(t, obj, "error message %s", "formatted") +// require.Emptyf(t, obj, "error message %s", "formatted") func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -147,7 +147,7 @@ func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) { // Equal asserts that two objects are equal. 
// -// assert.Equal(t, 123, 123) +// require.Equal(t, 123, 123) // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). Function equality @@ -166,7 +166,7 @@ func Equal(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...i // and that it is equal to the provided error. // // actualObj, err := SomeFunction() -// assert.EqualError(t, err, expectedErrorString) +// require.EqualError(t, err, expectedErrorString) func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -181,7 +181,7 @@ func EqualError(t TestingT, theError error, errString string, msgAndArgs ...inte // and that it is equal to the provided error. // // actualObj, err := SomeFunction() -// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") +// require.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted") func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -200,8 +200,8 @@ func EqualErrorf(t TestingT, theError error, errString string, msg string, args // Exported int // notExported int // } -// assert.EqualExportedValues(t, S{1, 2}, S{1, 3}) => true -// assert.EqualExportedValues(t, S{1, 2}, S{2, 3}) => false +// require.EqualExportedValues(t, S{1, 2}, S{1, 3}) => true +// require.EqualExportedValues(t, S{1, 2}, S{2, 3}) => false func EqualExportedValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -220,8 +220,8 @@ func EqualExportedValues(t TestingT, expected interface{}, actual interface{}, m // Exported int // notExported int // } -// assert.EqualExportedValuesf(t, S{1, 2}, S{1, 3}, "error message %s", "formatted") => true -// assert.EqualExportedValuesf(t, S{1, 2}, S{2, 3}, "error message %s", "formatted") => false +// require.EqualExportedValuesf(t, S{1, 2}, S{1, 3}, "error message %s", "formatted") => true +// require.EqualExportedValuesf(t, S{1, 2}, S{2, 3}, "error message %s", "formatted") => false func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -232,10 +232,10 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, t.FailNow() } -// EqualValues asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValues asserts that two objects are equal or convertible to the larger +// type and equal. // -// assert.EqualValues(t, uint32(123), int32(123)) +// require.EqualValues(t, uint32(123), int32(123)) func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -246,10 +246,10 @@ func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArg t.FailNow() } -// EqualValuesf asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValuesf asserts that two objects are equal or convertible to the larger +// type and equal. 
// -// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") +// require.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -262,7 +262,7 @@ func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg stri // Equalf asserts that two objects are equal. // -// assert.Equalf(t, 123, 123, "error message %s", "formatted") +// require.Equalf(t, 123, 123, "error message %s", "formatted") // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). Function equality @@ -280,8 +280,8 @@ func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, ar // Error asserts that a function returned an error (i.e. not `nil`). // // actualObj, err := SomeFunction() -// if assert.Error(t, err) { -// assert.Equal(t, expectedError, err) +// if require.Error(t, err) { +// require.Equal(t, expectedError, err) // } func Error(t TestingT, err error, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { @@ -321,7 +321,7 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int // and that the error contains the specified substring. // // actualObj, err := SomeFunction() -// assert.ErrorContains(t, err, expectedErrorSubString) +// require.ErrorContains(t, err, expectedErrorSubString) func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -336,7 +336,7 @@ func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...in // and that the error contains the specified substring. // // actualObj, err := SomeFunction() -// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") +// require.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted") func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -374,8 +374,8 @@ func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface // Errorf asserts that a function returned an error (i.e. not `nil`). // // actualObj, err := SomeFunction() -// if assert.Errorf(t, err, "error message %s", "formatted") { -// assert.Equal(t, expectedErrorf, err) +// if require.Errorf(t, err, "error message %s", "formatted") { +// require.Equal(t, expectedErrorf, err) // } func Errorf(t TestingT, err error, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { @@ -390,7 +390,7 @@ func Errorf(t TestingT, err error, msg string, args ...interface{}) { // Eventually asserts that given condition will be met in waitFor time, // periodically checking target function each tick. 
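// (editor's note: waitFor is the total time budget and tick the polling
// interval, so waitFor should be the larger of the two; this vendor bump also
// fixes EventuallyWithT doc examples that had the two durations swapped)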
// -// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) +// require.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -415,10 +415,10 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t // time.Sleep(8*time.Second) // externalValue = true // }() -// assert.EventuallyWithT(t, func(c *assert.CollectT) { +// require.EventuallyWithT(t, func(c *require.CollectT) { // // add assertions as needed; any assertion failure will fail the current tick -// assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// require.True(c, externalValue, "expected 'externalValue' to be true") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func EventuallyWithT(t TestingT, condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -443,10 +443,10 @@ func EventuallyWithT(t TestingT, condition func(collect *assert.CollectT), waitF // time.Sleep(8*time.Second) // externalValue = true // }() -// assert.EventuallyWithTf(t, func(c *assert.CollectT, "error message %s", "formatted") { +// require.EventuallyWithTf(t, func(c *require.CollectT, "error message %s", "formatted") { // // add assertions as needed; any assertion failure will fail the current tick -// assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// require.True(c, externalValue, "expected 'externalValue' to be true") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func EventuallyWithTf(t TestingT, condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -460,7 +460,7 @@ func EventuallyWithTf(t TestingT, condition func(collect *assert.CollectT), wait // Eventuallyf asserts that given condition will be met in waitFor time, // periodically checking target function each tick. // -// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +// require.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -473,7 +473,7 @@ func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick // Exactly asserts that two objects are equal in value and type. // -// assert.Exactly(t, int32(123), int64(123)) +// require.Exactly(t, int32(123), int64(123)) func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -486,7 +486,7 @@ func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs .. // Exactlyf asserts that two objects are equal in value and type. 
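// (editor's contrast of the three equality assertions; the calls are
// illustrative:
//
//	require.Equal(t, int32(123), int32(123))        // same type, same value: passes
//	require.EqualValues(t, uint32(123), int32(123)) // convertible and equal: passes
//	require.Exactly(t, int32(123), int64(123))      // equal value, different type: fails
//
// note that the int32/int64 call documented for Exactlyf below fails for the
// same reason: the types differ)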
// -// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted") +// require.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted") func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -543,7 +543,7 @@ func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) { // False asserts that the specified value is false. // -// assert.False(t, myBool) +// require.False(t, myBool) func False(t TestingT, value bool, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -556,7 +556,7 @@ func False(t TestingT, value bool, msgAndArgs ...interface{}) { // Falsef asserts that the specified value is false. // -// assert.Falsef(t, myBool, "error message %s", "formatted") +// require.Falsef(t, myBool, "error message %s", "formatted") func Falsef(t TestingT, value bool, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -593,9 +593,9 @@ func FileExistsf(t TestingT, path string, msg string, args ...interface{}) { // Greater asserts that the first element is greater than the second // -// assert.Greater(t, 2, 1) -// assert.Greater(t, float64(2), float64(1)) -// assert.Greater(t, "b", "a") +// require.Greater(t, 2, 1) +// require.Greater(t, float64(2), float64(1)) +// require.Greater(t, "b", "a") func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -608,10 +608,10 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface // GreaterOrEqual asserts that the first element is greater than or equal to the second // -// assert.GreaterOrEqual(t, 2, 1) -// assert.GreaterOrEqual(t, 2, 2) -// assert.GreaterOrEqual(t, "b", "a") -// assert.GreaterOrEqual(t, "b", "b") +// require.GreaterOrEqual(t, 2, 1) +// require.GreaterOrEqual(t, 2, 2) +// require.GreaterOrEqual(t, "b", "a") +// require.GreaterOrEqual(t, "b", "b") func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -624,10 +624,10 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in // GreaterOrEqualf asserts that the first element is greater than or equal to the second // -// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted") -// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted") -// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted") -// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted") +// require.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted") +// require.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted") +// require.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted") +// require.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted") func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -640,9 +640,9 @@ func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, arg // Greaterf asserts that the first element is greater than the second // -// assert.Greaterf(t, 2, 1, "error message %s", "formatted") -// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted") -// assert.Greaterf(t, "b", "a", "error message %s", "formatted") +// require.Greaterf(t, 2, 1, "error message %s", "formatted") +// require.Greaterf(t, float64(2), float64(1), "error 
message %s", "formatted") +// require.Greaterf(t, "b", "a", "error message %s", "formatted") func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -656,7 +656,7 @@ func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...in // HTTPBodyContains asserts that a specified handler returns a // body that contains a string. // -// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// require.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { @@ -672,7 +672,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url s // HTTPBodyContainsf asserts that a specified handler returns a // body that contains a string. // -// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// require.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { @@ -688,7 +688,7 @@ func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url // HTTPBodyNotContains asserts that a specified handler returns a // body that does not contain a string. // -// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") +// require.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) { @@ -704,7 +704,7 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, ur // HTTPBodyNotContainsf asserts that a specified handler returns a // body that does not contain a string. // -// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") +// require.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) { @@ -719,7 +719,7 @@ func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, u // HTTPError asserts that a specified handler returns an error status code. // -// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// require.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). 
func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { @@ -734,7 +734,7 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, // HTTPErrorf asserts that a specified handler returns an error status code. // -// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// require.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { @@ -749,7 +749,7 @@ func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, // HTTPRedirect asserts that a specified handler returns a redirect status code. // -// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// require.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { @@ -764,7 +764,7 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url strin // HTTPRedirectf asserts that a specified handler returns a redirect status code. // -// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// require.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} // // Returns whether the assertion was successful (true) or not (false). func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { @@ -779,7 +779,7 @@ func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url stri // HTTPStatusCode asserts that a specified handler returns a specified status code. // -// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501) +// require.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501) // // Returns whether the assertion was successful (true) or not (false). func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) { @@ -794,7 +794,7 @@ func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method string, url str // HTTPStatusCodef asserts that a specified handler returns a specified status code. // -// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") +// require.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) { @@ -809,7 +809,7 @@ func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url st // HTTPSuccess asserts that a specified handler returns a success status code. // -// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) +// require.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) // // Returns whether the assertion was successful (true) or not (false). 
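// (editor's sketch of a self-contained call; the handler is hypothetical:
//
//	ok := func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) }
//	require.HTTPSuccess(t, ok, "GET", "/health", nil)
//
// unlike the assert variants, the require wrappers return nothing and fail the
// test immediately, so the "Returns whether" sentence above is vestigial)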
func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) { @@ -824,7 +824,7 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string // HTTPSuccessf asserts that a specified handler returns a success status code. // -// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") +// require.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted") // // Returns whether the assertion was successful (true) or not (false). func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) { @@ -839,7 +839,7 @@ func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url strin // Implements asserts that an object is implemented by the specified interface. // -// assert.Implements(t, (*MyInterface)(nil), new(MyObject)) +// require.Implements(t, (*MyInterface)(nil), new(MyObject)) func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -852,7 +852,7 @@ func Implements(t TestingT, interfaceObject interface{}, object interface{}, msg // Implementsf asserts that an object is implemented by the specified interface. // -// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +// require.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -865,7 +865,7 @@ func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, ms // InDelta asserts that the two numerals are within delta of each other. // -// assert.InDelta(t, math.Pi, 22/7.0, 0.01) +// require.InDelta(t, math.Pi, 22/7.0, 0.01) func InDelta(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -922,7 +922,7 @@ func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta f // InDeltaf asserts that the two numerals are within delta of each other. 
// -// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted") +// require.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted") func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -979,9 +979,9 @@ func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon fl // IsDecreasing asserts that the collection is decreasing // -// assert.IsDecreasing(t, []int{2, 1, 0}) -// assert.IsDecreasing(t, []float{2, 1}) -// assert.IsDecreasing(t, []string{"b", "a"}) +// require.IsDecreasing(t, []int{2, 1, 0}) +// require.IsDecreasing(t, []float{2, 1}) +// require.IsDecreasing(t, []string{"b", "a"}) func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -994,9 +994,9 @@ func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { // IsDecreasingf asserts that the collection is decreasing // -// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted") -// assert.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted") -// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted") +// require.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted") +// require.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted") +// require.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted") func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1009,9 +1009,9 @@ func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface // IsIncreasing asserts that the collection is increasing // -// assert.IsIncreasing(t, []int{1, 2, 3}) -// assert.IsIncreasing(t, []float{1, 2}) -// assert.IsIncreasing(t, []string{"a", "b"}) +// require.IsIncreasing(t, []int{1, 2, 3}) +// require.IsIncreasing(t, []float{1, 2}) +// require.IsIncreasing(t, []string{"a", "b"}) func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1024,9 +1024,9 @@ func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { // IsIncreasingf asserts that the collection is increasing // -// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted") -// assert.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted") -// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted") +// require.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted") +// require.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted") +// require.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted") func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1039,9 +1039,9 @@ func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface // IsNonDecreasing asserts that the collection is not decreasing // -// assert.IsNonDecreasing(t, []int{1, 1, 2}) -// assert.IsNonDecreasing(t, []float{1, 2}) -// assert.IsNonDecreasing(t, []string{"a", "b"}) +// require.IsNonDecreasing(t, []int{1, 1, 2}) +// require.IsNonDecreasing(t, []float{1, 2}) +// require.IsNonDecreasing(t, []string{"a", "b"}) func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1054,9 +1054,9 @@ func 
IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) // IsNonDecreasingf asserts that the collection is not decreasing // -// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted") -// assert.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted") -// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted") +// require.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted") +// require.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted") +// require.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted") func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1069,9 +1069,9 @@ func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interf // IsNonIncreasing asserts that the collection is not increasing // -// assert.IsNonIncreasing(t, []int{2, 1, 1}) -// assert.IsNonIncreasing(t, []float{2, 1}) -// assert.IsNonIncreasing(t, []string{"b", "a"}) +// require.IsNonIncreasing(t, []int{2, 1, 1}) +// require.IsNonIncreasing(t, []float{2, 1}) +// require.IsNonIncreasing(t, []string{"b", "a"}) func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1084,9 +1084,9 @@ func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) // IsNonIncreasingf asserts that the collection is not increasing // -// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted") -// assert.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted") -// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted") +// require.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted") +// require.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted") +// require.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted") func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1121,7 +1121,7 @@ func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg strin // JSONEq asserts that two JSON strings are equivalent. // -// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +// require.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1134,7 +1134,7 @@ func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{ // JSONEqf asserts that two JSON strings are equivalent. // -// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") +// require.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted") func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1148,7 +1148,7 @@ func JSONEqf(t TestingT, expected string, actual string, msg string, args ...int // Len asserts that the specified object has specific length. // Len also fails if the object has a type that len() not accept. 
// -// assert.Len(t, mySlice, 3) +// require.Len(t, mySlice, 3) func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1162,7 +1162,7 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) // Lenf asserts that the specified object has specific length. // Lenf also fails if the object has a type that len() not accept. // -// assert.Lenf(t, mySlice, 3, "error message %s", "formatted") +// require.Lenf(t, mySlice, 3, "error message %s", "formatted") func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1175,9 +1175,9 @@ func Lenf(t TestingT, object interface{}, length int, msg string, args ...interf // Less asserts that the first element is less than the second // -// assert.Less(t, 1, 2) -// assert.Less(t, float64(1), float64(2)) -// assert.Less(t, "a", "b") +// require.Less(t, 1, 2) +// require.Less(t, float64(1), float64(2)) +// require.Less(t, "a", "b") func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1190,10 +1190,10 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) // LessOrEqual asserts that the first element is less than or equal to the second // -// assert.LessOrEqual(t, 1, 2) -// assert.LessOrEqual(t, 2, 2) -// assert.LessOrEqual(t, "a", "b") -// assert.LessOrEqual(t, "b", "b") +// require.LessOrEqual(t, 1, 2) +// require.LessOrEqual(t, 2, 2) +// require.LessOrEqual(t, "a", "b") +// require.LessOrEqual(t, "b", "b") func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1206,10 +1206,10 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter // LessOrEqualf asserts that the first element is less than or equal to the second // -// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted") -// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted") -// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted") -// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted") +// require.LessOrEqualf(t, 1, 2, "error message %s", "formatted") +// require.LessOrEqualf(t, 2, 2, "error message %s", "formatted") +// require.LessOrEqualf(t, "a", "b", "error message %s", "formatted") +// require.LessOrEqualf(t, "b", "b", "error message %s", "formatted") func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1222,9 +1222,9 @@ func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args . 
// Lessf asserts that the first element is less than the second // -// assert.Lessf(t, 1, 2, "error message %s", "formatted") -// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted") -// assert.Lessf(t, "a", "b", "error message %s", "formatted") +// require.Lessf(t, 1, 2, "error message %s", "formatted") +// require.Lessf(t, float64(1), float64(2), "error message %s", "formatted") +// require.Lessf(t, "a", "b", "error message %s", "formatted") func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1237,8 +1237,8 @@ func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...inter // Negative asserts that the specified element is negative // -// assert.Negative(t, -1) -// assert.Negative(t, -1.23) +// require.Negative(t, -1) +// require.Negative(t, -1.23) func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1251,8 +1251,8 @@ func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) { // Negativef asserts that the specified element is negative // -// assert.Negativef(t, -1, "error message %s", "formatted") -// assert.Negativef(t, -1.23, "error message %s", "formatted") +// require.Negativef(t, -1, "error message %s", "formatted") +// require.Negativef(t, -1.23, "error message %s", "formatted") func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1266,7 +1266,7 @@ func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) { // Never asserts that the given condition doesn't satisfy in waitFor time, // periodically checking the target function each tick. // -// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond) +// require.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond) func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1280,7 +1280,7 @@ func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.D // Neverf asserts that the given condition doesn't satisfy in waitFor time, // periodically checking the target function each tick. // -// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") +// require.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted") func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1293,7 +1293,7 @@ func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time. // Nil asserts that the specified object is nil. // -// assert.Nil(t, err) +// require.Nil(t, err) func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1306,7 +1306,7 @@ func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) { // Nilf asserts that the specified object is nil. 
// -// assert.Nilf(t, err, "error message %s", "formatted") +// require.Nilf(t, err, "error message %s", "formatted") func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1344,8 +1344,8 @@ func NoDirExistsf(t TestingT, path string, msg string, args ...interface{}) { // NoError asserts that a function returned no error (i.e. `nil`). // // actualObj, err := SomeFunction() -// if assert.NoError(t, err) { -// assert.Equal(t, expectedObj, actualObj) +// if require.NoError(t, err) { +// require.Equal(t, expectedObj, actualObj) // } func NoError(t TestingT, err error, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1360,8 +1360,8 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) { // NoErrorf asserts that a function returned no error (i.e. `nil`). // // actualObj, err := SomeFunction() -// if assert.NoErrorf(t, err, "error message %s", "formatted") { -// assert.Equal(t, expectedObj, actualObj) +// if require.NoErrorf(t, err, "error message %s", "formatted") { +// require.Equal(t, expectedObj, actualObj) // } func NoErrorf(t TestingT, err error, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1400,9 +1400,9 @@ func NoFileExistsf(t TestingT, path string, msg string, args ...interface{}) { // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the // specified substring or element. // -// assert.NotContains(t, "Hello World", "Earth") -// assert.NotContains(t, ["Hello", "World"], "Earth") -// assert.NotContains(t, {"Hello": "World"}, "Earth") +// require.NotContains(t, "Hello World", "Earth") +// require.NotContains(t, ["Hello", "World"], "Earth") +// require.NotContains(t, {"Hello": "World"}, "Earth") func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1416,9 +1416,9 @@ func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ... // NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the // specified substring or element. // -// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted") -// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") -// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") +// require.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted") +// require.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted") +// require.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted") func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1429,11 +1429,51 @@ func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, a t.FailNow() } +// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. 
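+// (editor's note: in the examples below "-> false" marks a call whose
+// assertion fails; with require, a failed assertion also halts the test via
+// t.FailNow rather than returning false)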
+// +// require.NotElementsMatch(t, [1, 1, 2, 3], [1, 1, 2, 3]) -> false +// +// require.NotElementsMatch(t, [1, 1, 2, 3], [1, 2, 3]) -> true +// +// require.NotElementsMatch(t, [1, 2, 3], [1, 2, 4]) -> true +func NotElementsMatch(t TestingT, listA interface{}, listB interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotElementsMatch(t, listA, listB, msgAndArgs...) { + return + } + t.FailNow() +} + +// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// require.NotElementsMatchf(t, [1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false +// +// require.NotElementsMatchf(t, [1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true +// +// require.NotElementsMatchf(t, [1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true +func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotElementsMatchf(t, listA, listB, msg, args...) { + return + } + t.FailNow() +} + // NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. // -// if assert.NotEmpty(t, obj) { -// assert.Equal(t, "two", obj[1]) +// if require.NotEmpty(t, obj) { +// require.Equal(t, "two", obj[1]) // } func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1448,8 +1488,8 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { // NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // a slice or a channel with len == 0. // -// if assert.NotEmptyf(t, obj, "error message %s", "formatted") { -// assert.Equal(t, "two", obj[1]) +// if require.NotEmptyf(t, obj, "error message %s", "formatted") { +// require.Equal(t, "two", obj[1]) // } func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1463,7 +1503,7 @@ func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) // NotEqual asserts that the specified values are NOT equal. // -// assert.NotEqual(t, obj1, obj2) +// require.NotEqual(t, obj1, obj2) // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). @@ -1479,7 +1519,7 @@ func NotEqual(t TestingT, expected interface{}, actual interface{}, msgAndArgs . 
// NotEqualValues asserts that two objects are not equal even when converted to the same type // -// assert.NotEqualValues(t, obj1, obj2) +// require.NotEqualValues(t, obj1, obj2) func NotEqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1492,7 +1532,7 @@ func NotEqualValues(t TestingT, expected interface{}, actual interface{}, msgAnd // NotEqualValuesf asserts that two objects are not equal even when converted to the same type // -// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted") +// require.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted") func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1505,7 +1545,7 @@ func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg s // NotEqualf asserts that the specified values are NOT equal. // -// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted") +// require.NotEqualf(t, obj1, obj2, "error message %s", "formatted") // // Pointer variable equality is determined based on the equality of the // referenced values (as opposed to the memory addresses). @@ -1519,7 +1559,31 @@ func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, t.FailNow() } -// NotErrorIs asserts that at none of the errors in err's chain matches target. +// NotErrorAs asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func NotErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotErrorAs(t, err, target, msgAndArgs...) { + return + } + t.FailNow() +} + +// NotErrorAsf asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func NotErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) { + if h, ok := t.(tHelper); ok { + h.Helper() + } + if assert.NotErrorAsf(t, err, target, msg, args...) { + return + } + t.FailNow() +} + +// NotErrorIs asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func NotErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1531,7 +1595,7 @@ func NotErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{}) t.FailNow() } -// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// NotErrorIsf asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { @@ -1545,7 +1609,7 @@ func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interf // NotImplements asserts that an object does not implement the specified interface. // -// assert.NotImplements(t, (*MyInterface)(nil), new(MyObject)) +// require.NotImplements(t, (*MyInterface)(nil), new(MyObject)) func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1558,7 +1622,7 @@ func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, // NotImplementsf asserts that an object does not implement the specified interface. 
// -// assert.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") +// require.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted") func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1571,7 +1635,7 @@ func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, // NotNil asserts that the specified object is not nil. // -// assert.NotNil(t, err) +// require.NotNil(t, err) func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1584,7 +1648,7 @@ func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) { // NotNilf asserts that the specified object is not nil. // -// assert.NotNilf(t, err, "error message %s", "formatted") +// require.NotNilf(t, err, "error message %s", "formatted") func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1597,7 +1661,7 @@ func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) { // NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. // -// assert.NotPanics(t, func(){ RemainCalm() }) +// require.NotPanics(t, func(){ RemainCalm() }) func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1610,7 +1674,7 @@ func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { // NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. // -// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") +// require.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted") func NotPanicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1623,8 +1687,8 @@ func NotPanicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interfac // NotRegexp asserts that a specified regexp does not match a string. // -// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") -// assert.NotRegexp(t, "^start", "it's not starting") +// require.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") +// require.NotRegexp(t, "^start", "it's not starting") func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1637,8 +1701,8 @@ func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interf // NotRegexpf asserts that a specified regexp does not match a string. // -// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") -// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") +// require.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") +// require.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted") func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1651,7 +1715,7 @@ func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args .. // NotSame asserts that two pointers do not reference the same object. // -// assert.NotSame(t, ptr1, ptr2) +// require.NotSame(t, ptr1, ptr2) // // Both arguments must be pointer variables. 
Pointer variable sameness is // determined based on the equality of both type and value. @@ -1667,7 +1731,7 @@ func NotSame(t TestingT, expected interface{}, actual interface{}, msgAndArgs .. // NotSamef asserts that two pointers do not reference the same object. // -// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted") +// require.NotSamef(t, ptr1, ptr2, "error message %s", "formatted") // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -1685,8 +1749,8 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, // contain all elements given in the specified subset list(array, slice...) or // map. // -// assert.NotSubset(t, [1, 3, 4], [1, 2]) -// assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) +// require.NotSubset(t, [1, 3, 4], [1, 2]) +// require.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1701,8 +1765,8 @@ func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...i // contain all elements given in the specified subset list(array, slice...) or // map. // -// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") -// assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") +// require.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") +// require.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1737,7 +1801,7 @@ func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) { // Panics asserts that the code inside the specified PanicTestFunc panics. // -// assert.Panics(t, func(){ GoCrazy() }) +// require.Panics(t, func(){ GoCrazy() }) func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1752,7 +1816,7 @@ func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { // panics, and that the recovered panic value is an error that satisfies the // EqualError comparison. // -// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() }) +// require.PanicsWithError(t, "crazy error", func(){ GoCrazy() }) func PanicsWithError(t TestingT, errString string, f assert.PanicTestFunc, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1767,7 +1831,7 @@ func PanicsWithError(t TestingT, errString string, f assert.PanicTestFunc, msgAn // panics, and that the recovered panic value is an error that satisfies the // EqualError comparison. // -// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +// require.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") func PanicsWithErrorf(t TestingT, errString string, f assert.PanicTestFunc, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1781,7 +1845,7 @@ func PanicsWithErrorf(t TestingT, errString string, f assert.PanicTestFunc, msg // PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that // the recovered panic value equals the expected panic value. 
// -// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) +// require.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) func PanicsWithValue(t TestingT, expected interface{}, f assert.PanicTestFunc, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1795,7 +1859,7 @@ func PanicsWithValue(t TestingT, expected interface{}, f assert.PanicTestFunc, m // PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that // the recovered panic value equals the expected panic value. // -// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") +// require.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted") func PanicsWithValuef(t TestingT, expected interface{}, f assert.PanicTestFunc, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1808,7 +1872,7 @@ func PanicsWithValuef(t TestingT, expected interface{}, f assert.PanicTestFunc, // Panicsf asserts that the code inside the specified PanicTestFunc panics. // -// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") +// require.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted") func Panicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1821,8 +1885,8 @@ func Panicsf(t TestingT, f assert.PanicTestFunc, msg string, args ...interface{} // Positive asserts that the specified element is positive // -// assert.Positive(t, 1) -// assert.Positive(t, 1.23) +// require.Positive(t, 1) +// require.Positive(t, 1.23) func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1835,8 +1899,8 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) { // Positivef asserts that the specified element is positive // -// assert.Positivef(t, 1, "error message %s", "formatted") -// assert.Positivef(t, 1.23, "error message %s", "formatted") +// require.Positivef(t, 1, "error message %s", "formatted") +// require.Positivef(t, 1.23, "error message %s", "formatted") func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1849,8 +1913,8 @@ func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) { // Regexp asserts that a specified regexp matches a string. // -// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") -// assert.Regexp(t, "start...$", "it's not starting") +// require.Regexp(t, regexp.MustCompile("start"), "it's starting") +// require.Regexp(t, "start...$", "it's not starting") func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1863,8 +1927,8 @@ func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface // Regexpf asserts that a specified regexp matches a string. 
// -// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") -// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") +// require.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") +// require.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted") func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1877,7 +1941,7 @@ func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...in // Same asserts that two pointers reference the same object. // -// assert.Same(t, ptr1, ptr2) +// require.Same(t, ptr1, ptr2) // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -1893,7 +1957,7 @@ func Same(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...in // Samef asserts that two pointers reference the same object. // -// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted") +// require.Samef(t, ptr1, ptr2, "error message %s", "formatted") // // Both arguments must be pointer variables. Pointer variable sameness is // determined based on the equality of both type and value. @@ -1910,8 +1974,8 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg // Subset asserts that the specified list(array, slice...) or map contains all // elements given in the specified subset list(array, slice...) or map. // -// assert.Subset(t, [1, 2, 3], [1, 2]) -// assert.Subset(t, {"x": 1, "y": 2}, {"x": 1}) +// require.Subset(t, [1, 2, 3], [1, 2]) +// require.Subset(t, {"x": 1, "y": 2}, {"x": 1}) func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1925,8 +1989,8 @@ func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...inte // Subsetf asserts that the specified list(array, slice...) or map contains all // elements given in the specified subset list(array, slice...) or map. // -// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") -// assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") +// require.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") +// require.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1939,7 +2003,7 @@ func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args // True asserts that the specified value is true. // -// assert.True(t, myBool) +// require.True(t, myBool) func True(t TestingT, value bool, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1952,7 +2016,7 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) { // Truef asserts that the specified value is true. // -// assert.Truef(t, myBool, "error message %s", "formatted") +// require.Truef(t, myBool, "error message %s", "formatted") func Truef(t TestingT, value bool, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1965,7 +2029,7 @@ func Truef(t TestingT, value bool, msg string, args ...interface{}) { // WithinDuration asserts that the two times are within duration delta of each other. 
// -// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) +// require.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1978,7 +2042,7 @@ func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time // WithinDurationf asserts that the two times are within duration delta of each other. // -// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") +// require.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -1991,7 +2055,7 @@ func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta tim // WithinRange asserts that a time is within a time range (inclusive). // -// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) +// require.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second)) func WithinRange(t TestingT, actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() @@ -2004,7 +2068,7 @@ func WithinRange(t TestingT, actual time.Time, start time.Time, end time.Time, m // WithinRangef asserts that a time is within a time range (inclusive). // -// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") +// require.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted") func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) { if h, ok := t.(tHelper); ok { h.Helper() diff --git a/vendor/github.com/stretchr/testify/require/require.go.tmpl b/vendor/github.com/stretchr/testify/require/require.go.tmpl index 55e42ddebd..8b32836850 100644 --- a/vendor/github.com/stretchr/testify/require/require.go.tmpl +++ b/vendor/github.com/stretchr/testify/require/require.go.tmpl @@ -1,4 +1,4 @@ -{{.Comment}} +{{ replace .Comment "assert." "require."}} func {{.DocInfo.Name}}(t TestingT, {{.Params}}) { if h, ok := t.(tHelper); ok { h.Helper() } if assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) { return } diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go index eee8310a5f..1bd87304f4 100644 --- a/vendor/github.com/stretchr/testify/require/require_forward.go +++ b/vendor/github.com/stretchr/testify/require/require_forward.go @@ -187,8 +187,8 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface EqualExportedValuesf(a.t, expected, actual, msg, args...) } -// EqualValues asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValues asserts that two objects are equal or convertible to the larger +// type and equal. // // a.EqualValues(uint32(123), int32(123)) func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { @@ -198,8 +198,8 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn EqualValues(a.t, expected, actual, msgAndArgs...) 
} -// EqualValuesf asserts that two objects are equal or convertible to the same types -// and equal. +// EqualValuesf asserts that two objects are equal or convertible to the larger +// type and equal. // // a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) { @@ -337,7 +337,7 @@ func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, ti // a.EventuallyWithT(func(c *assert.CollectT) { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func (a *Assertions) EventuallyWithT(condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -362,7 +362,7 @@ func (a *Assertions) EventuallyWithT(condition func(collect *assert.CollectT), w // a.EventuallyWithTf(func(c *assert.CollectT, "error message %s", "formatted") { // // add assertions as needed; any assertion failure will fail the current tick // assert.True(c, externalValue, "expected 'externalValue' to be true") -// }, 1*time.Second, 10*time.Second, "external state has not changed to 'true'; still false") +// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false") func (a *Assertions) EventuallyWithTf(condition func(collect *assert.CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { h.Helper() @@ -1129,6 +1129,40 @@ func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg strin NotContainsf(a.t, s, contains, msg, args...) } +// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// a.NotElementsMatch([1, 1, 2, 3], [1, 1, 2, 3]) -> false +// +// a.NotElementsMatch([1, 1, 2, 3], [1, 2, 3]) -> true +// +// a.NotElementsMatch([1, 2, 3], [1, 2, 4]) -> true +func (a *Assertions) NotElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotElementsMatch(a.t, listA, listB, msgAndArgs...) +} + +// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified +// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements, +// the number of appearances of each of them in both lists should not match. +// This is an inverse of ElementsMatch. +// +// a.NotElementsMatchf([1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false +// +// a.NotElementsMatchf([1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true +// +// a.NotElementsMatchf([1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true +func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotElementsMatchf(a.t, listA, listB, msg, args...) +} + // NotEmpty asserts that the specified object is NOT empty. I.e. 
not nil, "", false, 0 or either // a slice or a channel with len == 0. // @@ -1201,7 +1235,25 @@ func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg str NotEqualf(a.t, expected, actual, msg, args...) } -// NotErrorIs asserts that at none of the errors in err's chain matches target. +// NotErrorAs asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func (a *Assertions) NotErrorAs(err error, target interface{}, msgAndArgs ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotErrorAs(a.t, err, target, msgAndArgs...) +} + +// NotErrorAsf asserts that none of the errors in err's chain matches target, +// but if so, sets target to that error value. +func (a *Assertions) NotErrorAsf(err error, target interface{}, msg string, args ...interface{}) { + if h, ok := a.t.(tHelper); ok { + h.Helper() + } + NotErrorAsf(a.t, err, target, msg, args...) +} + +// NotErrorIs asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) { if h, ok := a.t.(tHelper); ok { @@ -1210,7 +1262,7 @@ func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface NotErrorIs(a.t, err, target, msgAndArgs...) } -// NotErrorIsf asserts that at none of the errors in err's chain matches target. +// NotErrorIsf asserts that none of the errors in err's chain matches target. // This is a wrapper for errors.Is. func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) { if h, ok := a.t.(tHelper); ok { diff --git a/vendor/github.com/stretchr/testify/require/requirements.go b/vendor/github.com/stretchr/testify/require/requirements.go index 91772dfeb9..6b7ce929eb 100644 --- a/vendor/github.com/stretchr/testify/require/requirements.go +++ b/vendor/github.com/stretchr/testify/require/requirements.go @@ -6,7 +6,7 @@ type TestingT interface { FailNow() } -type tHelper interface { +type tHelper = interface { Helper() } diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go index 56cdc7c21c..c9202b05da 100644 --- a/vendor/golang.org/x/crypto/ssh/handshake.go +++ b/vendor/golang.org/x/crypto/ssh/handshake.go @@ -25,6 +25,11 @@ const debugHandshake = false // quickly. const chanSize = 16 +// maxPendingPackets sets the maximum number of packets to queue while waiting +// for KEX to complete. This limits the total pending data to maxPendingPackets +// * maxPacket bytes, which is ~16.8MB. +const maxPendingPackets = 64 + // keyingTransport is a packet based transport that supports key // changes. It need not be thread-safe. It should pass through // msgNewKeys in both directions. @@ -73,13 +78,22 @@ type handshakeTransport struct { incoming chan []byte readError error - mu sync.Mutex - writeError error - sentInitPacket []byte - sentInitMsg *kexInitMsg - pendingPackets [][]byte // Used when a key exchange is in progress. + mu sync.Mutex + // Condition for the above mutex. It is used to notify a completed key + // exchange or a write failure. Writes can wait for this condition while a + // key exchange is in progress. + writeCond *sync.Cond + writeError error + sentInitPacket []byte + sentInitMsg *kexInitMsg + // Used to queue writes when a key exchange is in progress. The length is + // limited by pendingPacketsSize. Once full, writes will block until the key + // exchange is completed or an error occurs. 
If not empty, it is emptied + // all at once when the key exchange is completed in kexLoop. + pendingPackets [][]byte writePacketsLeft uint32 writeBytesLeft int64 + userAuthComplete bool // whether the user authentication phase is complete // If the read loop wants to schedule a kex, it pings this // channel, and the write loop will send out a kex @@ -133,6 +147,7 @@ func newHandshakeTransport(conn keyingTransport, config *Config, clientVersion, config: config, } + t.writeCond = sync.NewCond(&t.mu) t.resetReadThresholds() t.resetWriteThresholds() @@ -259,6 +274,7 @@ func (t *handshakeTransport) recordWriteError(err error) { defer t.mu.Unlock() if t.writeError == nil && err != nil { t.writeError = err + t.writeCond.Broadcast() } } @@ -362,6 +378,8 @@ write: } } t.pendingPackets = t.pendingPackets[:0] + // Unblock writePacket if waiting for KEX. + t.writeCond.Broadcast() t.mu.Unlock() } @@ -552,26 +570,44 @@ func (t *handshakeTransport) sendKexInit() error { return nil } +var errSendBannerPhase = errors.New("ssh: SendAuthBanner outside of authentication phase") + func (t *handshakeTransport) writePacket(p []byte) error { + t.mu.Lock() + defer t.mu.Unlock() + switch p[0] { case msgKexInit: return errors.New("ssh: only handshakeTransport can send kexInit") case msgNewKeys: return errors.New("ssh: only handshakeTransport can send newKeys") + case msgUserAuthBanner: + if t.userAuthComplete { + return errSendBannerPhase + } + case msgUserAuthSuccess: + t.userAuthComplete = true } - t.mu.Lock() - defer t.mu.Unlock() if t.writeError != nil { return t.writeError } if t.sentInitMsg != nil { - // Copy the packet so the writer can reuse the buffer. - cp := make([]byte, len(p)) - copy(cp, p) - t.pendingPackets = append(t.pendingPackets, cp) - return nil + if len(t.pendingPackets) < maxPendingPackets { + // Copy the packet so the writer can reuse the buffer. + cp := make([]byte, len(p)) + copy(cp, p) + t.pendingPackets = append(t.pendingPackets, cp) + return nil + } + for t.sentInitMsg != nil { + // Block and wait for KEX to complete or an error. + t.writeCond.Wait() + if t.writeError != nil { + return t.writeError + } + } } if t.writeBytesLeft > 0 { @@ -588,6 +624,7 @@ func (t *handshakeTransport) writePacket(p []byte) error { if err := t.pushPacket(p); err != nil { t.writeError = err + t.writeCond.Broadcast() } return nil diff --git a/vendor/golang.org/x/crypto/ssh/messages.go b/vendor/golang.org/x/crypto/ssh/messages.go index b55f860564..118427bc05 100644 --- a/vendor/golang.org/x/crypto/ssh/messages.go +++ b/vendor/golang.org/x/crypto/ssh/messages.go @@ -818,6 +818,8 @@ func decode(packet []byte) (interface{}, error) { return new(userAuthSuccessMsg), nil case msgUserAuthFailure: msg = new(userAuthFailureMsg) + case msgUserAuthBanner: + msg = new(userAuthBannerMsg) case msgUserAuthPubKeyOk: msg = new(userAuthPubKeyOkMsg) case msgGlobalRequest: diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go index 5b5ccd96f4..1839ddc6a4 100644 --- a/vendor/golang.org/x/crypto/ssh/server.go +++ b/vendor/golang.org/x/crypto/ssh/server.go @@ -59,6 +59,27 @@ type GSSAPIWithMICConfig struct { Server GSSAPIServer } +// SendAuthBanner implements [ServerPreAuthConn]. 
+func (s *connection) SendAuthBanner(msg string) error { + return s.transport.writePacket(Marshal(&userAuthBannerMsg{ + Message: msg, + })) +} + +func (*connection) unexportedMethodForFutureProofing() {} + +// ServerPreAuthConn is the interface available on an incoming server +// connection before authentication has completed. +type ServerPreAuthConn interface { + unexportedMethodForFutureProofing() // permits growing ServerPreAuthConn safely later, ala testing.TB + + ConnMetadata + + // SendAuthBanner sends a banner message to the client. + // It returns an error once the authentication phase has ended. + SendAuthBanner(string) error +} + // ServerConfig holds server specific configuration data. type ServerConfig struct { // Config contains configuration shared between client and server. @@ -118,6 +139,12 @@ type ServerConfig struct { // attempts. AuthLogCallback func(conn ConnMetadata, method string, err error) + // PreAuthConnCallback, if non-nil, is called upon receiving a new connection + // before any authentication has started. The provided ServerPreAuthConn + // can be used at any time before authentication is complete, including + // after this callback has returned. + PreAuthConnCallback func(ServerPreAuthConn) + // ServerVersion is the version identification string to announce in // the public handshake. // If empty, a reasonable default is used. @@ -488,6 +515,10 @@ func (b *BannerError) Error() string { } func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) { + if config.PreAuthConnCallback != nil { + config.PreAuthConnCallback(s) + } + sessionID := s.transport.getSessionID() var cache pubKeyCache var perms *Permissions @@ -495,7 +526,7 @@ func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, err authFailures := 0 noneAuthCount := 0 var authErrs []error - var displayedBanner bool + var calledBannerCallback bool partialSuccessReturned := false // Set the initial authentication callbacks from the config. They can be // changed if a PartialSuccessError is returned. 
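The handshake.go hunk earlier in this patch is worth a closer look: writes issued
while a key exchange is in flight used to be queued without bound, and the update
caps the queue at maxPendingPackets (64) and parks further writers on a sync.Cond
until kexLoop flushes the queue or a write error is recorded. Below is a minimal,
self-contained Go sketch of that block-until-flush pattern; the names boundedQueue,
maxPending, busy and flush are illustrative stand-ins, not part of x/crypto.

package main

import (
	"fmt"
	"sync"
)

const maxPending = 4 // stand-in for maxPendingPackets (64 upstream)

type boundedQueue struct {
	mu      sync.Mutex
	cond    *sync.Cond // signaled on flush or error, like writeCond
	busy    bool       // stand-in for "KEX in progress" (sentInitMsg != nil)
	err     error      // stand-in for writeError
	pending [][]byte
}

func newBoundedQueue() *boundedQueue {
	q := &boundedQueue{busy: true}
	q.cond = sync.NewCond(&q.mu)
	return q
}

// write mirrors handshakeTransport.writePacket: queue while busy, block once
// the queue is full, send directly otherwise.
func (q *boundedQueue) write(p []byte) error {
	q.mu.Lock()
	defer q.mu.Unlock()
	if q.busy && len(q.pending) < maxPending {
		cp := make([]byte, len(p)) // copy so the caller can reuse its buffer
		copy(cp, p)
		q.pending = append(q.pending, cp)
		return nil
	}
	for q.busy { // queue full: wait for a flush or an error
		q.cond.Wait()
		if q.err != nil {
			return q.err
		}
	}
	fmt.Printf("sent %q\n", p)
	return nil
}

// flush mirrors the kexLoop side: drain the queue, clear the busy flag and
// wake every blocked writer.
func (q *boundedQueue) flush() {
	q.mu.Lock()
	defer q.mu.Unlock()
	for _, p := range q.pending {
		fmt.Printf("sent %q\n", p)
	}
	q.pending = q.pending[:0]
	q.busy = false
	q.cond.Broadcast()
}

func main() {
	q := newBoundedQueue()
	var wg sync.WaitGroup
	for i := 0; i < 6; i++ { // more writes than maxPending, so some block
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			q.write([]byte(fmt.Sprintf("packet-%d", i)))
		}(i)
	}
	q.flush() // output order varies; flush may even win the race entirely
	wg.Wait()
}

In the vendored code the same Broadcast also fires from recordWriteError and
from a failed pushPacket, so writers parked on the condition observe writeError
instead of hanging on a dead connection.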
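The server.go changes just above funnel every banner write through the new
SendAuthBanner method and, via ServerConfig.PreAuthConnCallback, let servers
push a banner before any authentication request has been read. A minimal sketch
of the wiring, assuming host keys, auth callbacks and the accept loop are set
up elsewhere:

package main

import (
	"fmt"

	"golang.org/x/crypto/ssh"
)

func main() {
	config := &ssh.ServerConfig{
		// Called once per incoming connection, before authentication starts.
		PreAuthConnCallback: func(conn ssh.ServerPreAuthConn) {
			// Valid until the auth phase ends; afterwards SendAuthBanner
			// returns an error (errSendBannerPhase above).
			_ = conn.SendAuthBanner(fmt.Sprintf("connection from %s\r\n", conn.RemoteAddr()))
		},
	}
	_ = config // pass to ssh.NewServerConn as usual
}

Note that when the callback first runs no SSH_MSG_USERAUTH_REQUEST has been
seen, so ConnMetadata fields such as User() are still empty; per the comment on
PreAuthConnCallback, the ServerPreAuthConn may also be retained and used at any
later point until authentication completes.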
@@ -542,14 +573,10 @@ userAuthLoop: s.user = userAuthReq.User - if !displayedBanner && config.BannerCallback != nil { - displayedBanner = true - msg := config.BannerCallback(s) - if msg != "" { - bannerMsg := &userAuthBannerMsg{ - Message: msg, - } - if err := s.transport.writePacket(Marshal(bannerMsg)); err != nil { + if !calledBannerCallback && config.BannerCallback != nil { + calledBannerCallback = true + if msg := config.BannerCallback(s); msg != "" { + if err := s.SendAuthBanner(msg); err != nil { return nil, err } } @@ -762,10 +789,7 @@ userAuthLoop: var bannerErr *BannerError if errors.As(authErr, &bannerErr) { if bannerErr.Message != "" { - bannerMsg := &userAuthBannerMsg{ - Message: bannerErr.Message, - } - if err := s.transport.writePacket(Marshal(bannerMsg)); err != nil { + if err := s.SendAuthBanner(bannerErr.Message); err != nil { return nil, err } } diff --git a/vendor/golang.org/x/crypto/ssh/tcpip.go b/vendor/golang.org/x/crypto/ssh/tcpip.go index ef5059a11d..93d844f035 100644 --- a/vendor/golang.org/x/crypto/ssh/tcpip.go +++ b/vendor/golang.org/x/crypto/ssh/tcpip.go @@ -459,7 +459,7 @@ func (c *Client) dial(laddr string, lport int, raddr string, rport int) (Channel return nil, err } go DiscardRequests(in) - return ch, err + return ch, nil } type tcpChan struct { diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go index cf66309c4a..db1c95fab1 100644 --- a/vendor/golang.org/x/net/context/context.go +++ b/vendor/golang.org/x/net/context/context.go @@ -3,29 +3,31 @@ // license that can be found in the LICENSE file. // Package context defines the Context type, which carries deadlines, -// cancelation signals, and other request-scoped values across API boundaries +// cancellation signals, and other request-scoped values across API boundaries // and between processes. // As of Go 1.7 this package is available in the standard library under the -// name context. https://golang.org/pkg/context. +// name [context], and migrating to it can be done automatically with [go fix]. // -// Incoming requests to a server should create a Context, and outgoing calls to -// servers should accept a Context. The chain of function calls between must -// propagate the Context, optionally replacing it with a modified copy created -// using WithDeadline, WithTimeout, WithCancel, or WithValue. +// Incoming requests to a server should create a [Context], and outgoing +// calls to servers should accept a Context. The chain of function +// calls between them must propagate the Context, optionally replacing +// it with a derived Context created using [WithCancel], [WithDeadline], +// [WithTimeout], or [WithValue]. // // Programs that use Contexts should follow these rules to keep interfaces // consistent across packages and enable static analysis tools to check context // propagation: // // Do not store Contexts inside a struct type; instead, pass a Context -// explicitly to each function that needs it. The Context should be the first +// explicitly to each function that needs it. This is discussed further in +// https://go.dev/blog/context-and-structs. The Context should be the first // parameter, typically named ctx: // // func DoSomething(ctx context.Context, arg Arg) error { // // ... use ctx ... // } // -// Do not pass a nil Context, even if a function permits it. Pass context.TODO +// Do not pass a nil [Context], even if a function permits it. Pass [context.TODO] // if you are unsure about which Context to use. 
// // Use context Values only for request-scoped data that transits processes and @@ -34,9 +36,30 @@ // The same Context may be passed to functions running in different goroutines; // Contexts are safe for simultaneous use by multiple goroutines. // -// See http://blog.golang.org/context for example code for a server that uses +// See https://go.dev/blog/context for example code for a server that uses // Contexts. -package context // import "golang.org/x/net/context" +// +// [go fix]: https://go.dev/cmd/go#hdr-Update_packages_to_use_new_APIs +package context + +import ( + "context" // standard library's context, as of Go 1.7 + "time" +) + +// A Context carries a deadline, a cancellation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context = context.Context + +// Canceled is the error returned by [Context.Err] when the context is canceled +// for some reason other than its deadline passing. +var Canceled = context.Canceled + +// DeadlineExceeded is the error returned by [Context.Err] when the context is canceled +// due to its deadline passing. +var DeadlineExceeded = context.DeadlineExceeded // Background returns a non-nil, empty Context. It is never canceled, has no // values, and has no deadline. It is typically used by the main function, @@ -49,8 +72,73 @@ func Background() Context { // TODO returns a non-nil, empty Context. Code should use context.TODO when // it's unclear which Context to use or it is not yet available (because the // surrounding function has not yet been extended to accept a Context -// parameter). TODO is recognized by static analysis tools that determine -// whether Contexts are propagated correctly in a program. +// parameter). func TODO() Context { return todo } + +var ( + background = context.Background() + todo = context.TODO() +) + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// A CancelFunc may be called by multiple goroutines simultaneously. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc = context.CancelFunc + +// WithCancel returns a derived context that points to the parent context +// but has a new Done channel. The returned context's Done channel is closed +// when the returned cancel function is called or when the parent context's +// Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this [Context] complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + return context.WithCancel(parent) +} + +// WithDeadline returns a derived context that points to the parent context +// but has the deadline adjusted to be no later than d. If the parent's +// deadline is already earlier than d, WithDeadline(parent, d) is semantically +// equivalent to parent. The returned [Context.Done] channel is closed when +// the deadline expires, when the returned cancel function is called, +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this [Context] complete. +func WithDeadline(parent Context, d time.Time) (Context, CancelFunc) { + return context.WithDeadline(parent, d) +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). 
+// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this [Context] complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return context.WithTimeout(parent, timeout) +} + +// WithValue returns a derived context that points to the parent Context. +// In the derived context, the value associated with key is val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +// +// The provided key must be comparable and should not be of type +// string or any other built-in type to avoid collisions between +// packages using context. Users of WithValue should define their own +// types for keys. To avoid allocating when assigning to an +// interface{}, context keys often have concrete type +// struct{}. Alternatively, exported context key variables' static +// type should be a pointer or interface. +func WithValue(parent Context, key, val interface{}) Context { + return context.WithValue(parent, key, val) +} diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go deleted file mode 100644 index 0c1b867937..0000000000 --- a/vendor/golang.org/x/net/context/go17.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.7 - -package context - -import ( - "context" // standard library's context, as of Go 1.7 - "time" -) - -var ( - todo = context.TODO() - background = context.Background() -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = context.Canceled - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = context.DeadlineExceeded - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - ctx, f := context.WithCancel(parent) - return ctx, f -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. 
-func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - ctx, f := context.WithDeadline(parent, deadline) - return ctx, f -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return context.WithValue(parent, key, val) -} diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go deleted file mode 100644 index e31e35a904..0000000000 --- a/vendor/golang.org/x/net/context/go19.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.9 - -package context - -import "context" // standard library's context, as of Go 1.7 - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context = context.Context - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc = context.CancelFunc diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go deleted file mode 100644 index 065ff3dfa5..0000000000 --- a/vendor/golang.org/x/net/context/pre_go17.go +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.7 - -package context - -import ( - "errors" - "fmt" - "sync" - "time" -) - -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. -type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case background: - return "context.Background" - case todo: - return "context.TODO" - } - return "unknown empty Context" -} - -var ( - background = new(emptyCtx) - todo = new(emptyCtx) -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = errors.New("context canceled") - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. 
-var DeadlineExceeded = errors.New("context deadline exceeded") - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - c := newCancelCtx(parent) - propagateCancel(parent, c) - return c, func() { c.cancel(true, Canceled) } -} - -// newCancelCtx returns an initialized cancelCtx. -func newCancelCtx(parent Context) *cancelCtx { - return &cancelCtx{ - Context: parent, - done: make(chan struct{}), - } -} - -// propagateCancel arranges for child to be canceled when parent is. -func propagateCancel(parent Context, child canceler) { - if parent.Done() == nil { - return // parent is never canceled - } - if p, ok := parentCancelCtx(parent); ok { - p.mu.Lock() - if p.err != nil { - // parent has already been canceled - child.cancel(false, p.err) - } else { - if p.children == nil { - p.children = make(map[canceler]bool) - } - p.children[child] = true - } - p.mu.Unlock() - } else { - go func() { - select { - case <-parent.Done(): - child.cancel(false, parent.Err()) - case <-child.Done(): - } - }() - } -} - -// parentCancelCtx follows a chain of parent references until it finds a -// *cancelCtx. This function understands how each of the concrete types in this -// package represents its parent. -func parentCancelCtx(parent Context) (*cancelCtx, bool) { - for { - switch c := parent.(type) { - case *cancelCtx: - return c, true - case *timerCtx: - return c.cancelCtx, true - case *valueCtx: - parent = c.Context - default: - return nil, false - } - } -} - -// removeChild removes a context from its parent. -func removeChild(parent Context, child canceler) { - p, ok := parentCancelCtx(parent) - if !ok { - return - } - p.mu.Lock() - if p.children != nil { - delete(p.children, child) - } - p.mu.Unlock() -} - -// A canceler is a context type that can be canceled directly. The -// implementations are *cancelCtx and *timerCtx. -type canceler interface { - cancel(removeFromParent bool, err error) - Done() <-chan struct{} -} - -// A cancelCtx can be canceled. When canceled, it also cancels any children -// that implement canceler. -type cancelCtx struct { - Context - - done chan struct{} // closed by the first cancel call. - - mu sync.Mutex - children map[canceler]bool // set to nil by the first cancel call - err error // set to non-nil by the first cancel call -} - -func (c *cancelCtx) Done() <-chan struct{} { - return c.done -} - -func (c *cancelCtx) Err() error { - c.mu.Lock() - defer c.mu.Unlock() - return c.err -} - -func (c *cancelCtx) String() string { - return fmt.Sprintf("%v.WithCancel", c.Context) -} - -// cancel closes c.done, cancels each of c's children, and, if -// removeFromParent is true, removes c from its parent's children. -func (c *cancelCtx) cancel(removeFromParent bool, err error) { - if err == nil { - panic("context: internal error: missing cancel error") - } - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return // already canceled - } - c.err = err - close(c.done) - for child := range c.children { - // NOTE: acquiring the child's lock while holding parent's lock. 
- child.cancel(false, err) - } - c.children = nil - c.mu.Unlock() - - if removeFromParent { - removeChild(c.Context, c) - } -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { - // The current deadline is already sooner than the new one. - return WithCancel(parent) - } - c := &timerCtx{ - cancelCtx: newCancelCtx(parent), - deadline: deadline, - } - propagateCancel(parent, c) - d := deadline.Sub(time.Now()) - if d <= 0 { - c.cancel(true, DeadlineExceeded) // deadline has already passed - return c, func() { c.cancel(true, Canceled) } - } - c.mu.Lock() - defer c.mu.Unlock() - if c.err == nil { - c.timer = time.AfterFunc(d, func() { - c.cancel(true, DeadlineExceeded) - }) - } - return c, func() { c.cancel(true, Canceled) } -} - -// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to -// implement Done and Err. It implements cancel by stopping its timer then -// delegating to cancelCtx.cancel. -type timerCtx struct { - *cancelCtx - timer *time.Timer // Under cancelCtx.mu. - - deadline time.Time -} - -func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { - return c.deadline, true -} - -func (c *timerCtx) String() string { - return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) -} - -func (c *timerCtx) cancel(removeFromParent bool, err error) { - c.cancelCtx.cancel(false, err) - if removeFromParent { - // Remove this timerCtx from its parent cancelCtx's children. - removeChild(c.cancelCtx.Context, c) - } - c.mu.Lock() - if c.timer != nil { - c.timer.Stop() - c.timer = nil - } - c.mu.Unlock() -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return &valueCtx{parent, key, val} -} - -// A valueCtx carries a key-value pair. It implements Value for that key and -// delegates all other calls to the embedded Context. 
-type valueCtx struct { - Context - key, val interface{} -} - -func (c *valueCtx) String() string { - return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) -} - -func (c *valueCtx) Value(key interface{}) interface{} { - if c.key == key { - return c.val - } - return c.Context.Value(key) -} diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go deleted file mode 100644 index ec5a638033..0000000000 --- a/vendor/golang.org/x/net/context/pre_go19.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.9 - -package context - -import "time" - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - // - // WithCancel arranges for Done to be closed when cancel is called; - // WithDeadline arranges for Done to be closed when the deadline - // expires; WithTimeout arranges for Done to be closed when the timeout - // elapses. - // - // Done is provided for use in select statements: - // - // // Stream generates values with DoSomething and sends them to out - // // until DoSomething returns an error or ctx.Done is closed. - // func Stream(ctx context.Context, out chan<- Value) error { - // for { - // v, err := DoSomething(ctx) - // if err != nil { - // return err - // } - // select { - // case <-ctx.Done(): - // return ctx.Err() - // case out <- v: - // } - // } - // } - // - // See http://blog.golang.org/pipelines for more examples of how to use - // a Done channel for cancelation. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. - // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - // - // A key identifies a specific value in a Context. Functions that wish - // to store values in Context typically allocate a key in a global - // variable then use that key as the argument to context.WithValue and - // Context.Value. A key can be any type that supports equality; - // packages should define keys as an unexported type to avoid - // collisions. - // - // Packages that define a Context key should provide type-safe accessors - // for the values stores using that key: - // - // // Package user defines a User type that's stored in Contexts. 
- // package user - // - // import "golang.org/x/net/context" - // - // // User is the type of value stored in the Contexts. - // type User struct {...} - // - // // key is an unexported type for keys defined in this package. - // // This prevents collisions with keys defined in other packages. - // type key int - // - // // userKey is the key for user.User values in Contexts. It is - // // unexported; clients use user.NewContext and user.FromContext - // // instead of using this key directly. - // var userKey key = 0 - // - // // NewContext returns a new Context that carries value u. - // func NewContext(ctx context.Context, u *User) context.Context { - // return context.WithValue(ctx, userKey, u) - // } - // - // // FromContext returns the User value stored in ctx, if any. - // func FromContext(ctx context.Context) (*User, bool) { - // u, ok := ctx.Value(userKey).(*User) - // return u, ok - // } - Value(key interface{}) interface{} -} - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc func() diff --git a/vendor/golang.org/x/net/html/atom/table.go b/vendor/golang.org/x/net/html/atom/table.go index 2a938864cb..b460e6f722 100644 --- a/vendor/golang.org/x/net/html/atom/table.go +++ b/vendor/golang.org/x/net/html/atom/table.go @@ -11,23 +11,23 @@ const ( AcceptCharset Atom = 0x1a0e Accesskey Atom = 0x2c09 Acronym Atom = 0xaa07 - Action Atom = 0x27206 - Address Atom = 0x6f307 + Action Atom = 0x26506 + Address Atom = 0x6f107 Align Atom = 0xb105 - Allowfullscreen Atom = 0x2080f + Allowfullscreen Atom = 0x3280f Allowpaymentrequest Atom = 0xc113 Allowusermedia Atom = 0xdd0e Alt Atom = 0xf303 Annotation Atom = 0x1c90a AnnotationXml Atom = 0x1c90e - Applet Atom = 0x31906 - Area Atom = 0x35604 - Article Atom = 0x3fc07 + Applet Atom = 0x30806 + Area Atom = 0x35004 + Article Atom = 0x3f607 As Atom = 0x3c02 Aside Atom = 0x10705 Async Atom = 0xff05 Audio Atom = 0x11505 - Autocomplete Atom = 0x2780c + Autocomplete Atom = 0x26b0c Autofocus Atom = 0x12109 Autoplay Atom = 0x13c08 B Atom = 0x101 @@ -43,34 +43,34 @@ const ( Br Atom = 0x202 Button Atom = 0x19106 Canvas Atom = 0x10306 - Caption Atom = 0x23107 - Center Atom = 0x22006 - Challenge Atom = 0x29b09 + Caption Atom = 0x22407 + Center Atom = 0x21306 + Challenge Atom = 0x28e09 Charset Atom = 0x2107 - Checked Atom = 0x47907 + Checked Atom = 0x5b507 Cite Atom = 0x19c04 - Class Atom = 0x56405 - Code Atom = 0x5c504 + Class Atom = 0x55805 + Code Atom = 0x5ee04 Col Atom = 0x1ab03 Colgroup Atom = 0x1ab08 Color Atom = 0x1bf05 Cols Atom = 0x1c404 Colspan Atom = 0x1c407 Command Atom = 0x1d707 - Content Atom = 0x58b07 - Contenteditable Atom = 0x58b0f - Contextmenu Atom = 0x3800b + Content Atom = 0x57b07 + Contenteditable Atom = 0x57b0f + Contextmenu Atom = 0x37a0b Controls Atom = 0x1de08 - Coords Atom = 0x1ea06 - Crossorigin Atom = 0x1fb0b - Data Atom = 0x4a504 - Datalist Atom = 0x4a508 - Datetime Atom = 0x2b808 - Dd Atom = 0x2d702 + Coords Atom = 0x1f006 + Crossorigin Atom = 0x1fa0b + Data Atom = 0x49904 + Datalist Atom = 0x49908 + Datetime Atom = 0x2ab08 + Dd Atom = 0x2bf02 Default Atom = 0x10a07 - Defer Atom = 0x5c705 - Del Atom = 0x45203 - Desc Atom = 0x56104 + Defer Atom = 0x5f005 + Del Atom = 0x44c03 + Desc Atom = 0x55504 Details Atom = 0x7207 Dfn Atom = 0x8703 Dialog Atom = 0xbb06 @@ -78,106 +78,106 @@ const ( Dirname Atom = 0x9307 Disabled Atom = 0x16408 Div Atom = 0x16b03 - Dl Atom = 0x5e602 - Download Atom = 
0x46308 + Dl Atom = 0x5d602 + Download Atom = 0x45d08 Draggable Atom = 0x17a09 - Dropzone Atom = 0x40508 - Dt Atom = 0x64b02 + Dropzone Atom = 0x3ff08 + Dt Atom = 0x64002 Em Atom = 0x6e02 Embed Atom = 0x6e05 - Enctype Atom = 0x28d07 - Face Atom = 0x21e04 - Fieldset Atom = 0x22608 - Figcaption Atom = 0x22e0a - Figure Atom = 0x24806 + Enctype Atom = 0x28007 + Face Atom = 0x21104 + Fieldset Atom = 0x21908 + Figcaption Atom = 0x2210a + Figure Atom = 0x23b06 Font Atom = 0x3f04 Footer Atom = 0xf606 - For Atom = 0x25403 - ForeignObject Atom = 0x2540d - Foreignobject Atom = 0x2610d - Form Atom = 0x26e04 - Formaction Atom = 0x26e0a - Formenctype Atom = 0x2890b - Formmethod Atom = 0x2a40a - Formnovalidate Atom = 0x2ae0e - Formtarget Atom = 0x2c00a + For Atom = 0x24703 + ForeignObject Atom = 0x2470d + Foreignobject Atom = 0x2540d + Form Atom = 0x26104 + Formaction Atom = 0x2610a + Formenctype Atom = 0x27c0b + Formmethod Atom = 0x2970a + Formnovalidate Atom = 0x2a10e + Formtarget Atom = 0x2b30a Frame Atom = 0x8b05 Frameset Atom = 0x8b08 H1 Atom = 0x15c02 - H2 Atom = 0x2de02 - H3 Atom = 0x30d02 - H4 Atom = 0x34502 - H5 Atom = 0x34f02 - H6 Atom = 0x64d02 - Head Atom = 0x33104 - Header Atom = 0x33106 - Headers Atom = 0x33107 + H2 Atom = 0x56102 + H3 Atom = 0x2cd02 + H4 Atom = 0x2fc02 + H5 Atom = 0x33f02 + H6 Atom = 0x34902 + Head Atom = 0x32004 + Header Atom = 0x32006 + Headers Atom = 0x32007 Height Atom = 0x5206 - Hgroup Atom = 0x2ca06 - Hidden Atom = 0x2d506 - High Atom = 0x2db04 + Hgroup Atom = 0x64206 + Hidden Atom = 0x2bd06 + High Atom = 0x2ca04 Hr Atom = 0x15702 - Href Atom = 0x2e004 - Hreflang Atom = 0x2e008 + Href Atom = 0x2cf04 + Hreflang Atom = 0x2cf08 Html Atom = 0x5604 - HttpEquiv Atom = 0x2e80a + HttpEquiv Atom = 0x2d70a I Atom = 0x601 - Icon Atom = 0x58a04 + Icon Atom = 0x57a04 Id Atom = 0x10902 - Iframe Atom = 0x2fc06 - Image Atom = 0x30205 - Img Atom = 0x30703 - Input Atom = 0x44b05 - Inputmode Atom = 0x44b09 - Ins Atom = 0x20403 - Integrity Atom = 0x23f09 + Iframe Atom = 0x2eb06 + Image Atom = 0x2f105 + Img Atom = 0x2f603 + Input Atom = 0x44505 + Inputmode Atom = 0x44509 + Ins Atom = 0x20303 + Integrity Atom = 0x23209 Is Atom = 0x16502 - Isindex Atom = 0x30f07 - Ismap Atom = 0x31605 - Itemid Atom = 0x38b06 + Isindex Atom = 0x2fe07 + Ismap Atom = 0x30505 + Itemid Atom = 0x38506 Itemprop Atom = 0x19d08 - Itemref Atom = 0x3cd07 - Itemscope Atom = 0x67109 - Itemtype Atom = 0x31f08 + Itemref Atom = 0x3c707 + Itemscope Atom = 0x66f09 + Itemtype Atom = 0x30e08 Kbd Atom = 0xb903 Keygen Atom = 0x3206 Keytype Atom = 0xd607 Kind Atom = 0x17704 Label Atom = 0x5905 - Lang Atom = 0x2e404 + Lang Atom = 0x2d304 Legend Atom = 0x18106 Li Atom = 0xb202 Link Atom = 0x17404 - List Atom = 0x4a904 - Listing Atom = 0x4a907 + List Atom = 0x49d04 + Listing Atom = 0x49d07 Loop Atom = 0x5d04 Low Atom = 0xc303 Main Atom = 0x1004 Malignmark Atom = 0xb00a - Manifest Atom = 0x6d708 - Map Atom = 0x31803 + Manifest Atom = 0x6d508 + Map Atom = 0x30703 Mark Atom = 0xb604 - Marquee Atom = 0x32707 - Math Atom = 0x32e04 - Max Atom = 0x33d03 - Maxlength Atom = 0x33d09 + Marquee Atom = 0x31607 + Math Atom = 0x31d04 + Max Atom = 0x33703 + Maxlength Atom = 0x33709 Media Atom = 0xe605 Mediagroup Atom = 0xe60a - Menu Atom = 0x38704 - Menuitem Atom = 0x38708 - Meta Atom = 0x4b804 + Menu Atom = 0x38104 + Menuitem Atom = 0x38108 + Meta Atom = 0x4ac04 Meter Atom = 0x9805 - Method Atom = 0x2a806 - Mglyph Atom = 0x30806 - Mi Atom = 0x34702 - Min Atom = 0x34703 - Minlength Atom = 0x34709 - Mn Atom = 0x2b102 + Method Atom = 0x29b06 + 
Mglyph Atom = 0x2f706 + Mi Atom = 0x34102 + Min Atom = 0x34103 + Minlength Atom = 0x34109 + Mn Atom = 0x2a402 Mo Atom = 0xa402 - Ms Atom = 0x67402 - Mtext Atom = 0x35105 - Multiple Atom = 0x35f08 - Muted Atom = 0x36705 + Ms Atom = 0x67202 + Mtext Atom = 0x34b05 + Multiple Atom = 0x35908 + Muted Atom = 0x36105 Name Atom = 0x9604 Nav Atom = 0x1303 Nobr Atom = 0x3704 @@ -185,101 +185,101 @@ const ( Noframes Atom = 0x8908 Nomodule Atom = 0xa208 Nonce Atom = 0x1a605 - Noscript Atom = 0x21608 - Novalidate Atom = 0x2b20a - Object Atom = 0x26806 + Noscript Atom = 0x2c208 + Novalidate Atom = 0x2a50a + Object Atom = 0x25b06 Ol Atom = 0x13702 Onabort Atom = 0x19507 - Onafterprint Atom = 0x2360c - Onautocomplete Atom = 0x2760e - Onautocompleteerror Atom = 0x27613 - Onauxclick Atom = 0x61f0a - Onbeforeprint Atom = 0x69e0d - Onbeforeunload Atom = 0x6e70e - Onblur Atom = 0x56d06 + Onafterprint Atom = 0x2290c + Onautocomplete Atom = 0x2690e + Onautocompleteerror Atom = 0x26913 + Onauxclick Atom = 0x6140a + Onbeforeprint Atom = 0x69c0d + Onbeforeunload Atom = 0x6e50e + Onblur Atom = 0x1ea06 Oncancel Atom = 0x11908 Oncanplay Atom = 0x14d09 Oncanplaythrough Atom = 0x14d10 - Onchange Atom = 0x41b08 - Onclick Atom = 0x2f507 - Onclose Atom = 0x36c07 - Oncontextmenu Atom = 0x37e0d - Oncopy Atom = 0x39106 - Oncuechange Atom = 0x3970b - Oncut Atom = 0x3a205 - Ondblclick Atom = 0x3a70a - Ondrag Atom = 0x3b106 - Ondragend Atom = 0x3b109 - Ondragenter Atom = 0x3ba0b - Ondragexit Atom = 0x3c50a - Ondragleave Atom = 0x3df0b - Ondragover Atom = 0x3ea0a - Ondragstart Atom = 0x3f40b - Ondrop Atom = 0x40306 - Ondurationchange Atom = 0x41310 - Onemptied Atom = 0x40a09 - Onended Atom = 0x42307 - Onerror Atom = 0x42a07 - Onfocus Atom = 0x43107 - Onhashchange Atom = 0x43d0c - Oninput Atom = 0x44907 - Oninvalid Atom = 0x45509 - Onkeydown Atom = 0x45e09 - Onkeypress Atom = 0x46b0a - Onkeyup Atom = 0x48007 - Onlanguagechange Atom = 0x48d10 - Onload Atom = 0x49d06 - Onloadeddata Atom = 0x49d0c - Onloadedmetadata Atom = 0x4b010 - Onloadend Atom = 0x4c609 - Onloadstart Atom = 0x4cf0b - Onmessage Atom = 0x4da09 - Onmessageerror Atom = 0x4da0e - Onmousedown Atom = 0x4e80b - Onmouseenter Atom = 0x4f30c - Onmouseleave Atom = 0x4ff0c - Onmousemove Atom = 0x50b0b - Onmouseout Atom = 0x5160a - Onmouseover Atom = 0x5230b - Onmouseup Atom = 0x52e09 - Onmousewheel Atom = 0x53c0c - Onoffline Atom = 0x54809 - Ononline Atom = 0x55108 - Onpagehide Atom = 0x5590a - Onpageshow Atom = 0x5730a - Onpaste Atom = 0x57f07 - Onpause Atom = 0x59a07 - Onplay Atom = 0x5a406 - Onplaying Atom = 0x5a409 - Onpopstate Atom = 0x5ad0a - Onprogress Atom = 0x5b70a - Onratechange Atom = 0x5cc0c - Onrejectionhandled Atom = 0x5d812 - Onreset Atom = 0x5ea07 - Onresize Atom = 0x5f108 - Onscroll Atom = 0x60008 - Onsecuritypolicyviolation Atom = 0x60819 - Onseeked Atom = 0x62908 - Onseeking Atom = 0x63109 - Onselect Atom = 0x63a08 - Onshow Atom = 0x64406 - Onsort Atom = 0x64f06 - Onstalled Atom = 0x65909 - Onstorage Atom = 0x66209 - Onsubmit Atom = 0x66b08 - Onsuspend Atom = 0x67b09 + Onchange Atom = 0x41508 + Onclick Atom = 0x2e407 + Onclose Atom = 0x36607 + Oncontextmenu Atom = 0x3780d + Oncopy Atom = 0x38b06 + Oncuechange Atom = 0x3910b + Oncut Atom = 0x39c05 + Ondblclick Atom = 0x3a10a + Ondrag Atom = 0x3ab06 + Ondragend Atom = 0x3ab09 + Ondragenter Atom = 0x3b40b + Ondragexit Atom = 0x3bf0a + Ondragleave Atom = 0x3d90b + Ondragover Atom = 0x3e40a + Ondragstart Atom = 0x3ee0b + Ondrop Atom = 0x3fd06 + Ondurationchange Atom = 0x40d10 + Onemptied Atom = 0x40409 + Onended 
Atom = 0x41d07 + Onerror Atom = 0x42407 + Onfocus Atom = 0x42b07 + Onhashchange Atom = 0x4370c + Oninput Atom = 0x44307 + Oninvalid Atom = 0x44f09 + Onkeydown Atom = 0x45809 + Onkeypress Atom = 0x4650a + Onkeyup Atom = 0x47407 + Onlanguagechange Atom = 0x48110 + Onload Atom = 0x49106 + Onloadeddata Atom = 0x4910c + Onloadedmetadata Atom = 0x4a410 + Onloadend Atom = 0x4ba09 + Onloadstart Atom = 0x4c30b + Onmessage Atom = 0x4ce09 + Onmessageerror Atom = 0x4ce0e + Onmousedown Atom = 0x4dc0b + Onmouseenter Atom = 0x4e70c + Onmouseleave Atom = 0x4f30c + Onmousemove Atom = 0x4ff0b + Onmouseout Atom = 0x50a0a + Onmouseover Atom = 0x5170b + Onmouseup Atom = 0x52209 + Onmousewheel Atom = 0x5300c + Onoffline Atom = 0x53c09 + Ononline Atom = 0x54508 + Onpagehide Atom = 0x54d0a + Onpageshow Atom = 0x5630a + Onpaste Atom = 0x56f07 + Onpause Atom = 0x58a07 + Onplay Atom = 0x59406 + Onplaying Atom = 0x59409 + Onpopstate Atom = 0x59d0a + Onprogress Atom = 0x5a70a + Onratechange Atom = 0x5bc0c + Onrejectionhandled Atom = 0x5c812 + Onreset Atom = 0x5da07 + Onresize Atom = 0x5e108 + Onscroll Atom = 0x5f508 + Onsecuritypolicyviolation Atom = 0x5fd19 + Onseeked Atom = 0x61e08 + Onseeking Atom = 0x62609 + Onselect Atom = 0x62f08 + Onshow Atom = 0x63906 + Onsort Atom = 0x64d06 + Onstalled Atom = 0x65709 + Onstorage Atom = 0x66009 + Onsubmit Atom = 0x66908 + Onsuspend Atom = 0x67909 Ontimeupdate Atom = 0x400c - Ontoggle Atom = 0x68408 - Onunhandledrejection Atom = 0x68c14 - Onunload Atom = 0x6ab08 - Onvolumechange Atom = 0x6b30e - Onwaiting Atom = 0x6c109 - Onwheel Atom = 0x6ca07 + Ontoggle Atom = 0x68208 + Onunhandledrejection Atom = 0x68a14 + Onunload Atom = 0x6a908 + Onvolumechange Atom = 0x6b10e + Onwaiting Atom = 0x6bf09 + Onwheel Atom = 0x6c807 Open Atom = 0x1a304 Optgroup Atom = 0x5f08 - Optimum Atom = 0x6d107 - Option Atom = 0x6e306 - Output Atom = 0x51d06 + Optimum Atom = 0x6cf07 + Option Atom = 0x6e106 + Output Atom = 0x51106 P Atom = 0xc01 Param Atom = 0xc05 Pattern Atom = 0x6607 @@ -288,466 +288,468 @@ const ( Placeholder Atom = 0x1310b Plaintext Atom = 0x1b209 Playsinline Atom = 0x1400b - Poster Atom = 0x2cf06 - Pre Atom = 0x47003 - Preload Atom = 0x48607 - Progress Atom = 0x5b908 - Prompt Atom = 0x53606 - Public Atom = 0x58606 + Poster Atom = 0x64706 + Pre Atom = 0x46a03 + Preload Atom = 0x47a07 + Progress Atom = 0x5a908 + Prompt Atom = 0x52a06 + Public Atom = 0x57606 Q Atom = 0xcf01 Radiogroup Atom = 0x30a Rb Atom = 0x3a02 - Readonly Atom = 0x35708 - Referrerpolicy Atom = 0x3d10e - Rel Atom = 0x48703 - Required Atom = 0x24c08 + Readonly Atom = 0x35108 + Referrerpolicy Atom = 0x3cb0e + Rel Atom = 0x47b03 + Required Atom = 0x23f08 Reversed Atom = 0x8008 Rows Atom = 0x9c04 Rowspan Atom = 0x9c07 - Rp Atom = 0x23c02 + Rp Atom = 0x22f02 Rt Atom = 0x19a02 Rtc Atom = 0x19a03 Ruby Atom = 0xfb04 S Atom = 0x2501 Samp Atom = 0x7804 Sandbox Atom = 0x12907 - Scope Atom = 0x67505 - Scoped Atom = 0x67506 - Script Atom = 0x21806 - Seamless Atom = 0x37108 - Section Atom = 0x56807 - Select Atom = 0x63c06 - Selected Atom = 0x63c08 - Shape Atom = 0x1e505 - Size Atom = 0x5f504 - Sizes Atom = 0x5f505 - Slot Atom = 0x1ef04 - Small Atom = 0x20605 - Sortable Atom = 0x65108 - Sorted Atom = 0x33706 - Source Atom = 0x37806 - Spacer Atom = 0x43706 + Scope Atom = 0x67305 + Scoped Atom = 0x67306 + Script Atom = 0x2c406 + Seamless Atom = 0x36b08 + Search Atom = 0x55c06 + Section Atom = 0x1e507 + Select Atom = 0x63106 + Selected Atom = 0x63108 + Shape Atom = 0x1f505 + Size Atom = 0x5e504 + Sizes Atom = 0x5e505 + Slot Atom = 0x20504 
+ Small Atom = 0x32605 + Sortable Atom = 0x64f08 + Sorted Atom = 0x37206 + Source Atom = 0x43106 + Spacer Atom = 0x46e06 Span Atom = 0x9f04 - Spellcheck Atom = 0x4740a - Src Atom = 0x5c003 - Srcdoc Atom = 0x5c006 - Srclang Atom = 0x5f907 - Srcset Atom = 0x6f906 - Start Atom = 0x3fa05 - Step Atom = 0x58304 + Spellcheck Atom = 0x5b00a + Src Atom = 0x5e903 + Srcdoc Atom = 0x5e906 + Srclang Atom = 0x6f707 + Srcset Atom = 0x6fe06 + Start Atom = 0x3f405 + Step Atom = 0x57304 Strike Atom = 0xd206 - Strong Atom = 0x6dd06 - Style Atom = 0x6ff05 - Sub Atom = 0x66d03 - Summary Atom = 0x70407 - Sup Atom = 0x70b03 - Svg Atom = 0x70e03 - System Atom = 0x71106 - Tabindex Atom = 0x4be08 - Table Atom = 0x59505 - Target Atom = 0x2c406 + Strong Atom = 0x6db06 + Style Atom = 0x70405 + Sub Atom = 0x66b03 + Summary Atom = 0x70907 + Sup Atom = 0x71003 + Svg Atom = 0x71303 + System Atom = 0x71606 + Tabindex Atom = 0x4b208 + Table Atom = 0x58505 + Target Atom = 0x2b706 Tbody Atom = 0x2705 Td Atom = 0x9202 - Template Atom = 0x71408 - Textarea Atom = 0x35208 + Template Atom = 0x71908 + Textarea Atom = 0x34c08 Tfoot Atom = 0xf505 Th Atom = 0x15602 - Thead Atom = 0x33005 + Thead Atom = 0x31f05 Time Atom = 0x4204 Title Atom = 0x11005 Tr Atom = 0xcc02 Track Atom = 0x1ba05 - Translate Atom = 0x1f209 + Translate Atom = 0x20809 Tt Atom = 0x6802 Type Atom = 0xd904 - Typemustmatch Atom = 0x2900d + Typemustmatch Atom = 0x2830d U Atom = 0xb01 Ul Atom = 0xa702 Updateviacache Atom = 0x460e - Usemap Atom = 0x59e06 + Usemap Atom = 0x58e06 Value Atom = 0x1505 Var Atom = 0x16d03 - Video Atom = 0x2f105 - Wbr Atom = 0x57c03 - Width Atom = 0x64905 - Workertype Atom = 0x71c0a - Wrap Atom = 0x72604 + Video Atom = 0x2e005 + Wbr Atom = 0x56c03 + Width Atom = 0x63e05 + Workertype Atom = 0x7210a + Wrap Atom = 0x72b04 Xmp Atom = 0x12f03 ) -const hash0 = 0x81cdf10e +const hash0 = 0x84f70e16 const maxAtomLen = 25 var table = [1 << 9]Atom{ - 0x1: 0xe60a, // mediagroup - 0x2: 0x2e404, // lang - 0x4: 0x2c09, // accesskey - 0x5: 0x8b08, // frameset - 0x7: 0x63a08, // onselect - 0x8: 0x71106, // system - 0xa: 0x64905, // width - 0xc: 0x2890b, // formenctype - 0xd: 0x13702, // ol - 0xe: 0x3970b, // oncuechange - 0x10: 0x14b03, // bdo - 0x11: 0x11505, // audio - 0x12: 0x17a09, // draggable - 0x14: 0x2f105, // video - 0x15: 0x2b102, // mn - 0x16: 0x38704, // menu - 0x17: 0x2cf06, // poster - 0x19: 0xf606, // footer - 0x1a: 0x2a806, // method - 0x1b: 0x2b808, // datetime - 0x1c: 0x19507, // onabort - 0x1d: 0x460e, // updateviacache - 0x1e: 0xff05, // async - 0x1f: 0x49d06, // onload - 0x21: 0x11908, // oncancel - 0x22: 0x62908, // onseeked - 0x23: 0x30205, // image - 0x24: 0x5d812, // onrejectionhandled - 0x26: 0x17404, // link - 0x27: 0x51d06, // output - 0x28: 0x33104, // head - 0x29: 0x4ff0c, // onmouseleave - 0x2a: 0x57f07, // onpaste - 0x2b: 0x5a409, // onplaying - 0x2c: 0x1c407, // colspan - 0x2f: 0x1bf05, // color - 0x30: 0x5f504, // size - 0x31: 0x2e80a, // http-equiv - 0x33: 0x601, // i - 0x34: 0x5590a, // onpagehide - 0x35: 0x68c14, // onunhandledrejection - 0x37: 0x42a07, // onerror - 0x3a: 0x3b08, // basefont - 0x3f: 0x1303, // nav - 0x40: 0x17704, // kind - 0x41: 0x35708, // readonly - 0x42: 0x30806, // mglyph - 0x44: 0xb202, // li - 0x46: 0x2d506, // hidden - 0x47: 0x70e03, // svg - 0x48: 0x58304, // step - 0x49: 0x23f09, // integrity - 0x4a: 0x58606, // public - 0x4c: 0x1ab03, // col - 0x4d: 0x1870a, // blockquote - 0x4e: 0x34f02, // h5 - 0x50: 0x5b908, // progress - 0x51: 0x5f505, // sizes - 0x52: 0x34502, // h4 - 0x56: 0x33005, // thead 
- 0x57: 0xd607, // keytype - 0x58: 0x5b70a, // onprogress - 0x59: 0x44b09, // inputmode - 0x5a: 0x3b109, // ondragend - 0x5d: 0x3a205, // oncut - 0x5e: 0x43706, // spacer - 0x5f: 0x1ab08, // colgroup - 0x62: 0x16502, // is - 0x65: 0x3c02, // as - 0x66: 0x54809, // onoffline - 0x67: 0x33706, // sorted - 0x69: 0x48d10, // onlanguagechange - 0x6c: 0x43d0c, // onhashchange - 0x6d: 0x9604, // name - 0x6e: 0xf505, // tfoot - 0x6f: 0x56104, // desc - 0x70: 0x33d03, // max - 0x72: 0x1ea06, // coords - 0x73: 0x30d02, // h3 - 0x74: 0x6e70e, // onbeforeunload - 0x75: 0x9c04, // rows - 0x76: 0x63c06, // select - 0x77: 0x9805, // meter - 0x78: 0x38b06, // itemid - 0x79: 0x53c0c, // onmousewheel - 0x7a: 0x5c006, // srcdoc - 0x7d: 0x1ba05, // track - 0x7f: 0x31f08, // itemtype - 0x82: 0xa402, // mo - 0x83: 0x41b08, // onchange - 0x84: 0x33107, // headers - 0x85: 0x5cc0c, // onratechange - 0x86: 0x60819, // onsecuritypolicyviolation - 0x88: 0x4a508, // datalist - 0x89: 0x4e80b, // onmousedown - 0x8a: 0x1ef04, // slot - 0x8b: 0x4b010, // onloadedmetadata - 0x8c: 0x1a06, // accept - 0x8d: 0x26806, // object - 0x91: 0x6b30e, // onvolumechange - 0x92: 0x2107, // charset - 0x93: 0x27613, // onautocompleteerror - 0x94: 0xc113, // allowpaymentrequest - 0x95: 0x2804, // body - 0x96: 0x10a07, // default - 0x97: 0x63c08, // selected - 0x98: 0x21e04, // face - 0x99: 0x1e505, // shape - 0x9b: 0x68408, // ontoggle - 0x9e: 0x64b02, // dt - 0x9f: 0xb604, // mark - 0xa1: 0xb01, // u - 0xa4: 0x6ab08, // onunload - 0xa5: 0x5d04, // loop - 0xa6: 0x16408, // disabled - 0xaa: 0x42307, // onended - 0xab: 0xb00a, // malignmark - 0xad: 0x67b09, // onsuspend - 0xae: 0x35105, // mtext - 0xaf: 0x64f06, // onsort - 0xb0: 0x19d08, // itemprop - 0xb3: 0x67109, // itemscope - 0xb4: 0x17305, // blink - 0xb6: 0x3b106, // ondrag - 0xb7: 0xa702, // ul - 0xb8: 0x26e04, // form - 0xb9: 0x12907, // sandbox - 0xba: 0x8b05, // frame - 0xbb: 0x1505, // value - 0xbc: 0x66209, // onstorage - 0xbf: 0xaa07, // acronym - 0xc0: 0x19a02, // rt - 0xc2: 0x202, // br - 0xc3: 0x22608, // fieldset - 0xc4: 0x2900d, // typemustmatch - 0xc5: 0xa208, // nomodule - 0xc6: 0x6c07, // noembed - 0xc7: 0x69e0d, // onbeforeprint - 0xc8: 0x19106, // button - 0xc9: 0x2f507, // onclick - 0xca: 0x70407, // summary - 0xcd: 0xfb04, // ruby - 0xce: 0x56405, // class - 0xcf: 0x3f40b, // ondragstart - 0xd0: 0x23107, // caption - 0xd4: 0xdd0e, // allowusermedia - 0xd5: 0x4cf0b, // onloadstart - 0xd9: 0x16b03, // div - 0xda: 0x4a904, // list - 0xdb: 0x32e04, // math - 0xdc: 0x44b05, // input - 0xdf: 0x3ea0a, // ondragover - 0xe0: 0x2de02, // h2 - 0xe2: 0x1b209, // plaintext - 0xe4: 0x4f30c, // onmouseenter - 0xe7: 0x47907, // checked - 0xe8: 0x47003, // pre - 0xea: 0x35f08, // multiple - 0xeb: 0xba03, // bdi - 0xec: 0x33d09, // maxlength - 0xed: 0xcf01, // q - 0xee: 0x61f0a, // onauxclick - 0xf0: 0x57c03, // wbr - 0xf2: 0x3b04, // base - 0xf3: 0x6e306, // option - 0xf5: 0x41310, // ondurationchange - 0xf7: 0x8908, // noframes - 0xf9: 0x40508, // dropzone - 0xfb: 0x67505, // scope - 0xfc: 0x8008, // reversed - 0xfd: 0x3ba0b, // ondragenter - 0xfe: 0x3fa05, // start - 0xff: 0x12f03, // xmp - 0x100: 0x5f907, // srclang - 0x101: 0x30703, // img - 0x104: 0x101, // b - 0x105: 0x25403, // for - 0x106: 0x10705, // aside - 0x107: 0x44907, // oninput - 0x108: 0x35604, // area - 0x109: 0x2a40a, // formmethod - 0x10a: 0x72604, // wrap - 0x10c: 0x23c02, // rp - 0x10d: 0x46b0a, // onkeypress - 0x10e: 0x6802, // tt - 0x110: 0x34702, // mi - 0x111: 0x36705, // muted - 0x112: 0xf303, // alt - 
0x113: 0x5c504, // code - 0x114: 0x6e02, // em - 0x115: 0x3c50a, // ondragexit - 0x117: 0x9f04, // span - 0x119: 0x6d708, // manifest - 0x11a: 0x38708, // menuitem - 0x11b: 0x58b07, // content - 0x11d: 0x6c109, // onwaiting - 0x11f: 0x4c609, // onloadend - 0x121: 0x37e0d, // oncontextmenu - 0x123: 0x56d06, // onblur - 0x124: 0x3fc07, // article - 0x125: 0x9303, // dir - 0x126: 0xef04, // ping - 0x127: 0x24c08, // required - 0x128: 0x45509, // oninvalid - 0x129: 0xb105, // align - 0x12b: 0x58a04, // icon - 0x12c: 0x64d02, // h6 - 0x12d: 0x1c404, // cols - 0x12e: 0x22e0a, // figcaption - 0x12f: 0x45e09, // onkeydown - 0x130: 0x66b08, // onsubmit - 0x131: 0x14d09, // oncanplay - 0x132: 0x70b03, // sup - 0x133: 0xc01, // p - 0x135: 0x40a09, // onemptied - 0x136: 0x39106, // oncopy - 0x137: 0x19c04, // cite - 0x138: 0x3a70a, // ondblclick - 0x13a: 0x50b0b, // onmousemove - 0x13c: 0x66d03, // sub - 0x13d: 0x48703, // rel - 0x13e: 0x5f08, // optgroup - 0x142: 0x9c07, // rowspan - 0x143: 0x37806, // source - 0x144: 0x21608, // noscript - 0x145: 0x1a304, // open - 0x146: 0x20403, // ins - 0x147: 0x2540d, // foreignObject - 0x148: 0x5ad0a, // onpopstate - 0x14a: 0x28d07, // enctype - 0x14b: 0x2760e, // onautocomplete - 0x14c: 0x35208, // textarea - 0x14e: 0x2780c, // autocomplete - 0x14f: 0x15702, // hr - 0x150: 0x1de08, // controls - 0x151: 0x10902, // id - 0x153: 0x2360c, // onafterprint - 0x155: 0x2610d, // foreignobject - 0x156: 0x32707, // marquee - 0x157: 0x59a07, // onpause - 0x158: 0x5e602, // dl - 0x159: 0x5206, // height - 0x15a: 0x34703, // min - 0x15b: 0x9307, // dirname - 0x15c: 0x1f209, // translate - 0x15d: 0x5604, // html - 0x15e: 0x34709, // minlength - 0x15f: 0x48607, // preload - 0x160: 0x71408, // template - 0x161: 0x3df0b, // ondragleave - 0x162: 0x3a02, // rb - 0x164: 0x5c003, // src - 0x165: 0x6dd06, // strong - 0x167: 0x7804, // samp - 0x168: 0x6f307, // address - 0x169: 0x55108, // ononline - 0x16b: 0x1310b, // placeholder - 0x16c: 0x2c406, // target - 0x16d: 0x20605, // small - 0x16e: 0x6ca07, // onwheel - 0x16f: 0x1c90a, // annotation - 0x170: 0x4740a, // spellcheck - 0x171: 0x7207, // details - 0x172: 0x10306, // canvas - 0x173: 0x12109, // autofocus - 0x174: 0xc05, // param - 0x176: 0x46308, // download - 0x177: 0x45203, // del - 0x178: 0x36c07, // onclose - 0x179: 0xb903, // kbd - 0x17a: 0x31906, // applet - 0x17b: 0x2e004, // href - 0x17c: 0x5f108, // onresize - 0x17e: 0x49d0c, // onloadeddata - 0x180: 0xcc02, // tr - 0x181: 0x2c00a, // formtarget - 0x182: 0x11005, // title - 0x183: 0x6ff05, // style - 0x184: 0xd206, // strike - 0x185: 0x59e06, // usemap - 0x186: 0x2fc06, // iframe - 0x187: 0x1004, // main - 0x189: 0x7b07, // picture - 0x18c: 0x31605, // ismap - 0x18e: 0x4a504, // data - 0x18f: 0x5905, // label - 0x191: 0x3d10e, // referrerpolicy - 0x192: 0x15602, // th - 0x194: 0x53606, // prompt - 0x195: 0x56807, // section - 0x197: 0x6d107, // optimum - 0x198: 0x2db04, // high - 0x199: 0x15c02, // h1 - 0x19a: 0x65909, // onstalled - 0x19b: 0x16d03, // var - 0x19c: 0x4204, // time - 0x19e: 0x67402, // ms - 0x19f: 0x33106, // header - 0x1a0: 0x4da09, // onmessage - 0x1a1: 0x1a605, // nonce - 0x1a2: 0x26e0a, // formaction - 0x1a3: 0x22006, // center - 0x1a4: 0x3704, // nobr - 0x1a5: 0x59505, // table - 0x1a6: 0x4a907, // listing - 0x1a7: 0x18106, // legend - 0x1a9: 0x29b09, // challenge - 0x1aa: 0x24806, // figure - 0x1ab: 0xe605, // media - 0x1ae: 0xd904, // type - 0x1af: 0x3f04, // font - 0x1b0: 0x4da0e, // onmessageerror - 0x1b1: 0x37108, // seamless - 0x1b2: 
0x8703, // dfn - 0x1b3: 0x5c705, // defer - 0x1b4: 0xc303, // low - 0x1b5: 0x19a03, // rtc - 0x1b6: 0x5230b, // onmouseover - 0x1b7: 0x2b20a, // novalidate - 0x1b8: 0x71c0a, // workertype - 0x1ba: 0x3cd07, // itemref - 0x1bd: 0x1, // a - 0x1be: 0x31803, // map - 0x1bf: 0x400c, // ontimeupdate - 0x1c0: 0x15e07, // bgsound - 0x1c1: 0x3206, // keygen - 0x1c2: 0x2705, // tbody - 0x1c5: 0x64406, // onshow - 0x1c7: 0x2501, // s - 0x1c8: 0x6607, // pattern - 0x1cc: 0x14d10, // oncanplaythrough - 0x1ce: 0x2d702, // dd - 0x1cf: 0x6f906, // srcset - 0x1d0: 0x17003, // big - 0x1d2: 0x65108, // sortable - 0x1d3: 0x48007, // onkeyup - 0x1d5: 0x5a406, // onplay - 0x1d7: 0x4b804, // meta - 0x1d8: 0x40306, // ondrop - 0x1da: 0x60008, // onscroll - 0x1db: 0x1fb0b, // crossorigin - 0x1dc: 0x5730a, // onpageshow - 0x1dd: 0x4, // abbr - 0x1de: 0x9202, // td - 0x1df: 0x58b0f, // contenteditable - 0x1e0: 0x27206, // action - 0x1e1: 0x1400b, // playsinline - 0x1e2: 0x43107, // onfocus - 0x1e3: 0x2e008, // hreflang - 0x1e5: 0x5160a, // onmouseout - 0x1e6: 0x5ea07, // onreset - 0x1e7: 0x13c08, // autoplay - 0x1e8: 0x63109, // onseeking - 0x1ea: 0x67506, // scoped - 0x1ec: 0x30a, // radiogroup - 0x1ee: 0x3800b, // contextmenu - 0x1ef: 0x52e09, // onmouseup - 0x1f1: 0x2ca06, // hgroup - 0x1f2: 0x2080f, // allowfullscreen - 0x1f3: 0x4be08, // tabindex - 0x1f6: 0x30f07, // isindex - 0x1f7: 0x1a0e, // accept-charset - 0x1f8: 0x2ae0e, // formnovalidate - 0x1fb: 0x1c90e, // annotation-xml - 0x1fc: 0x6e05, // embed - 0x1fd: 0x21806, // script - 0x1fe: 0xbb06, // dialog - 0x1ff: 0x1d707, // command + 0x1: 0x3ff08, // dropzone + 0x2: 0x3b08, // basefont + 0x3: 0x23209, // integrity + 0x4: 0x43106, // source + 0x5: 0x2c09, // accesskey + 0x6: 0x1a06, // accept + 0x7: 0x6c807, // onwheel + 0xb: 0x47407, // onkeyup + 0xc: 0x32007, // headers + 0xd: 0x67306, // scoped + 0xe: 0x67909, // onsuspend + 0xf: 0x8908, // noframes + 0x10: 0x1fa0b, // crossorigin + 0x11: 0x2e407, // onclick + 0x12: 0x3f405, // start + 0x13: 0x37a0b, // contextmenu + 0x14: 0x5e903, // src + 0x15: 0x1c404, // cols + 0x16: 0xbb06, // dialog + 0x17: 0x47a07, // preload + 0x18: 0x3c707, // itemref + 0x1b: 0x2f105, // image + 0x1d: 0x4ba09, // onloadend + 0x1e: 0x45d08, // download + 0x1f: 0x46a03, // pre + 0x23: 0x2970a, // formmethod + 0x24: 0x71303, // svg + 0x25: 0xcf01, // q + 0x26: 0x64002, // dt + 0x27: 0x1de08, // controls + 0x2a: 0x2804, // body + 0x2b: 0xd206, // strike + 0x2c: 0x3910b, // oncuechange + 0x2d: 0x4c30b, // onloadstart + 0x2e: 0x2fe07, // isindex + 0x2f: 0xb202, // li + 0x30: 0x1400b, // playsinline + 0x31: 0x34102, // mi + 0x32: 0x30806, // applet + 0x33: 0x4ce09, // onmessage + 0x35: 0x13702, // ol + 0x36: 0x1a304, // open + 0x39: 0x14d09, // oncanplay + 0x3a: 0x6bf09, // onwaiting + 0x3b: 0x11908, // oncancel + 0x3c: 0x6a908, // onunload + 0x3e: 0x53c09, // onoffline + 0x3f: 0x1a0e, // accept-charset + 0x40: 0x32004, // head + 0x42: 0x3ab09, // ondragend + 0x43: 0x1310b, // placeholder + 0x44: 0x2b30a, // formtarget + 0x45: 0x2540d, // foreignobject + 0x47: 0x400c, // ontimeupdate + 0x48: 0xdd0e, // allowusermedia + 0x4a: 0x69c0d, // onbeforeprint + 0x4b: 0x5604, // html + 0x4c: 0x9f04, // span + 0x4d: 0x64206, // hgroup + 0x4e: 0x16408, // disabled + 0x4f: 0x4204, // time + 0x51: 0x42b07, // onfocus + 0x53: 0xb00a, // malignmark + 0x55: 0x4650a, // onkeypress + 0x56: 0x55805, // class + 0x57: 0x1ab08, // colgroup + 0x58: 0x33709, // maxlength + 0x59: 0x5a908, // progress + 0x5b: 0x70405, // style + 0x5c: 0x2a10e, // formnovalidate 
+ 0x5e: 0x38b06, // oncopy + 0x60: 0x26104, // form + 0x61: 0xf606, // footer + 0x64: 0x30a, // radiogroup + 0x66: 0xfb04, // ruby + 0x67: 0x4ff0b, // onmousemove + 0x68: 0x19d08, // itemprop + 0x69: 0x2d70a, // http-equiv + 0x6a: 0x15602, // th + 0x6c: 0x6e02, // em + 0x6d: 0x38108, // menuitem + 0x6e: 0x63106, // select + 0x6f: 0x48110, // onlanguagechange + 0x70: 0x31f05, // thead + 0x71: 0x15c02, // h1 + 0x72: 0x5e906, // srcdoc + 0x75: 0x9604, // name + 0x76: 0x19106, // button + 0x77: 0x55504, // desc + 0x78: 0x17704, // kind + 0x79: 0x1bf05, // color + 0x7c: 0x58e06, // usemap + 0x7d: 0x30e08, // itemtype + 0x7f: 0x6d508, // manifest + 0x81: 0x5300c, // onmousewheel + 0x82: 0x4dc0b, // onmousedown + 0x84: 0xc05, // param + 0x85: 0x2e005, // video + 0x86: 0x4910c, // onloadeddata + 0x87: 0x6f107, // address + 0x8c: 0xef04, // ping + 0x8d: 0x24703, // for + 0x8f: 0x62f08, // onselect + 0x90: 0x30703, // map + 0x92: 0xc01, // p + 0x93: 0x8008, // reversed + 0x94: 0x54d0a, // onpagehide + 0x95: 0x3206, // keygen + 0x96: 0x34109, // minlength + 0x97: 0x3e40a, // ondragover + 0x98: 0x42407, // onerror + 0x9a: 0x2107, // charset + 0x9b: 0x29b06, // method + 0x9c: 0x101, // b + 0x9d: 0x68208, // ontoggle + 0x9e: 0x2bd06, // hidden + 0xa0: 0x3f607, // article + 0xa2: 0x63906, // onshow + 0xa3: 0x64d06, // onsort + 0xa5: 0x57b0f, // contenteditable + 0xa6: 0x66908, // onsubmit + 0xa8: 0x44f09, // oninvalid + 0xaa: 0x202, // br + 0xab: 0x10902, // id + 0xac: 0x5d04, // loop + 0xad: 0x5630a, // onpageshow + 0xb0: 0x2cf04, // href + 0xb2: 0x2210a, // figcaption + 0xb3: 0x2690e, // onautocomplete + 0xb4: 0x49106, // onload + 0xb6: 0x9c04, // rows + 0xb7: 0x1a605, // nonce + 0xb8: 0x68a14, // onunhandledrejection + 0xbb: 0x21306, // center + 0xbc: 0x59406, // onplay + 0xbd: 0x33f02, // h5 + 0xbe: 0x49d07, // listing + 0xbf: 0x57606, // public + 0xc2: 0x23b06, // figure + 0xc3: 0x57a04, // icon + 0xc4: 0x1ab03, // col + 0xc5: 0x47b03, // rel + 0xc6: 0xe605, // media + 0xc7: 0x12109, // autofocus + 0xc8: 0x19a02, // rt + 0xca: 0x2d304, // lang + 0xcc: 0x49908, // datalist + 0xce: 0x2eb06, // iframe + 0xcf: 0x36105, // muted + 0xd0: 0x6140a, // onauxclick + 0xd2: 0x3c02, // as + 0xd6: 0x3fd06, // ondrop + 0xd7: 0x1c90a, // annotation + 0xd8: 0x21908, // fieldset + 0xdb: 0x2cf08, // hreflang + 0xdc: 0x4e70c, // onmouseenter + 0xdd: 0x2a402, // mn + 0xde: 0xe60a, // mediagroup + 0xdf: 0x9805, // meter + 0xe0: 0x56c03, // wbr + 0xe2: 0x63e05, // width + 0xe3: 0x2290c, // onafterprint + 0xe4: 0x30505, // ismap + 0xe5: 0x1505, // value + 0xe7: 0x1303, // nav + 0xe8: 0x54508, // ononline + 0xe9: 0xb604, // mark + 0xea: 0xc303, // low + 0xeb: 0x3ee0b, // ondragstart + 0xef: 0x12f03, // xmp + 0xf0: 0x22407, // caption + 0xf1: 0xd904, // type + 0xf2: 0x70907, // summary + 0xf3: 0x6802, // tt + 0xf4: 0x20809, // translate + 0xf5: 0x1870a, // blockquote + 0xf8: 0x15702, // hr + 0xfa: 0x2705, // tbody + 0xfc: 0x7b07, // picture + 0xfd: 0x5206, // height + 0xfe: 0x19c04, // cite + 0xff: 0x2501, // s + 0x101: 0xff05, // async + 0x102: 0x56f07, // onpaste + 0x103: 0x19507, // onabort + 0x104: 0x2b706, // target + 0x105: 0x14b03, // bdo + 0x106: 0x1f006, // coords + 0x107: 0x5e108, // onresize + 0x108: 0x71908, // template + 0x10a: 0x3a02, // rb + 0x10b: 0x2a50a, // novalidate + 0x10c: 0x460e, // updateviacache + 0x10d: 0x71003, // sup + 0x10e: 0x6c07, // noembed + 0x10f: 0x16b03, // div + 0x110: 0x6f707, // srclang + 0x111: 0x17a09, // draggable + 0x112: 0x67305, // scope + 0x113: 0x5905, // label + 0x114: 0x22f02, 
// rp + 0x115: 0x23f08, // required + 0x116: 0x3780d, // oncontextmenu + 0x117: 0x5e504, // size + 0x118: 0x5b00a, // spellcheck + 0x119: 0x3f04, // font + 0x11a: 0x9c07, // rowspan + 0x11b: 0x10a07, // default + 0x11d: 0x44307, // oninput + 0x11e: 0x38506, // itemid + 0x11f: 0x5ee04, // code + 0x120: 0xaa07, // acronym + 0x121: 0x3b04, // base + 0x125: 0x2470d, // foreignObject + 0x126: 0x2ca04, // high + 0x127: 0x3cb0e, // referrerpolicy + 0x128: 0x33703, // max + 0x129: 0x59d0a, // onpopstate + 0x12a: 0x2fc02, // h4 + 0x12b: 0x4ac04, // meta + 0x12c: 0x17305, // blink + 0x12e: 0x5f508, // onscroll + 0x12f: 0x59409, // onplaying + 0x130: 0xc113, // allowpaymentrequest + 0x131: 0x19a03, // rtc + 0x132: 0x72b04, // wrap + 0x134: 0x8b08, // frameset + 0x135: 0x32605, // small + 0x137: 0x32006, // header + 0x138: 0x40409, // onemptied + 0x139: 0x34902, // h6 + 0x13a: 0x35908, // multiple + 0x13c: 0x52a06, // prompt + 0x13f: 0x28e09, // challenge + 0x141: 0x4370c, // onhashchange + 0x142: 0x57b07, // content + 0x143: 0x1c90e, // annotation-xml + 0x144: 0x36607, // onclose + 0x145: 0x14d10, // oncanplaythrough + 0x148: 0x5170b, // onmouseover + 0x149: 0x64f08, // sortable + 0x14a: 0xa402, // mo + 0x14b: 0x2cd02, // h3 + 0x14c: 0x2c406, // script + 0x14d: 0x41d07, // onended + 0x14f: 0x64706, // poster + 0x150: 0x7210a, // workertype + 0x153: 0x1f505, // shape + 0x154: 0x4, // abbr + 0x155: 0x1, // a + 0x156: 0x2bf02, // dd + 0x157: 0x71606, // system + 0x158: 0x4ce0e, // onmessageerror + 0x159: 0x36b08, // seamless + 0x15a: 0x2610a, // formaction + 0x15b: 0x6e106, // option + 0x15c: 0x31d04, // math + 0x15d: 0x62609, // onseeking + 0x15e: 0x39c05, // oncut + 0x15f: 0x44c03, // del + 0x160: 0x11005, // title + 0x161: 0x11505, // audio + 0x162: 0x63108, // selected + 0x165: 0x3b40b, // ondragenter + 0x166: 0x46e06, // spacer + 0x167: 0x4a410, // onloadedmetadata + 0x168: 0x44505, // input + 0x16a: 0x58505, // table + 0x16b: 0x41508, // onchange + 0x16e: 0x5f005, // defer + 0x171: 0x50a0a, // onmouseout + 0x172: 0x20504, // slot + 0x175: 0x3704, // nobr + 0x177: 0x1d707, // command + 0x17a: 0x7207, // details + 0x17b: 0x38104, // menu + 0x17c: 0xb903, // kbd + 0x17d: 0x57304, // step + 0x17e: 0x20303, // ins + 0x17f: 0x13c08, // autoplay + 0x182: 0x34103, // min + 0x183: 0x17404, // link + 0x185: 0x40d10, // ondurationchange + 0x186: 0x9202, // td + 0x187: 0x8b05, // frame + 0x18a: 0x2ab08, // datetime + 0x18b: 0x44509, // inputmode + 0x18c: 0x35108, // readonly + 0x18d: 0x21104, // face + 0x18f: 0x5e505, // sizes + 0x191: 0x4b208, // tabindex + 0x192: 0x6db06, // strong + 0x193: 0xba03, // bdi + 0x194: 0x6fe06, // srcset + 0x196: 0x67202, // ms + 0x197: 0x5b507, // checked + 0x198: 0xb105, // align + 0x199: 0x1e507, // section + 0x19b: 0x6e05, // embed + 0x19d: 0x15e07, // bgsound + 0x1a2: 0x49d04, // list + 0x1a3: 0x61e08, // onseeked + 0x1a4: 0x66009, // onstorage + 0x1a5: 0x2f603, // img + 0x1a6: 0xf505, // tfoot + 0x1a9: 0x26913, // onautocompleteerror + 0x1aa: 0x5fd19, // onsecuritypolicyviolation + 0x1ad: 0x9303, // dir + 0x1ae: 0x9307, // dirname + 0x1b0: 0x5a70a, // onprogress + 0x1b2: 0x65709, // onstalled + 0x1b5: 0x66f09, // itemscope + 0x1b6: 0x49904, // data + 0x1b7: 0x3d90b, // ondragleave + 0x1b8: 0x56102, // h2 + 0x1b9: 0x2f706, // mglyph + 0x1ba: 0x16502, // is + 0x1bb: 0x6e50e, // onbeforeunload + 0x1bc: 0x2830d, // typemustmatch + 0x1bd: 0x3ab06, // ondrag + 0x1be: 0x5da07, // onreset + 0x1c0: 0x51106, // output + 0x1c1: 0x12907, // sandbox + 0x1c2: 0x1b209, // plaintext + 
0x1c4: 0x34c08, // textarea + 0x1c7: 0xd607, // keytype + 0x1c8: 0x34b05, // mtext + 0x1c9: 0x6b10e, // onvolumechange + 0x1ca: 0x1ea06, // onblur + 0x1cb: 0x58a07, // onpause + 0x1cd: 0x5bc0c, // onratechange + 0x1ce: 0x10705, // aside + 0x1cf: 0x6cf07, // optimum + 0x1d1: 0x45809, // onkeydown + 0x1d2: 0x1c407, // colspan + 0x1d3: 0x1004, // main + 0x1d4: 0x66b03, // sub + 0x1d5: 0x25b06, // object + 0x1d6: 0x55c06, // search + 0x1d7: 0x37206, // sorted + 0x1d8: 0x17003, // big + 0x1d9: 0xb01, // u + 0x1db: 0x26b0c, // autocomplete + 0x1dc: 0xcc02, // tr + 0x1dd: 0xf303, // alt + 0x1df: 0x7804, // samp + 0x1e0: 0x5c812, // onrejectionhandled + 0x1e1: 0x4f30c, // onmouseleave + 0x1e2: 0x28007, // enctype + 0x1e3: 0xa208, // nomodule + 0x1e5: 0x3280f, // allowfullscreen + 0x1e6: 0x5f08, // optgroup + 0x1e8: 0x27c0b, // formenctype + 0x1e9: 0x18106, // legend + 0x1ea: 0x10306, // canvas + 0x1eb: 0x6607, // pattern + 0x1ec: 0x2c208, // noscript + 0x1ed: 0x601, // i + 0x1ee: 0x5d602, // dl + 0x1ef: 0xa702, // ul + 0x1f2: 0x52209, // onmouseup + 0x1f4: 0x1ba05, // track + 0x1f7: 0x3a10a, // ondblclick + 0x1f8: 0x3bf0a, // ondragexit + 0x1fa: 0x8703, // dfn + 0x1fc: 0x26506, // action + 0x1fd: 0x35004, // area + 0x1fe: 0x31607, // marquee + 0x1ff: 0x16d03, // var } const atomText = "abbradiogrouparamainavalueaccept-charsetbodyaccesskeygenobrb" + @@ -758,26 +760,26 @@ const atomText = "abbradiogrouparamainavalueaccept-charsetbodyaccesskeygenobrb" "dboxmplaceholderautoplaysinlinebdoncanplaythrough1bgsoundisa" + "bledivarbigblinkindraggablegendblockquotebuttonabortcitempro" + "penoncecolgrouplaintextrackcolorcolspannotation-xmlcommandco" + - "ntrolshapecoordslotranslatecrossoriginsmallowfullscreenoscri" + - "ptfacenterfieldsetfigcaptionafterprintegrityfigurequiredfore" + - "ignObjectforeignobjectformactionautocompleteerrorformenctype" + - "mustmatchallengeformmethodformnovalidatetimeformtargethgroup" + - "osterhiddenhigh2hreflanghttp-equivideonclickiframeimageimgly" + - "ph3isindexismappletitemtypemarqueematheadersortedmaxlength4m" + - "inlength5mtextareadonlymultiplemutedoncloseamlessourceoncont" + - "extmenuitemidoncopyoncuechangeoncutondblclickondragendondrag" + - "enterondragexitemreferrerpolicyondragleaveondragoverondragst" + - "articleondropzonemptiedondurationchangeonendedonerroronfocus" + - "paceronhashchangeoninputmodeloninvalidonkeydownloadonkeypres" + - "spellcheckedonkeyupreloadonlanguagechangeonloadeddatalisting" + - "onloadedmetadatabindexonloadendonloadstartonmessageerroronmo" + - "usedownonmouseenteronmouseleaveonmousemoveonmouseoutputonmou" + - "seoveronmouseupromptonmousewheelonofflineononlineonpagehides" + - "classectionbluronpageshowbronpastepublicontenteditableonpaus" + - "emaponplayingonpopstateonprogressrcdocodeferonratechangeonre" + - "jectionhandledonresetonresizesrclangonscrollonsecuritypolicy" + - "violationauxclickonseekedonseekingonselectedonshowidth6onsor" + - "tableonstalledonstorageonsubmitemscopedonsuspendontoggleonun" + - "handledrejectionbeforeprintonunloadonvolumechangeonwaitingon" + - "wheeloptimumanifestrongoptionbeforeunloaddressrcsetstylesumm" + - "arysupsvgsystemplateworkertypewrap" + "ntrolsectionblurcoordshapecrossoriginslotranslatefacenterfie" + + "ldsetfigcaptionafterprintegrityfigurequiredforeignObjectfore" + + "ignobjectformactionautocompleteerrorformenctypemustmatchalle" + + "ngeformmethodformnovalidatetimeformtargethiddenoscripthigh3h" + + "reflanghttp-equivideonclickiframeimageimglyph4isindexismappl" + + 
"etitemtypemarqueematheadersmallowfullscreenmaxlength5minleng" + + "th6mtextareadonlymultiplemutedoncloseamlessortedoncontextmen" + + "uitemidoncopyoncuechangeoncutondblclickondragendondragentero" + + "ndragexitemreferrerpolicyondragleaveondragoverondragstarticl" + + "eondropzonemptiedondurationchangeonendedonerroronfocusourceo" + + "nhashchangeoninputmodeloninvalidonkeydownloadonkeypresspacer" + + "onkeyupreloadonlanguagechangeonloadeddatalistingonloadedmeta" + + "databindexonloadendonloadstartonmessageerroronmousedownonmou" + + "seenteronmouseleaveonmousemoveonmouseoutputonmouseoveronmous" + + "eupromptonmousewheelonofflineononlineonpagehidesclassearch2o" + + "npageshowbronpastepublicontenteditableonpausemaponplayingonp" + + "opstateonprogresspellcheckedonratechangeonrejectionhandledon" + + "resetonresizesrcdocodeferonscrollonsecuritypolicyviolationau" + + "xclickonseekedonseekingonselectedonshowidthgrouposteronsorta" + + "bleonstalledonstorageonsubmitemscopedonsuspendontoggleonunha" + + "ndledrejectionbeforeprintonunloadonvolumechangeonwaitingonwh" + + "eeloptimumanifestrongoptionbeforeunloaddressrclangsrcsetstyl" + + "esummarysupsvgsystemplateworkertypewrap" diff --git a/vendor/golang.org/x/net/html/parse.go b/vendor/golang.org/x/net/html/parse.go index 643c674e37..518ee4c94e 100644 --- a/vendor/golang.org/x/net/html/parse.go +++ b/vendor/golang.org/x/net/html/parse.go @@ -924,7 +924,7 @@ func inBodyIM(p *parser) bool { p.addElement() p.im = inFramesetIM return true - case a.Address, a.Article, a.Aside, a.Blockquote, a.Center, a.Details, a.Dialog, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Main, a.Menu, a.Nav, a.Ol, a.P, a.Section, a.Summary, a.Ul: + case a.Address, a.Article, a.Aside, a.Blockquote, a.Center, a.Details, a.Dialog, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Main, a.Menu, a.Nav, a.Ol, a.P, a.Search, a.Section, a.Summary, a.Ul: p.popUntil(buttonScope, a.P) p.addElement() case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6: @@ -1136,7 +1136,7 @@ func inBodyIM(p *parser) bool { return false } return true - case a.Address, a.Article, a.Aside, a.Blockquote, a.Button, a.Center, a.Details, a.Dialog, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Listing, a.Main, a.Menu, a.Nav, a.Ol, a.Pre, a.Section, a.Summary, a.Ul: + case a.Address, a.Article, a.Aside, a.Blockquote, a.Button, a.Center, a.Details, a.Dialog, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Listing, a.Main, a.Menu, a.Nav, a.Ol, a.Pre, a.Search, a.Section, a.Summary, a.Ul: p.popUntil(defaultScope, p.tok.DataAtom) case a.Form: if p.oe.contains(a.Template) { diff --git a/vendor/golang.org/x/net/html/token.go b/vendor/golang.org/x/net/html/token.go index 3c57880d69..6598c1f7b3 100644 --- a/vendor/golang.org/x/net/html/token.go +++ b/vendor/golang.org/x/net/html/token.go @@ -839,8 +839,22 @@ func (z *Tokenizer) readStartTag() TokenType { if raw { z.rawTag = strings.ToLower(string(z.buf[z.data.start:z.data.end])) } - // Look for a self-closing token like "
". - if z.err == nil && z.buf[z.raw.end-2] == '/' { + // Look for a self-closing token (e.g.
). + // + // Originally, we did this by just checking that the last character of the + // tag (ignoring the closing bracket) was a solidus (/) character, but this + // is not always accurate. + // + // We need to be careful that we don't misinterpret a non-self-closing tag + // as self-closing, as can happen if the tag contains unquoted attribute + // values (i.e. <p id=></p>
). + // + // To avoid this, we check that the last non-bracket character of the tag + // (z.raw.end-2) isn't the same character as the last non-quote character of + // the last attribute of the tag (z.pendingAttr[1].end-1), if the tag has + // attributes. + nAttrs := len(z.attr) + if z.err == nil && z.buf[z.raw.end-2] == '/' && (nAttrs == 0 || z.raw.end-2 != z.attr[nAttrs-1][1].end-1) { return SelfClosingTagToken } return StartTagToken diff --git a/vendor/golang.org/x/net/http/httpproxy/proxy.go b/vendor/golang.org/x/net/http/httpproxy/proxy.go index 6404aaf157..d89c257ae7 100644 --- a/vendor/golang.org/x/net/http/httpproxy/proxy.go +++ b/vendor/golang.org/x/net/http/httpproxy/proxy.go @@ -14,6 +14,7 @@ import ( "errors" "fmt" "net" + "net/netip" "net/url" "os" "strings" @@ -177,8 +178,10 @@ func (cfg *config) useProxy(addr string) bool { if host == "localhost" { return false } - ip := net.ParseIP(host) - if ip != nil { + nip, err := netip.ParseAddr(host) + var ip net.IP + if err == nil { + ip = net.IP(nip.AsSlice()) if ip.IsLoopback() { return false } @@ -360,6 +363,9 @@ type domainMatch struct { } func (m domainMatch) match(host, port string, ip net.IP) bool { + if ip != nil { + return false + } if strings.HasSuffix(host, m.host) || (m.matchHost && host == m.host[1:]) { return m.port == "" || m.port == port } diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index 81faec7e75..97bd8b06f7 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -225,6 +225,11 @@ var fhBytes = sync.Pool{ }, } +func invalidHTTP1LookingFrameHeader() FrameHeader { + fh, _ := readFrameHeader(make([]byte, frameHeaderLen), strings.NewReader("HTTP/1.1 ")) + return fh +} + // ReadFrameHeader reads 9 bytes from r and returns a FrameHeader. // Most users should use Framer.ReadFrame instead. func ReadFrameHeader(r io.Reader) (FrameHeader, error) { @@ -503,10 +508,16 @@ func (fr *Framer) ReadFrame() (Frame, error) { return nil, err } if fh.Length > fr.maxReadSize { + if fh == invalidHTTP1LookingFrameHeader() { + return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", err) + } return nil, ErrFrameTooLarge } payload := fr.getReadBuf(fh.Length) if _, err := io.ReadFull(fr.r, payload); err != nil { + if fh == invalidHTTP1LookingFrameHeader() { + return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", err) + } return nil, err } f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, fr.countError, payload) diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index c7601c909f..6c18ea230b 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -34,11 +34,19 @@ import ( ) var ( - VerboseLogs bool - logFrameWrites bool - logFrameReads bool - inTests bool - disableExtendedConnectProtocol bool + VerboseLogs bool + logFrameWrites bool + logFrameReads bool + inTests bool + + // Enabling extended CONNECT by causes browsers to attempt to use + // WebSockets-over-HTTP/2. This results in problems when the server's websocket + // package doesn't support extended CONNECT. + // + // Disable extended CONNECT by default for now. + // + // Issue #71128. 
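+ // + // Callers can opt back in at runtime with GODEBUG=http2xconnect=1, which + // the init function below handles by setting this back to false.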
+ disableExtendedConnectProtocol = true ) func init() { @@ -51,8 +59,8 @@ func init() { logFrameWrites = true logFrameReads = true } - if strings.Contains(e, "http2xconnect=0") { - disableExtendedConnectProtocol = true + if strings.Contains(e, "http2xconnect=1") { + disableExtendedConnectProtocol = false } } @@ -407,23 +415,6 @@ func (s *sorter) SortStrings(ss []string) { s.v = save } -// validPseudoPath reports whether v is a valid :path pseudo-header -// value. It must be either: -// -// - a non-empty string starting with '/' -// - the string '*', for OPTIONS requests. -// -// For now this is only used a quick check for deciding when to clean -// up Opaque URLs before sending requests from the Transport. -// See golang.org/issue/16847 -// -// We used to enforce that the path also didn't start with "//", but -// Google's GFE accepts such paths and Chrome sends them, so ignore -// that part of the spec. See golang.org/issue/19103. -func validPseudoPath(v string) bool { - return (len(v) > 0 && v[0] == '/') || v == "*" -} - // incomparable is a zero-width, non-comparable type. Adding it to a struct // makes that struct also non-comparable, and generally doesn't add // any size (as long as it's first). diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index b55547aec6..51fca38f61 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -50,6 +50,7 @@ import ( "golang.org/x/net/http/httpguts" "golang.org/x/net/http2/hpack" + "golang.org/x/net/internal/httpcommon" ) const ( @@ -812,8 +813,7 @@ const maxCachedCanonicalHeadersKeysSize = 2048 func (sc *serverConn) canonicalHeader(v string) string { sc.serveG.check() - buildCommonHeaderMapsOnce() - cv, ok := commonCanonHeader[v] + cv, ok := httpcommon.CachedCanonicalHeader(v) if ok { return cv } @@ -1068,7 +1068,10 @@ func (sc *serverConn) serve(conf http2Config) { func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) { if sc.pingSent { - sc.vlogf("timeout waiting for PING response") + sc.logf("timeout waiting for PING response") + if f := sc.countErrorFunc; f != nil { + f("conn_close_lost_ping") + } sc.conn.Close() return } @@ -2233,25 +2236,25 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) { sc.serveG.check() - rp := requestParam{ - method: f.PseudoValue("method"), - scheme: f.PseudoValue("scheme"), - authority: f.PseudoValue("authority"), - path: f.PseudoValue("path"), - protocol: f.PseudoValue("protocol"), + rp := httpcommon.ServerRequestParam{ + Method: f.PseudoValue("method"), + Scheme: f.PseudoValue("scheme"), + Authority: f.PseudoValue("authority"), + Path: f.PseudoValue("path"), + Protocol: f.PseudoValue("protocol"), } // extended connect is disabled, so we should not see :protocol - if disableExtendedConnectProtocol && rp.protocol != "" { + if disableExtendedConnectProtocol && rp.Protocol != "" { return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) } - isConnect := rp.method == "CONNECT" + isConnect := rp.Method == "CONNECT" if isConnect { - if rp.protocol == "" && (rp.path != "" || rp.scheme != "" || rp.authority == "") { + if rp.Protocol == "" && (rp.Path != "" || rp.Scheme != "" || rp.Authority == "") { return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol)) } - } else if rp.method == "" || rp.path == "" || (rp.scheme != 
"https" && rp.scheme != "http") { + } else if rp.Method == "" || rp.Path == "" || (rp.Scheme != "https" && rp.Scheme != "http") { // See 8.1.2.6 Malformed Requests and Responses: // // Malformed requests or responses that are detected @@ -2265,15 +2268,16 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res return nil, nil, sc.countError("bad_path_method", streamError(f.StreamID, ErrCodeProtocol)) } - rp.header = make(http.Header) + header := make(http.Header) + rp.Header = header for _, hf := range f.RegularFields() { - rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value) + header.Add(sc.canonicalHeader(hf.Name), hf.Value) } - if rp.authority == "" { - rp.authority = rp.header.Get("Host") + if rp.Authority == "" { + rp.Authority = header.Get("Host") } - if rp.protocol != "" { - rp.header.Set(":protocol", rp.protocol) + if rp.Protocol != "" { + header.Set(":protocol", rp.Protocol) } rw, req, err := sc.newWriterAndRequestNoBody(st, rp) @@ -2282,7 +2286,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res } bodyOpen := !f.StreamEnded() if bodyOpen { - if vv, ok := rp.header["Content-Length"]; ok { + if vv, ok := rp.Header["Content-Length"]; ok { if cl, err := strconv.ParseUint(vv[0], 10, 63); err == nil { req.ContentLength = int64(cl) } else { @@ -2298,84 +2302,38 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res return rw, req, nil } -type requestParam struct { - method string - scheme, authority, path string - protocol string - header http.Header -} - -func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) { +func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp httpcommon.ServerRequestParam) (*responseWriter, *http.Request, error) { sc.serveG.check() var tlsState *tls.ConnectionState // nil if not scheme https - if rp.scheme == "https" { + if rp.Scheme == "https" { tlsState = sc.tlsState } - needsContinue := httpguts.HeaderValuesContainsToken(rp.header["Expect"], "100-continue") - if needsContinue { - rp.header.Del("Expect") - } - // Merge Cookie headers into one "; "-delimited value. - if cookies := rp.header["Cookie"]; len(cookies) > 1 { - rp.header.Set("Cookie", strings.Join(cookies, "; ")) - } - - // Setup Trailers - var trailer http.Header - for _, v := range rp.header["Trailer"] { - for _, key := range strings.Split(v, ",") { - key = http.CanonicalHeaderKey(textproto.TrimString(key)) - switch key { - case "Transfer-Encoding", "Trailer", "Content-Length": - // Bogus. (copy of http1 rules) - // Ignore. 
- default: - if trailer == nil { - trailer = make(http.Header) - } - trailer[key] = nil - } - } - } - delete(rp.header, "Trailer") - - var url_ *url.URL - var requestURI string - if rp.method == "CONNECT" && rp.protocol == "" { - url_ = &url.URL{Host: rp.authority} - requestURI = rp.authority // mimic HTTP/1 server behavior - } else { - var err error - url_, err = url.ParseRequestURI(rp.path) - if err != nil { - return nil, nil, sc.countError("bad_path", streamError(st.id, ErrCodeProtocol)) - } - requestURI = rp.path + res := httpcommon.NewServerRequest(rp) + if res.InvalidReason != "" { + return nil, nil, sc.countError(res.InvalidReason, streamError(st.id, ErrCodeProtocol)) } body := &requestBody{ conn: sc, stream: st, - needsContinue: needsContinue, + needsContinue: res.NeedsContinue, } - req := &http.Request{ - Method: rp.method, - URL: url_, + req := (&http.Request{ + Method: rp.Method, + URL: res.URL, RemoteAddr: sc.remoteAddrStr, - Header: rp.header, - RequestURI: requestURI, + Header: rp.Header, + RequestURI: res.RequestURI, Proto: "HTTP/2.0", ProtoMajor: 2, ProtoMinor: 0, TLS: tlsState, - Host: rp.authority, + Host: rp.Authority, Body: body, - Trailer: trailer, - } - req = req.WithContext(st.ctx) - + Trailer: res.Trailer, + }).WithContext(st.ctx) rw := sc.newResponseWriter(st, req) return rw, req, nil } @@ -3270,12 +3228,12 @@ func (sc *serverConn) startPush(msg *startPushRequest) { // we start in "half closed (remote)" for simplicity. // See further comments at the definition of stateHalfClosedRemote. promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote) - rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{ - method: msg.method, - scheme: msg.url.Scheme, - authority: msg.url.Host, - path: msg.url.RequestURI(), - header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE + rw, req, err := sc.newWriterAndRequestNoBody(promised, httpcommon.ServerRequestParam{ + Method: msg.method, + Scheme: msg.url.Scheme, + Authority: msg.url.Host, + Path: msg.url.RequestURI(), + Header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE }) if err != nil { // Should not happen, since we've already validated msg.url. diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index b2e2ed3373..f26356b9cd 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -25,7 +25,6 @@ import ( "net/http" "net/http/httptrace" "net/textproto" - "sort" "strconv" "strings" "sync" @@ -35,6 +34,7 @@ import ( "golang.org/x/net/http/httpguts" "golang.org/x/net/http2/hpack" "golang.org/x/net/idna" + "golang.org/x/net/internal/httpcommon" ) const ( @@ -1275,23 +1275,6 @@ func (cc *ClientConn) closeForLostPing() { // exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests. 
var errRequestCanceled = errors.New("net/http: request canceled") -func commaSeparatedTrailers(req *http.Request) (string, error) { - keys := make([]string, 0, len(req.Trailer)) - for k := range req.Trailer { - k = canonicalHeader(k) - switch k { - case "Transfer-Encoding", "Trailer", "Content-Length": - return "", fmt.Errorf("invalid Trailer key %q", k) - } - keys = append(keys, k) - } - if len(keys) > 0 { - sort.Strings(keys) - return strings.Join(keys, ","), nil - } - return "", nil -} - func (cc *ClientConn) responseHeaderTimeout() time.Duration { if cc.t.t1 != nil { return cc.t.t1.ResponseHeaderTimeout @@ -1303,22 +1286,6 @@ func (cc *ClientConn) responseHeaderTimeout() time.Duration { return 0 } -// checkConnHeaders checks whether req has any invalid connection-level headers. -// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields. -// Certain headers are special-cased as okay but not transmitted later. -func checkConnHeaders(req *http.Request) error { - if v := req.Header.Get("Upgrade"); v != "" { - return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"]) - } - if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { - return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv) - } - if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) { - return fmt.Errorf("http2: invalid Connection request header: %q", vv) - } - return nil -} - // actualContentLength returns a sanitized version of // req.ContentLength, where 0 actually means zero (not unknown) and -1 // means unknown. @@ -1364,25 +1331,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) donec: make(chan struct{}), } - // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? - if !cc.t.disableCompression() && - req.Header.Get("Accept-Encoding") == "" && - req.Header.Get("Range") == "" && - !cs.isHead { - // Request gzip only, not deflate. Deflate is ambiguous and - // not as universally supported anyway. - // See: https://zlib.net/zlib_faq.html#faq39 - // - // Note that we don't request this for HEAD requests, - // due to a bug in nginx: - // http://trac.nginx.org/nginx/ticket/358 - // https://golang.org/issue/5522 - // - // We don't request gzip if the request is for a range, since - // auto-decoding a portion of a gzipped document will just fail - // anyway. 
See https://golang.org/issue/8923 - cs.requestedGzip = true - } + cs.requestedGzip = httpcommon.IsRequestGzip(req.Method, req.Header, cc.t.disableCompression()) go cs.doRequest(req, streamf) @@ -1496,10 +1445,6 @@ func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStre cc := cs.cc ctx := cs.ctx - if err := checkConnHeaders(req); err != nil { - return err - } - // wait for setting frames to be received, a server can change this value later, // but we just wait for the first settings frame var isExtendedConnect bool @@ -1663,26 +1608,39 @@ func (cs *clientStream) encodeAndWriteHeaders(req *http.Request) error { // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is // sent by writeRequestBody below, along with any Trailers, // again in form HEADERS{1}, CONTINUATION{0,}) - trailers, err := commaSeparatedTrailers(req) - if err != nil { - return err - } - hasTrailers := trailers != "" - contentLen := actualContentLength(req) - hasBody := contentLen != 0 - hdrs, err := cc.encodeHeaders(req, cs.requestedGzip, trailers, contentLen) + cc.hbuf.Reset() + res, err := encodeRequestHeaders(req, cs.requestedGzip, cc.peerMaxHeaderListSize, func(name, value string) { + cc.writeHeader(name, value) + }) if err != nil { - return err + return fmt.Errorf("http2: %w", err) } + hdrs := cc.hbuf.Bytes() // Write the request. - endStream := !hasBody && !hasTrailers + endStream := !res.HasBody && !res.HasTrailers cs.sentHeaders = true err = cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs) traceWroteHeaders(cs.trace) return err } +func encodeRequestHeaders(req *http.Request, addGzipHeader bool, peerMaxHeaderListSize uint64, headerf func(name, value string)) (httpcommon.EncodeHeadersResult, error) { + return httpcommon.EncodeHeaders(req.Context(), httpcommon.EncodeHeadersParam{ + Request: httpcommon.Request{ + Header: req.Header, + Trailer: req.Trailer, + URL: req.URL, + Host: req.Host, + Method: req.Method, + ActualContentLength: actualContentLength(req), + }, + AddGzipHeader: addGzipHeader, + PeerMaxHeaderListSize: peerMaxHeaderListSize, + DefaultUserAgent: defaultUserAgent, + }, headerf) +} + // cleanupWriteRequest performs post-request tasks. // // If err (the result of writeRequest) is non-nil and the stream is not closed, @@ -2070,218 +2028,6 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) } } -func validateHeaders(hdrs http.Header) string { - for k, vv := range hdrs { - if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" { - return fmt.Sprintf("name %q", k) - } - for _, v := range vv { - if !httpguts.ValidHeaderFieldValue(v) { - // Don't include the value in the error, - // because it may be sensitive. - return fmt.Sprintf("value for header %q", k) - } - } - } - return "" -} - -var errNilRequestURL = errors.New("http2: Request.URI is nil") - -func isNormalConnect(req *http.Request) bool { - return req.Method == "CONNECT" && req.Header.Get(":protocol") == "" -} - -// requires cc.wmu be held. 
-func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { - cc.hbuf.Reset() - if req.URL == nil { - return nil, errNilRequestURL - } - - host := req.Host - if host == "" { - host = req.URL.Host - } - host, err := httpguts.PunycodeHostPort(host) - if err != nil { - return nil, err - } - if !httpguts.ValidHostHeader(host) { - return nil, errors.New("http2: invalid Host header") - } - - var path string - if !isNormalConnect(req) { - path = req.URL.RequestURI() - if !validPseudoPath(path) { - orig := path - path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) - if !validPseudoPath(path) { - if req.URL.Opaque != "" { - return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) - } else { - return nil, fmt.Errorf("invalid request :path %q", orig) - } - } - } - } - - // Check for any invalid headers+trailers and return an error before we - // potentially pollute our hpack state. (We want to be able to - // continue to reuse the hpack encoder for future requests) - if err := validateHeaders(req.Header); err != "" { - return nil, fmt.Errorf("invalid HTTP header %s", err) - } - if err := validateHeaders(req.Trailer); err != "" { - return nil, fmt.Errorf("invalid HTTP trailer %s", err) - } - - enumerateHeaders := func(f func(name, value string)) { - // 8.1.2.3 Request Pseudo-Header Fields - // The :path pseudo-header field includes the path and query parts of the - // target URI (the path-absolute production and optionally a '?' character - // followed by the query production, see Sections 3.3 and 3.4 of - // [RFC3986]). - f(":authority", host) - m := req.Method - if m == "" { - m = http.MethodGet - } - f(":method", m) - if !isNormalConnect(req) { - f(":path", path) - f(":scheme", req.URL.Scheme) - } - if trailers != "" { - f("trailer", trailers) - } - - var didUA bool - for k, vv := range req.Header { - if asciiEqualFold(k, "host") || asciiEqualFold(k, "content-length") { - // Host is :authority, already sent. - // Content-Length is automatic, set below. - continue - } else if asciiEqualFold(k, "connection") || - asciiEqualFold(k, "proxy-connection") || - asciiEqualFold(k, "transfer-encoding") || - asciiEqualFold(k, "upgrade") || - asciiEqualFold(k, "keep-alive") { - // Per 8.1.2.2 Connection-Specific Header - // Fields, don't send connection-specific - // fields. We have already checked if any - // are error-worthy so just ignore the rest. - continue - } else if asciiEqualFold(k, "user-agent") { - // Match Go's http1 behavior: at most one - // User-Agent. If set to nil or empty string, - // then omit it. Otherwise if not mentioned, - // include the default (below). - didUA = true - if len(vv) < 1 { - continue - } - vv = vv[:1] - if vv[0] == "" { - continue - } - } else if asciiEqualFold(k, "cookie") { - // Per 8.1.2.5 To allow for better compression efficiency, the - // Cookie header field MAY be split into separate header fields, - // each with one or more cookie-pairs. - for _, v := range vv { - for { - p := strings.IndexByte(v, ';') - if p < 0 { - break - } - f("cookie", v[:p]) - p++ - // strip space after semicolon if any. 
- for p+1 <= len(v) && v[p] == ' ' { - p++ - } - v = v[p:] - } - if len(v) > 0 { - f("cookie", v) - } - } - continue - } - - for _, v := range vv { - f(k, v) - } - } - if shouldSendReqContentLength(req.Method, contentLength) { - f("content-length", strconv.FormatInt(contentLength, 10)) - } - if addGzipHeader { - f("accept-encoding", "gzip") - } - if !didUA { - f("user-agent", defaultUserAgent) - } - } - - // Do a first pass over the headers counting bytes to ensure - // we don't exceed cc.peerMaxHeaderListSize. This is done as a - // separate pass before encoding the headers to prevent - // modifying the hpack state. - hlSize := uint64(0) - enumerateHeaders(func(name, value string) { - hf := hpack.HeaderField{Name: name, Value: value} - hlSize += uint64(hf.Size()) - }) - - if hlSize > cc.peerMaxHeaderListSize { - return nil, errRequestHeaderListSize - } - - trace := httptrace.ContextClientTrace(req.Context()) - traceHeaders := traceHasWroteHeaderField(trace) - - // Header list size is ok. Write the headers. - enumerateHeaders(func(name, value string) { - name, ascii := lowerHeader(name) - if !ascii { - // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header - // field names have to be ASCII characters (just as in HTTP/1.x). - return - } - cc.writeHeader(name, value) - if traceHeaders { - traceWroteHeaderField(trace, name, value) - } - }) - - return cc.hbuf.Bytes(), nil -} - -// shouldSendReqContentLength reports whether the http2.Transport should send -// a "content-length" request header. This logic is basically a copy of the net/http -// transferWriter.shouldSendContentLength. -// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). -// -1 means unknown. -func shouldSendReqContentLength(method string, contentLength int64) bool { - if contentLength > 0 { - return true - } - if contentLength < 0 { - return false - } - // For zero bodies, whether we send a content-length depends on the method. - // It also kinda doesn't matter for http2 either way, with END_STREAM. - switch method { - case "POST", "PUT", "PATCH": - return true - default: - return false - } -} - // requires cc.wmu be held. func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) { cc.hbuf.Reset() @@ -2298,7 +2044,7 @@ func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) { } for k, vv := range trailer { - lowKey, ascii := lowerHeader(k) + lowKey, ascii := httpcommon.LowerHeader(k) if !ascii { // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header // field names have to be ASCII characters (just as in HTTP/1.x). @@ -2464,6 +2210,13 @@ func (rl *clientConnReadLoop) cleanup() { } cc.cond.Broadcast() cc.mu.Unlock() + + if !cc.seenSettings { + // If we have a pending request that wants extended CONNECT, + // let it continue and fail with the connection error. 
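+ // The server never sent its initial SETTINGS frame, so extended CONNECT + // support is unknowable; closing seenSettingsChan unblocks any writeRequest + // waiting on it, so the request fails with the real connection error + // instead of hanging.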
+ cc.extendedConnectAllowed = true + close(cc.seenSettingsChan) + } } // countReadFrameError calls Transport.CountError with a string @@ -2556,9 +2309,6 @@ func (rl *clientConnReadLoop) run() error { if VerboseLogs { cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err) } - if !cc.seenSettings { - close(cc.seenSettingsChan) - } return err } } @@ -2653,7 +2403,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra Status: status + " " + http.StatusText(statusCode), } for _, hf := range regularFields { - key := canonicalHeader(hf.Name) + key := httpcommon.CanonicalHeader(hf.Name) if key == "Trailer" { t := res.Trailer if t == nil { @@ -2661,7 +2411,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra res.Trailer = t } foreachHeaderElement(hf.Value, func(v string) { - t[canonicalHeader(v)] = nil + t[httpcommon.CanonicalHeader(v)] = nil }) } else { vv := header[key] @@ -2785,7 +2535,7 @@ func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFr trailer := make(http.Header) for _, hf := range f.RegularFields() { - key := canonicalHeader(hf.Name) + key := httpcommon.CanonicalHeader(hf.Name) trailer[key] = append(trailer[key], hf.Value) } cs.trailer = trailer @@ -3331,7 +3081,7 @@ func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, ping bool, var ( errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit") - errRequestHeaderListSize = errors.New("http2: request header list larger than peer's advertised limit") + errRequestHeaderListSize = httpcommon.ErrRequestHeaderListSize ) func (cc *ClientConn) logf(format string, args ...interface{}) { @@ -3515,16 +3265,6 @@ func traceFirstResponseByte(trace *httptrace.ClientTrace) { } } -func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { - return trace != nil && trace.WroteHeaderField != nil -} - -func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { - if trace != nil && trace.WroteHeaderField != nil { - trace.WroteHeaderField(k, []string{v}) - } -} - func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { if trace != nil { return trace.Got1xxResponse diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go index 6ff6bee7e9..fdb35b9477 100644 --- a/vendor/golang.org/x/net/http2/write.go +++ b/vendor/golang.org/x/net/http2/write.go @@ -13,6 +13,7 @@ import ( "golang.org/x/net/http/httpguts" "golang.org/x/net/http2/hpack" + "golang.org/x/net/internal/httpcommon" ) // writeFramer is implemented by any type that is used to write frames. @@ -351,7 +352,7 @@ func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) { } for _, k := range keys { vv := h[k] - k, ascii := lowerHeader(k) + k, ascii := httpcommon.LowerHeader(k) if !ascii { // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header // field names have to be ASCII characters (just as in HTTP/1.x). diff --git a/vendor/golang.org/x/net/internal/httpcommon/ascii.go b/vendor/golang.org/x/net/internal/httpcommon/ascii.go new file mode 100644 index 0000000000..ed14da5afc --- /dev/null +++ b/vendor/golang.org/x/net/internal/httpcommon/ascii.go @@ -0,0 +1,53 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package httpcommon + +import "strings" + +// The HTTP protocols are defined in terms of ASCII, not Unicode. This file +// contains helper functions which may use Unicode-aware functions which would +// otherwise be unsafe and could introduce vulnerabilities if used improperly. + +// asciiEqualFold is strings.EqualFold, ASCII only. It reports whether s and t +// are equal, ASCII-case-insensitively. +func asciiEqualFold(s, t string) bool { + if len(s) != len(t) { + return false + } + for i := 0; i < len(s); i++ { + if lower(s[i]) != lower(t[i]) { + return false + } + } + return true +} + +// lower returns the ASCII lowercase version of b. +func lower(b byte) byte { + if 'A' <= b && b <= 'Z' { + return b + ('a' - 'A') + } + return b +} + +// isASCIIPrint returns whether s is ASCII and printable according to +// https://tools.ietf.org/html/rfc20#section-4.2. +func isASCIIPrint(s string) bool { + for i := 0; i < len(s); i++ { + if s[i] < ' ' || s[i] > '~' { + return false + } + } + return true +} + +// asciiToLower returns the lowercase version of s if s is ASCII and printable, +// and whether or not it was. +func asciiToLower(s string) (lower string, ok bool) { + if !isASCIIPrint(s) { + return "", false + } + return strings.ToLower(s), true +} diff --git a/vendor/golang.org/x/net/http2/headermap.go b/vendor/golang.org/x/net/internal/httpcommon/headermap.go similarity index 74% rename from vendor/golang.org/x/net/http2/headermap.go rename to vendor/golang.org/x/net/internal/httpcommon/headermap.go index 149b3dd20e..92483d8e41 100644 --- a/vendor/golang.org/x/net/http2/headermap.go +++ b/vendor/golang.org/x/net/internal/httpcommon/headermap.go @@ -1,11 +1,11 @@ -// Copyright 2014 The Go Authors. All rights reserved. +// Copyright 2025 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package http2 +package httpcommon import ( - "net/http" + "net/textproto" "sync" ) @@ -82,13 +82,15 @@ func buildCommonHeaderMaps() { commonLowerHeader = make(map[string]string, len(common)) commonCanonHeader = make(map[string]string, len(common)) for _, v := range common { - chk := http.CanonicalHeaderKey(v) + chk := textproto.CanonicalMIMEHeaderKey(v) commonLowerHeader[chk] = v commonCanonHeader[v] = chk } } -func lowerHeader(v string) (lower string, ascii bool) { +// LowerHeader returns the lowercase form of a header name, +// used on the wire for HTTP/2 and HTTP/3 requests. +func LowerHeader(v string) (lower string, ascii bool) { buildCommonHeaderMapsOnce() if s, ok := commonLowerHeader[v]; ok { return s, true @@ -96,10 +98,18 @@ func lowerHeader(v string) (lower string, ascii bool) { return asciiToLower(v) } -func canonicalHeader(v string) string { +// CanonicalHeader canonicalizes a header name. (For example, "host" becomes "Host".) +func CanonicalHeader(v string) string { buildCommonHeaderMapsOnce() if s, ok := commonCanonHeader[v]; ok { return s } - return http.CanonicalHeaderKey(v) + return textproto.CanonicalMIMEHeaderKey(v) +} + +// CachedCanonicalHeader returns the canonical form of a well-known header name. 
+func CachedCanonicalHeader(v string) (string, bool) { + buildCommonHeaderMapsOnce() + s, ok := commonCanonHeader[v] + return s, ok } diff --git a/vendor/golang.org/x/net/internal/httpcommon/request.go b/vendor/golang.org/x/net/internal/httpcommon/request.go new file mode 100644 index 0000000000..4b70553179 --- /dev/null +++ b/vendor/golang.org/x/net/internal/httpcommon/request.go @@ -0,0 +1,467 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package httpcommon + +import ( + "context" + "errors" + "fmt" + "net/http/httptrace" + "net/textproto" + "net/url" + "sort" + "strconv" + "strings" + + "golang.org/x/net/http/httpguts" + "golang.org/x/net/http2/hpack" +) + +var ( + ErrRequestHeaderListSize = errors.New("request header list larger than peer's advertised limit") +) + +// Request is a subset of http.Request. +// It'd be simpler to pass an *http.Request, of course, but we can't depend on net/http +// without creating a dependency cycle. +type Request struct { + URL *url.URL + Method string + Host string + Header map[string][]string + Trailer map[string][]string + ActualContentLength int64 // 0 means 0, -1 means unknown +} + +// EncodeHeadersParam is parameters to EncodeHeaders. +type EncodeHeadersParam struct { + Request Request + + // AddGzipHeader indicates that an "accept-encoding: gzip" header should be + // added to the request. + AddGzipHeader bool + + // PeerMaxHeaderListSize, when non-zero, is the peer's MAX_HEADER_LIST_SIZE setting. + PeerMaxHeaderListSize uint64 + + // DefaultUserAgent is the User-Agent header to send when the request + // neither contains a User-Agent nor disables it. + DefaultUserAgent string +} + +// EncodeHeadersParam is the result of EncodeHeaders. +type EncodeHeadersResult struct { + HasBody bool + HasTrailers bool +} + +// EncodeHeaders constructs request headers common to HTTP/2 and HTTP/3. +// It validates a request and calls headerf with each pseudo-header and header +// for the request. +// The headerf function is called with the validated, canonicalized header name. +func EncodeHeaders(ctx context.Context, param EncodeHeadersParam, headerf func(name, value string)) (res EncodeHeadersResult, _ error) { + req := param.Request + + // Check for invalid connection-level headers. + if err := checkConnHeaders(req.Header); err != nil { + return res, err + } + + if req.URL == nil { + return res, errors.New("Request.URL is nil") + } + + host := req.Host + if host == "" { + host = req.URL.Host + } + host, err := httpguts.PunycodeHostPort(host) + if err != nil { + return res, err + } + if !httpguts.ValidHostHeader(host) { + return res, errors.New("invalid Host header") + } + + // isNormalConnect is true if this is a non-extended CONNECT request. + isNormalConnect := false + var protocol string + if vv := req.Header[":protocol"]; len(vv) > 0 { + protocol = vv[0] + } + if req.Method == "CONNECT" && protocol == "" { + isNormalConnect = true + } else if protocol != "" && req.Method != "CONNECT" { + return res, errors.New("invalid :protocol header in non-CONNECT request") + } + + // Validate the path, except for non-extended CONNECT requests which have no path. 
+ var path string + if !isNormalConnect { + path = req.URL.RequestURI() + if !validPseudoPath(path) { + orig := path + path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) + if !validPseudoPath(path) { + if req.URL.Opaque != "" { + return res, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) + } else { + return res, fmt.Errorf("invalid request :path %q", orig) + } + } + } + } + + // Check for any invalid headers+trailers and return an error before we + // potentially pollute our hpack state. (We want to be able to + // continue to reuse the hpack encoder for future requests) + if err := validateHeaders(req.Header); err != "" { + return res, fmt.Errorf("invalid HTTP header %s", err) + } + if err := validateHeaders(req.Trailer); err != "" { + return res, fmt.Errorf("invalid HTTP trailer %s", err) + } + + trailers, err := commaSeparatedTrailers(req.Trailer) + if err != nil { + return res, err + } + + enumerateHeaders := func(f func(name, value string)) { + // 8.1.2.3 Request Pseudo-Header Fields + // The :path pseudo-header field includes the path and query parts of the + // target URI (the path-absolute production and optionally a '?' character + // followed by the query production, see Sections 3.3 and 3.4 of + // [RFC3986]). + f(":authority", host) + m := req.Method + if m == "" { + m = "GET" + } + f(":method", m) + if !isNormalConnect { + f(":path", path) + f(":scheme", req.URL.Scheme) + } + if protocol != "" { + f(":protocol", protocol) + } + if trailers != "" { + f("trailer", trailers) + } + + var didUA bool + for k, vv := range req.Header { + if asciiEqualFold(k, "host") || asciiEqualFold(k, "content-length") { + // Host is :authority, already sent. + // Content-Length is automatic, set below. + continue + } else if asciiEqualFold(k, "connection") || + asciiEqualFold(k, "proxy-connection") || + asciiEqualFold(k, "transfer-encoding") || + asciiEqualFold(k, "upgrade") || + asciiEqualFold(k, "keep-alive") { + // Per 8.1.2.2 Connection-Specific Header + // Fields, don't send connection-specific + // fields. We have already checked if any + // are error-worthy so just ignore the rest. + continue + } else if asciiEqualFold(k, "user-agent") { + // Match Go's http1 behavior: at most one + // User-Agent. If set to nil or empty string, + // then omit it. Otherwise if not mentioned, + // include the default (below). + didUA = true + if len(vv) < 1 { + continue + } + vv = vv[:1] + if vv[0] == "" { + continue + } + } else if asciiEqualFold(k, "cookie") { + // Per 8.1.2.5 To allow for better compression efficiency, the + // Cookie header field MAY be split into separate header fields, + // each with one or more cookie-pairs. + for _, v := range vv { + for { + p := strings.IndexByte(v, ';') + if p < 0 { + break + } + f("cookie", v[:p]) + p++ + // strip space after semicolon if any. + for p+1 <= len(v) && v[p] == ' ' { + p++ + } + v = v[p:] + } + if len(v) > 0 { + f("cookie", v) + } + } + continue + } else if k == ":protocol" { + // :protocol pseudo-header was already sent above. + continue + } + + for _, v := range vv { + f(k, v) + } + } + if shouldSendReqContentLength(req.Method, req.ActualContentLength) { + f("content-length", strconv.FormatInt(req.ActualContentLength, 10)) + } + if param.AddGzipHeader { + f("accept-encoding", "gzip") + } + if !didUA { + f("user-agent", param.DefaultUserAgent) + } + } + + // Do a first pass over the headers counting bytes to ensure + // we don't exceed cc.peerMaxHeaderListSize. 
This is done as a + // separate pass before encoding the headers to prevent + // modifying the hpack state. + if param.PeerMaxHeaderListSize > 0 { + hlSize := uint64(0) + enumerateHeaders(func(name, value string) { + hf := hpack.HeaderField{Name: name, Value: value} + hlSize += uint64(hf.Size()) + }) + + if hlSize > param.PeerMaxHeaderListSize { + return res, ErrRequestHeaderListSize + } + } + + trace := httptrace.ContextClientTrace(ctx) + + // Header list size is ok. Write the headers. + enumerateHeaders(func(name, value string) { + name, ascii := LowerHeader(name) + if !ascii { + // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header + // field names have to be ASCII characters (just as in HTTP/1.x). + return + } + + headerf(name, value) + + if trace != nil && trace.WroteHeaderField != nil { + trace.WroteHeaderField(name, []string{value}) + } + }) + + res.HasBody = req.ActualContentLength != 0 + res.HasTrailers = trailers != "" + return res, nil +} + +// IsRequestGzip reports whether we should add an Accept-Encoding: gzip header +// for a request. +func IsRequestGzip(method string, header map[string][]string, disableCompression bool) bool { + // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? + if !disableCompression && + len(header["Accept-Encoding"]) == 0 && + len(header["Range"]) == 0 && + method != "HEAD" { + // Request gzip only, not deflate. Deflate is ambiguous and + // not as universally supported anyway. + // See: https://zlib.net/zlib_faq.html#faq39 + // + // Note that we don't request this for HEAD requests, + // due to a bug in nginx: + // http://trac.nginx.org/nginx/ticket/358 + // https://golang.org/issue/5522 + // + // We don't request gzip if the request is for a range, since + // auto-decoding a portion of a gzipped document will just fail + // anyway. See https://golang.org/issue/8923 + return true + } + return false +} + +// checkConnHeaders checks whether req has any invalid connection-level headers. +// +// https://www.rfc-editor.org/rfc/rfc9114.html#section-4.2-3 +// https://www.rfc-editor.org/rfc/rfc9113.html#section-8.2.2-1 +// +// Certain headers are special-cased as okay but not transmitted later. +// For example, we allow "Transfer-Encoding: chunked", but drop the header when encoding. +func checkConnHeaders(h map[string][]string) error { + if vv := h["Upgrade"]; len(vv) > 0 && (vv[0] != "" && vv[0] != "chunked") { + return fmt.Errorf("invalid Upgrade request header: %q", vv) + } + if vv := h["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { + return fmt.Errorf("invalid Transfer-Encoding request header: %q", vv) + } + if vv := h["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) { + return fmt.Errorf("invalid Connection request header: %q", vv) + } + return nil +} + +func commaSeparatedTrailers(trailer map[string][]string) (string, error) { + keys := make([]string, 0, len(trailer)) + for k := range trailer { + k = CanonicalHeader(k) + switch k { + case "Transfer-Encoding", "Trailer", "Content-Length": + return "", fmt.Errorf("invalid Trailer key %q", k) + } + keys = append(keys, k) + } + if len(keys) > 0 { + sort.Strings(keys) + return strings.Join(keys, ","), nil + } + return "", nil +} + +// validPseudoPath reports whether v is a valid :path pseudo-header +// value. It must be either: +// +// - a non-empty string starting with '/' +// - the string '*', for OPTIONS requests. 
+// +// For now this is only used a quick check for deciding when to clean +// up Opaque URLs before sending requests from the Transport. +// See golang.org/issue/16847 +// +// We used to enforce that the path also didn't start with "//", but +// Google's GFE accepts such paths and Chrome sends them, so ignore +// that part of the spec. See golang.org/issue/19103. +func validPseudoPath(v string) bool { + return (len(v) > 0 && v[0] == '/') || v == "*" +} + +func validateHeaders(hdrs map[string][]string) string { + for k, vv := range hdrs { + if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" { + return fmt.Sprintf("name %q", k) + } + for _, v := range vv { + if !httpguts.ValidHeaderFieldValue(v) { + // Don't include the value in the error, + // because it may be sensitive. + return fmt.Sprintf("value for header %q", k) + } + } + } + return "" +} + +// shouldSendReqContentLength reports whether we should send +// a "content-length" request header. This logic is basically a copy of the net/http +// transferWriter.shouldSendContentLength. +// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). +// -1 means unknown. +func shouldSendReqContentLength(method string, contentLength int64) bool { + if contentLength > 0 { + return true + } + if contentLength < 0 { + return false + } + // For zero bodies, whether we send a content-length depends on the method. + // It also kinda doesn't matter for http2 either way, with END_STREAM. + switch method { + case "POST", "PUT", "PATCH": + return true + default: + return false + } +} + +// ServerRequestParam is parameters to NewServerRequest. +type ServerRequestParam struct { + Method string + Scheme, Authority, Path string + Protocol string + Header map[string][]string +} + +// ServerRequestResult is the result of NewServerRequest. +type ServerRequestResult struct { + // Various http.Request fields. + URL *url.URL + RequestURI string + Trailer map[string][]string + + NeedsContinue bool // client provided an "Expect: 100-continue" header + + // If the request should be rejected, this is a short string suitable for passing + // to the http2 package's CountError function. + // It might be a bit odd to return errors this way rather than returing an error, + // but this ensures we don't forget to include a CountError reason. + InvalidReason string +} + +func NewServerRequest(rp ServerRequestParam) ServerRequestResult { + needsContinue := httpguts.HeaderValuesContainsToken(rp.Header["Expect"], "100-continue") + if needsContinue { + delete(rp.Header, "Expect") + } + // Merge Cookie headers into one "; "-delimited value. + if cookies := rp.Header["Cookie"]; len(cookies) > 1 { + rp.Header["Cookie"] = []string{strings.Join(cookies, "; ")} + } + + // Setup Trailers + var trailer map[string][]string + for _, v := range rp.Header["Trailer"] { + for _, key := range strings.Split(v, ",") { + key = textproto.CanonicalMIMEHeaderKey(textproto.TrimString(key)) + switch key { + case "Transfer-Encoding", "Trailer", "Content-Length": + // Bogus. (copy of http1 rules) + // Ignore. + default: + if trailer == nil { + trailer = make(map[string][]string) + } + trailer[key] = nil + } + } + } + delete(rp.Header, "Trailer") + + // "':authority' MUST NOT include the deprecated userinfo subcomponent + // for "http" or "https" schemed URIs." 
+ // https://www.rfc-editor.org/rfc/rfc9113.html#section-8.3.1-2.3.8 + if strings.IndexByte(rp.Authority, '@') != -1 && (rp.Scheme == "http" || rp.Scheme == "https") { + return ServerRequestResult{ + InvalidReason: "userinfo_in_authority", + } + } + + var url_ *url.URL + var requestURI string + if rp.Method == "CONNECT" && rp.Protocol == "" { + url_ = &url.URL{Host: rp.Authority} + requestURI = rp.Authority // mimic HTTP/1 server behavior + } else { + var err error + url_, err = url.ParseRequestURI(rp.Path) + if err != nil { + return ServerRequestResult{ + InvalidReason: "bad_path", + } + } + requestURI = rp.Path + } + + return ServerRequestResult{ + URL: url_, + NeedsContinue: needsContinue, + RequestURI: requestURI, + Trailer: trailer, + } +} diff --git a/vendor/golang.org/x/net/proxy/per_host.go b/vendor/golang.org/x/net/proxy/per_host.go index d7d4b8b6e3..32bdf435ec 100644 --- a/vendor/golang.org/x/net/proxy/per_host.go +++ b/vendor/golang.org/x/net/proxy/per_host.go @@ -7,6 +7,7 @@ package proxy import ( "context" "net" + "net/netip" "strings" ) @@ -57,7 +58,8 @@ func (p *PerHost) DialContext(ctx context.Context, network, addr string) (c net. } func (p *PerHost) dialerForRequest(host string) Dialer { - if ip := net.ParseIP(host); ip != nil { + if nip, err := netip.ParseAddr(host); err == nil { + ip := net.IP(nip.AsSlice()) for _, net := range p.bypassNetworks { if net.Contains(ip) { return p.bypass @@ -108,8 +110,8 @@ func (p *PerHost) AddFromString(s string) { } continue } - if ip := net.ParseIP(host); ip != nil { - p.AddIP(ip) + if nip, err := netip.ParseAddr(host); err == nil { + p.AddIP(net.IP(nip.AsSlice())) continue } if strings.HasPrefix(host, "*.") { diff --git a/vendor/golang.org/x/net/websocket/websocket.go b/vendor/golang.org/x/net/websocket/websocket.go index ac76165ceb..3448d20395 100644 --- a/vendor/golang.org/x/net/websocket/websocket.go +++ b/vendor/golang.org/x/net/websocket/websocket.go @@ -6,9 +6,10 @@ // as specified in RFC 6455. // // This package currently lacks some features found in an alternative -// and more actively maintained WebSocket package: +// and more actively maintained WebSocket packages: // -// https://pkg.go.dev/github.com/coder/websocket +// - [github.com/gorilla/websocket] +// - [github.com/coder/websocket] package websocket // import "golang.org/x/net/websocket" import ( diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go index 948a3ee63d..a4ea5d14f1 100644 --- a/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -46,7 +46,7 @@ func (g *Group) done() { // returns a non-nil error or the first time Wait returns, whichever occurs // first. func WithContext(ctx context.Context) (*Group, context.Context) { - ctx, cancel := withCancelCause(ctx) + ctx, cancel := context.WithCancelCause(ctx) return &Group{cancel: cancel}, ctx } @@ -118,6 +118,7 @@ func (g *Group) TryGo(f func() error) bool { // SetLimit limits the number of active goroutines in this group to at most n. // A negative value indicates no limit. +// A limit of zero will prevent any new goroutines from being added. // // Any subsequent call to the Go method will block until it can add an active // goroutine without exceeding the configured limit. 
diff --git a/vendor/golang.org/x/sync/errgroup/go120.go b/vendor/golang.org/x/sync/errgroup/go120.go deleted file mode 100644 index f93c740b63..0000000000 --- a/vendor/golang.org/x/sync/errgroup/go120.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.20 - -package errgroup - -import "context" - -func withCancelCause(parent context.Context) (context.Context, func(error)) { - return context.WithCancelCause(parent) -} diff --git a/vendor/golang.org/x/sync/errgroup/pre_go120.go b/vendor/golang.org/x/sync/errgroup/pre_go120.go deleted file mode 100644 index 88ce33434e..0000000000 --- a/vendor/golang.org/x/sync/errgroup/pre_go120.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.20 - -package errgroup - -import "context" - -func withCancelCause(parent context.Context) (context.Context, func(error)) { - ctx, cancel := context.WithCancel(parent) - return ctx, func(error) { cancel() } -} diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go index 02609d5b21..9c105f23af 100644 --- a/vendor/golang.org/x/sys/cpu/cpu.go +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -72,6 +72,9 @@ var X86 struct { HasSSSE3 bool // Supplemental streaming SIMD extension 3 HasSSE41 bool // Streaming SIMD extension 4 and 4.1 HasSSE42 bool // Streaming SIMD extension 4 and 4.2 + HasAVXIFMA bool // Advanced vector extension Integer Fused Multiply Add + HasAVXVNNI bool // Advanced vector extension Vector Neural Network Instructions + HasAVXVNNIInt8 bool // Advanced vector extension Vector Neural Network Int8 instructions _ CacheLinePad } diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go index 600a680786..1e642f3304 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_x86.go +++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go @@ -53,6 +53,9 @@ func initOptions() { {Name: "sse41", Feature: &X86.HasSSE41}, {Name: "sse42", Feature: &X86.HasSSE42}, {Name: "ssse3", Feature: &X86.HasSSSE3}, + {Name: "avxifma", Feature: &X86.HasAVXIFMA}, + {Name: "avxvnni", Feature: &X86.HasAVXVNNI}, + {Name: "avxvnniint8", Feature: &X86.HasAVXVNNIInt8}, // These capabilities should always be enabled on amd64: {Name: "sse2", Feature: &X86.HasSSE2, Required: runtime.GOARCH == "amd64"}, @@ -106,7 +109,7 @@ func archInit() { return } - _, ebx7, ecx7, edx7 := cpuid(7, 0) + eax7, ebx7, ecx7, edx7 := cpuid(7, 0) X86.HasBMI1 = isSet(3, ebx7) X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX X86.HasBMI2 = isSet(8, ebx7) @@ -134,14 +137,24 @@ func archInit() { X86.HasAVX512VAES = isSet(9, ecx7) X86.HasAVX512VBMI2 = isSet(6, ecx7) X86.HasAVX512BITALG = isSet(12, ecx7) - - eax71, _, _, _ := cpuid(7, 1) - X86.HasAVX512BF16 = isSet(5, eax71) } X86.HasAMXTile = isSet(24, edx7) X86.HasAMXInt8 = isSet(25, edx7) X86.HasAMXBF16 = isSet(22, edx7) + + // These features depend on the second level of extended features. 
+ if eax7 >= 1 { + eax71, _, _, edx71 := cpuid(7, 1) + if X86.HasAVX512 { + X86.HasAVX512BF16 = isSet(5, eax71) + } + if X86.HasAVX { + X86.HasAVXIFMA = isSet(23, eax71) + X86.HasAVXVNNI = isSet(4, eax71) + X86.HasAVXVNNIInt8 = isSet(4, edx71) + } + } } func isSet(bitpos uint, value uint32) bool { diff --git a/vendor/golang.org/x/sys/unix/auxv.go b/vendor/golang.org/x/sys/unix/auxv.go new file mode 100644 index 0000000000..37a82528f5 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/auxv.go @@ -0,0 +1,36 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) + +package unix + +import ( + "syscall" + "unsafe" +) + +//go:linkname runtime_getAuxv runtime.getAuxv +func runtime_getAuxv() []uintptr + +// Auxv returns the ELF auxiliary vector as a sequence of key/value pairs. +// The returned slice is always a fresh copy, owned by the caller. +// It returns an error on non-ELF platforms, or if the auxiliary vector cannot be accessed, +// which happens in some locked-down environments and build modes. +func Auxv() ([][2]uintptr, error) { + vec := runtime_getAuxv() + vecLen := len(vec) + + if vecLen == 0 { + return nil, syscall.ENOENT + } + + if vecLen%2 != 0 { + return nil, syscall.EINVAL + } + + result := make([]uintptr, vecLen) + copy(result, vec) + return unsafe.Slice((*[2]uintptr)(unsafe.Pointer(&result[0])), vecLen/2), nil +} diff --git a/vendor/golang.org/x/sys/unix/auxv_unsupported.go b/vendor/golang.org/x/sys/unix/auxv_unsupported.go new file mode 100644 index 0000000000..1200487f2e --- /dev/null +++ b/vendor/golang.org/x/sys/unix/auxv_unsupported.go @@ -0,0 +1,13 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos) + +package unix + +import "syscall" + +func Auxv() ([][2]uintptr, error) { + return nil, syscall.ENOTSUP +} diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index 21974af064..abc3955477 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -1102,3 +1102,90 @@ func (s *Strioctl) SetInt(i int) { func IoctlSetStrioctlRetInt(fd int, req int, s *Strioctl) (int, error) { return ioctlPtrRet(fd, req, unsafe.Pointer(s)) } + +// Ucred Helpers +// See ucred(3c) and getpeerucred(3c) + +//sys getpeerucred(fd uintptr, ucred *uintptr) (err error) +//sys ucredFree(ucred uintptr) = ucred_free +//sys ucredGet(pid int) (ucred uintptr, err error) = ucred_get +//sys ucredGeteuid(ucred uintptr) (uid int) = ucred_geteuid +//sys ucredGetegid(ucred uintptr) (gid int) = ucred_getegid +//sys ucredGetruid(ucred uintptr) (uid int) = ucred_getruid +//sys ucredGetrgid(ucred uintptr) (gid int) = ucred_getrgid +//sys ucredGetsuid(ucred uintptr) (uid int) = ucred_getsuid +//sys ucredGetsgid(ucred uintptr) (gid int) = ucred_getsgid +//sys ucredGetpid(ucred uintptr) (pid int) = ucred_getpid + +// Ucred is an opaque struct that holds user credentials. +type Ucred struct { + ucred uintptr +} + +// We need to ensure that ucredFree is called on the underlying ucred +// when the Ucred is garbage collected. 
+func ucredFinalizer(u *Ucred) { + ucredFree(u.ucred) +} + +func GetPeerUcred(fd uintptr) (*Ucred, error) { + var ucred uintptr + err := getpeerucred(fd, &ucred) + if err != nil { + return nil, err + } + result := &Ucred{ + ucred: ucred, + } + // set the finalizer on the result so that the ucred will be freed + runtime.SetFinalizer(result, ucredFinalizer) + return result, nil +} + +func UcredGet(pid int) (*Ucred, error) { + ucred, err := ucredGet(pid) + if err != nil { + return nil, err + } + result := &Ucred{ + ucred: ucred, + } + // set the finalizer on the result so that the ucred will be freed + runtime.SetFinalizer(result, ucredFinalizer) + return result, nil +} + +func (u *Ucred) Geteuid() int { + defer runtime.KeepAlive(u) + return ucredGeteuid(u.ucred) +} + +func (u *Ucred) Getruid() int { + defer runtime.KeepAlive(u) + return ucredGetruid(u.ucred) +} + +func (u *Ucred) Getsuid() int { + defer runtime.KeepAlive(u) + return ucredGetsuid(u.ucred) +} + +func (u *Ucred) Getegid() int { + defer runtime.KeepAlive(u) + return ucredGetegid(u.ucred) +} + +func (u *Ucred) Getrgid() int { + defer runtime.KeepAlive(u) + return ucredGetrgid(u.ucred) +} + +func (u *Ucred) Getsgid() int { + defer runtime.KeepAlive(u) + return ucredGetsgid(u.ucred) +} + +func (u *Ucred) Getpid() int { + defer runtime.KeepAlive(u) + return ucredGetpid(u.ucred) +} diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 6ebc48b3fe..4f432bfe8f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -1245,6 +1245,7 @@ const ( FAN_REPORT_DFID_NAME = 0xc00 FAN_REPORT_DFID_NAME_TARGET = 0x1e00 FAN_REPORT_DIR_FID = 0x400 + FAN_REPORT_FD_ERROR = 0x2000 FAN_REPORT_FID = 0x200 FAN_REPORT_NAME = 0x800 FAN_REPORT_PIDFD = 0x80 @@ -1330,8 +1331,10 @@ const ( FUSE_SUPER_MAGIC = 0x65735546 FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 + F_CREATED_QUERY = 0x404 F_DUPFD = 0x0 F_DUPFD_CLOEXEC = 0x406 + F_DUPFD_QUERY = 0x403 F_EXLCK = 0x4 F_GETFD = 0x1 F_GETFL = 0x3 @@ -1551,6 +1554,7 @@ const ( IPPROTO_ROUTING = 0x2b IPPROTO_RSVP = 0x2e IPPROTO_SCTP = 0x84 + IPPROTO_SMC = 0x100 IPPROTO_TCP = 0x6 IPPROTO_TP = 0x1d IPPROTO_UDP = 0x11 @@ -1623,6 +1627,8 @@ const ( IPV6_UNICAST_IF = 0x4c IPV6_USER_FLOW = 0xe IPV6_V6ONLY = 0x1a + IPV6_VERSION = 0x60 + IPV6_VERSION_MASK = 0xf0 IPV6_XFRM_POLICY = 0x23 IP_ADD_MEMBERSHIP = 0x23 IP_ADD_SOURCE_MEMBERSHIP = 0x27 @@ -1867,6 +1873,7 @@ const ( MADV_UNMERGEABLE = 0xd MADV_WILLNEED = 0x3 MADV_WIPEONFORK = 0x12 + MAP_DROPPABLE = 0x8 MAP_FILE = 0x0 MAP_FIXED = 0x10 MAP_FIXED_NOREPLACE = 0x100000 @@ -1967,6 +1974,7 @@ const ( MSG_PEEK = 0x2 MSG_PROXY = 0x10 MSG_RST = 0x1000 + MSG_SOCK_DEVMEM = 0x2000000 MSG_SYN = 0x400 MSG_TRUNC = 0x20 MSG_TRYHARD = 0x4 @@ -2083,6 +2091,7 @@ const ( NFC_ATR_REQ_MAXSIZE = 0x40 NFC_ATR_RES_GB_MAXSIZE = 0x2f NFC_ATR_RES_MAXSIZE = 0x40 + NFC_ATS_MAXSIZE = 0x14 NFC_COMM_ACTIVE = 0x0 NFC_COMM_PASSIVE = 0x1 NFC_DEVICE_NAME_MAXSIZE = 0x8 @@ -2163,6 +2172,7 @@ const ( NFNL_SUBSYS_QUEUE = 0x3 NFNL_SUBSYS_ULOG = 0x4 NFS_SUPER_MAGIC = 0x6969 + NFT_BITWISE_BOOL = 0x0 NFT_CHAIN_FLAGS = 0x7 NFT_CHAIN_MAXNAMELEN = 0x100 NFT_CT_MAX = 0x17 @@ -2491,6 +2501,7 @@ const ( PR_GET_PDEATHSIG = 0x2 PR_GET_SECCOMP = 0x15 PR_GET_SECUREBITS = 0x1b + PR_GET_SHADOW_STACK_STATUS = 0x4a PR_GET_SPECULATION_CTRL = 0x34 PR_GET_TAGGED_ADDR_CTRL = 0x38 PR_GET_THP_DISABLE = 0x2a @@ -2499,6 +2510,7 @@ const ( PR_GET_TIMING = 0xd PR_GET_TSC = 0x19 PR_GET_UNALIGN = 0x5 + 
PR_LOCK_SHADOW_STACK_STATUS = 0x4c PR_MCE_KILL = 0x21 PR_MCE_KILL_CLEAR = 0x0 PR_MCE_KILL_DEFAULT = 0x2 @@ -2525,6 +2537,8 @@ const ( PR_PAC_GET_ENABLED_KEYS = 0x3d PR_PAC_RESET_KEYS = 0x36 PR_PAC_SET_ENABLED_KEYS = 0x3c + PR_PMLEN_MASK = 0x7f000000 + PR_PMLEN_SHIFT = 0x18 PR_PPC_DEXCR_CTRL_CLEAR = 0x4 PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC = 0x10 PR_PPC_DEXCR_CTRL_EDITABLE = 0x1 @@ -2592,6 +2606,7 @@ const ( PR_SET_PTRACER = 0x59616d61 PR_SET_SECCOMP = 0x16 PR_SET_SECUREBITS = 0x1c + PR_SET_SHADOW_STACK_STATUS = 0x4b PR_SET_SPECULATION_CTRL = 0x35 PR_SET_SYSCALL_USER_DISPATCH = 0x3b PR_SET_TAGGED_ADDR_CTRL = 0x37 @@ -2602,6 +2617,9 @@ const ( PR_SET_UNALIGN = 0x6 PR_SET_VMA = 0x53564d41 PR_SET_VMA_ANON_NAME = 0x0 + PR_SHADOW_STACK_ENABLE = 0x1 + PR_SHADOW_STACK_PUSH = 0x4 + PR_SHADOW_STACK_WRITE = 0x2 PR_SME_GET_VL = 0x40 PR_SME_SET_VL = 0x3f PR_SME_SET_VL_ONEXEC = 0x40000 @@ -2911,7 +2929,6 @@ const ( RTM_NEWNEXTHOP = 0x68 RTM_NEWNEXTHOPBUCKET = 0x74 RTM_NEWNSID = 0x58 - RTM_NEWNVLAN = 0x70 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 RTM_NEWROUTE = 0x18 @@ -2920,6 +2937,7 @@ const ( RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c RTM_NEWTUNNEL = 0x78 + RTM_NEWVLAN = 0x70 RTM_NR_FAMILIES = 0x1b RTM_NR_MSGTYPES = 0x6c RTM_SETDCB = 0x4f diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index c0d45e3205..75207613c7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -116,6 +116,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -304,6 +306,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index c731d24f02..c68acda535 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -116,6 +116,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -305,6 +307,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 680018a4a7..a8c607ab86 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -310,6 +312,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index a63909f308..18563dd8d3 100644 --- 
a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -109,6 +109,7 @@ const ( F_SETOWN = 0x8 F_UNLCK = 0x2 F_WRLCK = 0x1 + GCS_MAGIC = 0x47435300 HIDIOCGRAWINFO = 0x80084803 HIDIOCGRDESC = 0x90044802 HIDIOCGRDESCSIZE = 0x80044801 @@ -119,6 +120,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -302,6 +305,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 9b0a2573fe..22912cdaa9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -116,6 +116,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -297,6 +299,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 958e6e0645..29344eb37a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -303,6 +305,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 50c7f25bd1..20d51fb96a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -303,6 +305,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index ced21d66d9..321b60902a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -303,6 +305,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 226c044190..9bacdf1e27 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x80 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -303,6 +305,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 3122737cd4..c224272615 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x80 IUCLC = 0x1000 IXOFF = 0x400 @@ -358,6 +360,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index eb5d3467ed..6270c8ee13 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x80 IUCLC = 0x1000 IXOFF = 0x400 @@ -362,6 +364,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index e921ebc60b..9966c1941f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x80 IUCLC = 0x1000 IXOFF = 0x400 @@ -362,6 +364,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 38ba81c55c..848e5fcc42 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xffffff0f + IPV6_FLOWLABEL_MASK = 0xffff0f00 ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -294,6 +296,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d 
SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 71f0400977..669b2adb80 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -115,6 +115,8 @@ const ( IN_CLOEXEC = 0x80000 IN_NONBLOCK = 0x800 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -366,6 +368,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x36 SCM_TIMESTAMPING_PKTINFO = 0x3a SCM_TIMESTAMPNS = 0x23 + SCM_TS_OPT_ID = 0x51 SCM_TXTIME = 0x3d SCM_WIFI_STATUS = 0x29 SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index c44a313322..4834e57514 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -119,6 +119,8 @@ const ( IN_CLOEXEC = 0x400000 IN_NONBLOCK = 0x4000 IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9 + IPV6_FLOWINFO_MASK = 0xfffffff + IPV6_FLOWLABEL_MASK = 0xfffff ISIG = 0x1 IUCLC = 0x200 IXOFF = 0x1000 @@ -357,6 +359,7 @@ const ( SCM_TIMESTAMPING_OPT_STATS = 0x38 SCM_TIMESTAMPING_PKTINFO = 0x3c SCM_TIMESTAMPNS = 0x21 + SCM_TS_OPT_ID = 0x5a SCM_TXTIME = 0x3f SCM_WIFI_STATUS = 0x25 SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index 829b87feb8..c6545413c4 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -141,6 +141,16 @@ import ( //go:cgo_import_dynamic libc_getpeername getpeername "libsocket.so" //go:cgo_import_dynamic libc_setsockopt setsockopt "libsocket.so" //go:cgo_import_dynamic libc_recvfrom recvfrom "libsocket.so" +//go:cgo_import_dynamic libc_getpeerucred getpeerucred "libc.so" +//go:cgo_import_dynamic libc_ucred_get ucred_get "libc.so" +//go:cgo_import_dynamic libc_ucred_geteuid ucred_geteuid "libc.so" +//go:cgo_import_dynamic libc_ucred_getegid ucred_getegid "libc.so" +//go:cgo_import_dynamic libc_ucred_getruid ucred_getruid "libc.so" +//go:cgo_import_dynamic libc_ucred_getrgid ucred_getrgid "libc.so" +//go:cgo_import_dynamic libc_ucred_getsuid ucred_getsuid "libc.so" +//go:cgo_import_dynamic libc_ucred_getsgid ucred_getsgid "libc.so" +//go:cgo_import_dynamic libc_ucred_getpid ucred_getpid "libc.so" +//go:cgo_import_dynamic libc_ucred_free ucred_free "libc.so" //go:cgo_import_dynamic libc_port_create port_create "libc.so" //go:cgo_import_dynamic libc_port_associate port_associate "libc.so" //go:cgo_import_dynamic libc_port_dissociate port_dissociate "libc.so" @@ -280,6 +290,16 @@ import ( //go:linkname procgetpeername libc_getpeername //go:linkname procsetsockopt libc_setsockopt //go:linkname procrecvfrom libc_recvfrom +//go:linkname procgetpeerucred libc_getpeerucred +//go:linkname procucred_get libc_ucred_get +//go:linkname procucred_geteuid libc_ucred_geteuid +//go:linkname procucred_getegid libc_ucred_getegid +//go:linkname procucred_getruid libc_ucred_getruid +//go:linkname procucred_getrgid libc_ucred_getrgid +//go:linkname procucred_getsuid libc_ucred_getsuid +//go:linkname procucred_getsgid libc_ucred_getsgid +//go:linkname procucred_getpid libc_ucred_getpid +//go:linkname procucred_free libc_ucred_free //go:linkname procport_create 
libc_port_create //go:linkname procport_associate libc_port_associate //go:linkname procport_dissociate libc_port_dissociate @@ -420,6 +440,16 @@ var ( procgetpeername, procsetsockopt, procrecvfrom, + procgetpeerucred, + procucred_get, + procucred_geteuid, + procucred_getegid, + procucred_getruid, + procucred_getrgid, + procucred_getsuid, + procucred_getsgid, + procucred_getpid, + procucred_free, procport_create, procport_associate, procport_dissociate, @@ -2029,6 +2059,90 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getpeerucred(fd uintptr, ucred *uintptr) (err error) { + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetpeerucred)), 2, uintptr(fd), uintptr(unsafe.Pointer(ucred)), 0, 0, 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGet(pid int) (ucred uintptr, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procucred_get)), 1, uintptr(pid), 0, 0, 0, 0, 0) + ucred = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGeteuid(ucred uintptr) (uid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_geteuid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetegid(ucred uintptr) (gid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getegid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetruid(ucred uintptr) (uid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getruid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetrgid(ucred uintptr) (gid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getrgid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetsuid(ucred uintptr) (uid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getsuid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + uid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetsgid(ucred uintptr) (gid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getsgid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + gid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredGetpid(ucred uintptr) (pid int) { + r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getpid)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + pid = int(r0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ucredFree(ucred uintptr) { + sysvicall6(uintptr(unsafe.Pointer(&procucred_free)), 1, uintptr(ucred), 0, 0, 0, 0, 0) + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func port_create() (n int, err error) { r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_create)), 0, 0, 0, 0, 0, 0, 0) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index 524b0820cb..c79aaff306 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ 
b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -458,4 +458,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index f485dbf456..5eb450695e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -381,4 +381,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 70b35bf3b0..05e5029744 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -422,4 +422,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 1893e2fe88..38c53ec51b 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -325,4 +325,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 16a4017da0..31d2e71a18 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -321,4 +321,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 7e567f1eff..f4184a336b 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -442,4 +442,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 4460 SYS_LSM_LIST_MODULES = 4461 SYS_MSEAL = 4462 + SYS_SETXATTRAT = 4463 + SYS_GETXATTRAT = 4464 + SYS_LISTXATTRAT = 4465 + SYS_REMOVEXATTRAT = 4466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 38ae55e5ef..05b9962278 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -372,4 +372,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 5460 SYS_LSM_LIST_MODULES = 5461 SYS_MSEAL = 5462 + SYS_SETXATTRAT = 5463 + SYS_GETXATTRAT = 5464 + SYS_LISTXATTRAT = 5465 + SYS_REMOVEXATTRAT = 5466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 55e92e60a8..43a256e9e6 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -372,4 +372,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 5460 SYS_LSM_LIST_MODULES = 5461 SYS_MSEAL = 5462 + SYS_SETXATTRAT = 5463 + SYS_GETXATTRAT = 5464 + SYS_LISTXATTRAT = 5465 + SYS_REMOVEXATTRAT = 
5466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index 60658d6a02..eea5ddfc22 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -442,4 +442,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 4460 SYS_LSM_LIST_MODULES = 4461 SYS_MSEAL = 4462 + SYS_SETXATTRAT = 4463 + SYS_GETXATTRAT = 4464 + SYS_LISTXATTRAT = 4465 + SYS_REMOVEXATTRAT = 4466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index e203e8a7ed..0d777bfbb1 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -449,4 +449,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index 5944b97d54..b446365025 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -421,4 +421,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index c66d416dad..0c7d21c188 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -421,4 +421,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index a5459e766f..8405391698 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -326,4 +326,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 01d86825bb..fcf1b790d6 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -387,4 +387,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 7b703e77cd..52d15b5f9d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -400,4 +400,8 @@ const ( SYS_LSM_SET_SELF_ATTR = 460 SYS_LSM_LIST_MODULES = 461 SYS_MSEAL = 462 + SYS_SETXATTRAT = 463 + SYS_GETXATTRAT = 464 + SYS_LISTXATTRAT = 465 + SYS_REMOVEXATTRAT = 466 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 5537148dcb..a46abe6472 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go 
@@ -4747,7 +4747,7 @@ const (
 	NL80211_ATTR_MAC_HINT                   = 0xc8
 	NL80211_ATTR_MAC_MASK                   = 0xd7
 	NL80211_ATTR_MAX_AP_ASSOC_STA           = 0xca
-	NL80211_ATTR_MAX                        = 0x14c
+	NL80211_ATTR_MAX                        = 0x14d
 	NL80211_ATTR_MAX_CRIT_PROT_DURATION     = 0xb4
 	NL80211_ATTR_MAX_CSA_COUNTERS           = 0xce
 	NL80211_ATTR_MAX_MATCH_SETS             = 0x85
@@ -5519,7 +5519,7 @@ const (
 	NL80211_MNTR_FLAG_CONTROL               = 0x3
 	NL80211_MNTR_FLAG_COOK_FRAMES           = 0x5
 	NL80211_MNTR_FLAG_FCSFAIL               = 0x1
-	NL80211_MNTR_FLAG_MAX                   = 0x6
+	NL80211_MNTR_FLAG_MAX                   = 0x7
 	NL80211_MNTR_FLAG_OTHER_BSS             = 0x4
 	NL80211_MNTR_FLAG_PLCPFAIL              = 0x2
 	NL80211_MPATH_FLAG_ACTIVE               = 0x1
@@ -6174,3 +6174,5 @@ type SockDiagReq struct {
 	Family   uint8
 	Protocol uint8
 }
+
+const RTM_NEWNVLAN = 0x70
diff --git a/vendor/golang.org/x/text/internal/number/format.go b/vendor/golang.org/x/text/internal/number/format.go
index cd94c5dc4e..1aadcf4077 100644
--- a/vendor/golang.org/x/text/internal/number/format.go
+++ b/vendor/golang.org/x/text/internal/number/format.go
@@ -394,9 +394,7 @@ func appendScientific(dst []byte, f *Formatter, n *Digits) (b []byte, postPre, p
 	exp := n.Exp - int32(n.Comma)
 	exponential := f.Symbol(SymExponential)
 	if exponential == "E" {
-		dst = append(dst, "\u202f"...) // NARROW NO-BREAK SPACE
 		dst = append(dst, f.Symbol(SymSuperscriptingExponent)...)
-		dst = append(dst, "\u202f"...) // NARROW NO-BREAK SPACE
 		dst = f.AppendDigit(dst, 1)
 		dst = f.AppendDigit(dst, 0)
 		switch {
diff --git a/vendor/golang.org/x/text/language/parse.go b/vendor/golang.org/x/text/language/parse.go
index 4d57222e77..053336e286 100644
--- a/vendor/golang.org/x/text/language/parse.go
+++ b/vendor/golang.org/x/text/language/parse.go
@@ -59,7 +59,7 @@ func (c CanonType) Parse(s string) (t Tag, err error) {
 	if changed {
 		tt.RemakeString()
 	}
-	return makeTag(tt), err
+	return makeTag(tt), nil
 }
 
 // Compose creates a Tag from individual parts, which may be of type Tag, Base,
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
index cffdfda961..737d6876d5 100644
--- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
@@ -192,11 +192,6 @@ func (d decoder) unmarshalMessage(m protoreflect.Message, skipTypeURL bool) erro
 			fd = fieldDescs.ByTextName(name)
 		}
 	}
-	if flags.ProtoLegacyWeak {
-		if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() {
-			fd = nil // reset since the weak reference is not linked in
-		}
-	}
 
 	if fd == nil {
 		// Field is unknown.
diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
index d972a3d98e..b53805056a 100644
--- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
+++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
@@ -185,11 +185,6 @@ func (d decoder) unmarshalMessage(m protoreflect.Message, checkDelims bool) erro
 		} else if xtErr != nil && xtErr != protoregistry.NotFound {
 			return d.newError(tok.Pos(), "unable to resolve [%s]: %v", tok.RawString(), xtErr)
 		}
-		if flags.ProtoLegacyWeak {
-			if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() {
-				fd = nil // reset since the weak reference is not linked in
-			}
-		}
 
 		// Handle unknown fields.
 		if fd == nil {
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
index 7e87c76044..669133d04d 100644
--- a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
+++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
@@ -26,7 +26,7 @@ var byteType = reflect.TypeOf(byte(0))
 // The type is the underlying field type (e.g., a repeated field may be
 // represented by []T, but the Go type passed in is just T).
 // A list of enum value descriptors must be provided for enum fields.
-// This does not populate the Enum or Message (except for weak message).
+// This does not populate the Enum or Message.
 //
 // This function is a best effort attempt; parsing errors are ignored.
 func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescriptors) protoreflect.FieldDescriptor {
@@ -109,9 +109,6 @@ func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescri
 		}
 	case s == "packed":
 		f.L1.EditionFeatures.IsPacked = true
-	case strings.HasPrefix(s, "weak="):
-		f.L1.IsWeak = true
-		f.L1.Message = filedesc.PlaceholderMessage(protoreflect.FullName(s[len("weak="):]))
 	case strings.HasPrefix(s, "def="):
 		// The default tag is special in that everything afterwards is the
 		// default regardless of the presence of commas.
@@ -183,9 +180,6 @@ func Marshal(fd protoreflect.FieldDescriptor, enumName string) string {
 		// the exact same semantics from the previous generator.
 		tag = append(tag, "json="+jsonName)
 	}
-	if fd.IsWeak() {
-		tag = append(tag, "weak="+string(fd.Message().FullName()))
-	}
 	// The previous implementation does not tag extension fields as proto3,
 	// even when the field is defined in a proto3 file. Match that behavior
 	// for consistency.
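
For readers unfamiliar with the tags that tag.Unmarshal above parses: generated Go structs carry the wire type, field number, cardinality, and options in a `protobuf:"..."` struct tag, and `weak=` was one such option until this change. The following is an illustrative toy only, not the library's parser (the real one also special-cases `def=`, whose value may itself contain commas); the `example` type and its field are hypothetical stand-ins for protoc-gen-go output.

package main

import (
	"fmt"
	"reflect"
	"strings"
)

type example struct {
	// Hypothetical field mimicking a protoc-gen-go struct tag.
	Name string `protobuf:"bytes,1,opt,name=name,proto3"`
}

func main() {
	f, _ := reflect.TypeOf(example{}).FieldByName("Name")
	tag := f.Tag.Get("protobuf")
	// Naive split; tag.Unmarshal switches over options much like this.
	for _, opt := range strings.Split(tag, ",") {
		fmt.Println(opt) // bytes, 1, opt, name=name, proto3
	}
}
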
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index 378b826faa..688aabe434 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -19,7 +19,6 @@ import ( "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" ) // Edition is an Enum for proto2.Edition @@ -275,7 +274,6 @@ type ( Kind protoreflect.Kind StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto - IsWeak bool // promoted from google.protobuf.FieldOptions IsLazy bool // promoted from google.protobuf.FieldOptions Default defaultValue ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields @@ -369,7 +367,7 @@ func (fd *Field) IsPacked() bool { return fd.L1.EditionFeatures.IsPacked } func (fd *Field) IsExtension() bool { return false } -func (fd *Field) IsWeak() bool { return fd.L1.IsWeak } +func (fd *Field) IsWeak() bool { return false } func (fd *Field) IsLazy() bool { return fd.L1.IsLazy } func (fd *Field) IsList() bool { return fd.Cardinality() == protoreflect.Repeated && !fd.IsMap() } func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() } @@ -396,11 +394,6 @@ func (fd *Field) Enum() protoreflect.EnumDescriptor { return fd.L1.Enum } func (fd *Field) Message() protoreflect.MessageDescriptor { - if fd.L1.IsWeak { - if d, _ := protoregistry.GlobalFiles.FindDescriptorByName(fd.L1.Message.FullName()); d != nil { - return d.(protoreflect.MessageDescriptor) - } - } return fd.L1.Message } func (fd *Field) IsMapEntry() bool { diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index 67a51b327c..d4c94458bd 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -32,11 +32,6 @@ func (file *File) resolveMessages() { for j := range md.L2.Fields.List { fd := &md.L2.Fields.List[j] - // Weak fields are resolved upon actual use. - if fd.L1.IsWeak { - continue - } - // Resolve message field dependency. switch fd.L1.Kind { case protoreflect.EnumKind: @@ -150,8 +145,6 @@ func (fd *File) unmarshalFull(b []byte) { switch num { case genid.FileDescriptorProto_PublicDependency_field_number: fd.L2.Imports[v].IsPublic = true - case genid.FileDescriptorProto_WeakDependency_field_number: - fd.L2.Imports[v].IsWeak = true } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) @@ -502,8 +495,6 @@ func (fd *Field) unmarshalOptions(b []byte) { switch num { case genid.FieldOptions_Packed_field_number: fd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) - case genid.FieldOptions_Weak_field_number: - fd.L1.IsWeak = protowire.DecodeBool(v) case genid.FieldOptions_Lazy_field_number: fd.L1.IsLazy = protowire.DecodeBool(v) case FieldOptions_EnforceUTF8: diff --git a/vendor/google.golang.org/protobuf/internal/filetype/build.go b/vendor/google.golang.org/protobuf/internal/filetype/build.go index ba83fea44c..e1b4130bd2 100644 --- a/vendor/google.golang.org/protobuf/internal/filetype/build.go +++ b/vendor/google.golang.org/protobuf/internal/filetype/build.go @@ -63,7 +63,7 @@ type Builder struct { // message declarations in "flattened ordering". 
// // Dependencies are Go types for enums or messages referenced by - // message fields (excluding weak fields), for parent extended messages of + // message fields, for parent extended messages of // extension fields, for enums or messages referenced by extension fields, // and for input and output messages referenced by service methods. // Dependencies must come after declarations, but the ordering of diff --git a/vendor/google.golang.org/protobuf/internal/flags/flags.go b/vendor/google.golang.org/protobuf/internal/flags/flags.go index 5cb3ee70f9..a06ccabc2f 100644 --- a/vendor/google.golang.org/protobuf/internal/flags/flags.go +++ b/vendor/google.golang.org/protobuf/internal/flags/flags.go @@ -6,7 +6,7 @@ package flags // ProtoLegacy specifies whether to enable support for legacy functionality -// such as MessageSets, weak fields, and various other obscure behavior +// such as MessageSets, and various other obscure behavior // that is necessary to maintain backwards compatibility with proto1 or // the pre-release variants of proto2 and proto3. // @@ -22,8 +22,3 @@ const ProtoLegacy = protoLegacy // extension fields at unmarshal time, but defers creating the message // structure until the extension is first accessed. const LazyUnmarshalExtensions = ProtoLegacy - -// ProtoLegacyWeak specifies whether to enable support for weak fields. -// This flag was split out of ProtoLegacy in preparation for removing -// support for weak fields (independent of the other protolegacy features). -const ProtoLegacyWeak = ProtoLegacy diff --git a/vendor/google.golang.org/protobuf/internal/genid/goname.go b/vendor/google.golang.org/protobuf/internal/genid/goname.go index 693d2e9e1f..99bb95bafd 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/goname.go +++ b/vendor/google.golang.org/protobuf/internal/genid/goname.go @@ -11,15 +11,10 @@ const ( SizeCache_goname = "sizeCache" SizeCacheA_goname = "XXX_sizecache" - WeakFields_goname = "weakFields" - WeakFieldsA_goname = "XXX_weak" - UnknownFields_goname = "unknownFields" UnknownFieldsA_goname = "XXX_unrecognized" ExtensionFields_goname = "extensionFields" ExtensionFieldsA_goname = "XXX_InternalExtensions" ExtensionFieldsB_goname = "XXX_extensions" - - WeakFieldPrefix_goname = "XXX_weak_" ) diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go index 7c1f66c8c1..d14d7d93cc 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go @@ -5,15 +5,12 @@ package impl import ( - "fmt" "reflect" - "sync" "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" "google.golang.org/protobuf/runtime/protoiface" ) @@ -121,78 +118,6 @@ func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si } } -func makeWeakMessageFieldCoder(fd protoreflect.FieldDescriptor) pointerCoderFuncs { - var once sync.Once - var messageType protoreflect.MessageType - lazyInit := func() { - once.Do(func() { - messageName := fd.Message().FullName() - messageType, _ = protoregistry.GlobalTypes.FindMessageByName(messageName) - }) - } - - return pointerCoderFuncs{ - size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { - m, ok := p.WeakFields().get(f.num) - if !ok { - return 0 - } - lazyInit() - if messageType == nil 
{ - panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName())) - } - return sizeMessage(m, f.tagsize, opts) - }, - marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - m, ok := p.WeakFields().get(f.num) - if !ok { - return b, nil - } - lazyInit() - if messageType == nil { - panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName())) - } - return appendMessage(b, m, f.wiretag, opts) - }, - unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { - fs := p.WeakFields() - m, ok := fs.get(f.num) - if !ok { - lazyInit() - if messageType == nil { - return unmarshalOutput{}, errUnknown - } - m = messageType.New().Interface() - fs.set(f.num, m) - } - return consumeMessage(b, m, wtyp, opts) - }, - isInit: func(p pointer, f *coderFieldInfo) error { - m, ok := p.WeakFields().get(f.num) - if !ok { - return nil - } - return proto.CheckInitialized(m) - }, - merge: func(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { - sm, ok := src.WeakFields().get(f.num) - if !ok { - return - } - dm, ok := dst.WeakFields().get(f.num) - if !ok { - lazyInit() - if messageType == nil { - panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName())) - } - dm = messageType.New().Interface() - dst.WeakFields().set(f.num, dm) - } - opts.Merge(dm, sm) - }, - } -} - func makeMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { if mi := getMessageInfo(ft); mi != nil { funcs := pointerCoderFuncs{ diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go index 111d95833d..f78b57b046 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go @@ -119,9 +119,6 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { } case isOneof: fieldOffset = offsetOf(fs) - case fd.IsWeak(): - fieldOffset = si.weakOffset - funcs = makeWeakMessageFieldCoder(fd) default: fieldOffset = offsetOf(fs) childMessage, funcs = fieldCoder(fd, ft) diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go index f81d7d0db9..41c1f74ef8 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go @@ -46,9 +46,6 @@ func (mi *MessageInfo) makeOpaqueCoderMethods(t reflect.Type, si opaqueStructInf switch { case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): fieldOffset = offsetOf(fs) - case fd.IsWeak(): - fieldOffset = si.weakOffset - funcs = makeWeakMessageFieldCoder(fd) case fd.Message() != nil && !fd.IsMap(): fieldOffset = offsetOf(fs) if fd.IsList() { diff --git a/vendor/google.golang.org/protobuf/internal/impl/lazy.go b/vendor/google.golang.org/protobuf/internal/impl/lazy.go index e8fb6c35b4..c7de31e243 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/lazy.go +++ b/vendor/google.golang.org/protobuf/internal/impl/lazy.go @@ -131,7 +131,7 @@ func (mi *MessageInfo) skipField(b []byte, f *coderFieldInfo, wtyp protowire.Typ fmi := f.validation.mi if fmi == nil { fd := mi.Desc.Fields().ByNumber(f.num) - if fd == nil || !fd.IsWeak() { + if fd == nil { return out, ValidationUnknown } messageName := fd.Message().FullName() diff --git 
a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go index bf0b6049b4..a51dffbe29 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -310,12 +310,9 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, fd.L0.Parent = md fd.L0.Index = n - if fd.L1.IsWeak || fd.L1.EditionFeatures.IsPacked { + if fd.L1.EditionFeatures.IsPacked { fd.L1.Options = func() protoreflect.ProtoMessage { opts := descopts.Field.ProtoReflect().New() - if fd.L1.IsWeak { - opts.Set(opts.Descriptor().Fields().ByName("weak"), protoreflect.ValueOfBool(true)) - } if fd.L1.EditionFeatures.IsPacked { opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.EditionFeatures.IsPacked)) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index d1f79b4224..d50423dcb7 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -14,7 +14,6 @@ import ( "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" ) // MessageInfo provides protobuf related functionality for a given Go type @@ -120,7 +119,6 @@ type ( var ( sizecacheType = reflect.TypeOf(SizeCache(0)) - weakFieldsType = reflect.TypeOf(WeakFields(nil)) unknownFieldsAType = reflect.TypeOf(unknownFieldsA(nil)) unknownFieldsBType = reflect.TypeOf(unknownFieldsB(nil)) extensionFieldsType = reflect.TypeOf(ExtensionFields(nil)) @@ -129,8 +127,6 @@ var ( type structInfo struct { sizecacheOffset offset sizecacheType reflect.Type - weakOffset offset - weakType reflect.Type unknownOffset offset unknownType reflect.Type extensionOffset offset @@ -148,7 +144,6 @@ type structInfo struct { func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo { si := structInfo{ sizecacheOffset: invalidOffset, - weakOffset: invalidOffset, unknownOffset: invalidOffset, extensionOffset: invalidOffset, lazyOffset: invalidOffset, @@ -168,11 +163,6 @@ fieldLoop: si.sizecacheOffset = offsetOf(f) si.sizecacheType = f.Type } - case genid.WeakFields_goname, genid.WeakFieldsA_goname: - if f.Type == weakFieldsType { - si.weakOffset = offsetOf(f) - si.weakType = f.Type - } case genid.UnknownFields_goname, genid.UnknownFieldsA_goname: if f.Type == unknownFieldsAType || f.Type == unknownFieldsBType { si.unknownOffset = offsetOf(f) @@ -256,9 +246,6 @@ func (mi *MessageInfo) Message(i int) protoreflect.MessageType { mi.init() fd := mi.Desc.Fields().Get(i) switch { - case fd.IsWeak(): - mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()) - return mt case fd.IsMap(): return mapEntryType{fd.Message(), mi.fieldTypes[fd.Number()]} default: diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go index d8dcd78863..dd55e8e009 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go @@ -56,9 +56,6 @@ func opaqueInitHook(mi *MessageInfo) bool { usePresence, _ := usePresenceForField(si, fd) switch { - case fd.IsWeak(): - // Weak fields are no different for opaque. 
- fi = fieldInfoForWeakMessage(fd, si.weakOffset) case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): // Oneofs are no different for opaque. fi = fieldInfoForOneof(fd, si.oneofsByName[fd.ContainingOneof().Name()], mi.Exporter, si.oneofWrappersByNumber[fd.Number()]) @@ -620,8 +617,6 @@ func usePresenceForField(si opaqueStructInfo, fd protoreflect.FieldDescriptor) ( switch { case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): return false, false - case fd.IsWeak(): - return false, false case fd.IsMap(): return false, false case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind: diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go index 31c19b54f8..0d20132fa2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go @@ -72,8 +72,6 @@ func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) { fi = fieldInfoForMap(fd, fs, mi.Exporter) case fd.IsList(): fi = fieldInfoForList(fd, fs, mi.Exporter) - case fd.IsWeak(): - fi = fieldInfoForWeakMessage(fd, si.weakOffset) case fd.Message() != nil: fi = fieldInfoForMessage(fd, fs, mi.Exporter) default: @@ -219,9 +217,6 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) { } case fd.Message() != nil: ft = fs.Type - if fd.IsWeak() { - ft = nil - } isMessage = true } if isMessage && ft != nil && ft.Kind() != reflect.Ptr { diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go index 3cd1fbc21f..68d4ae32ec 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go @@ -8,11 +8,8 @@ import ( "fmt" "math" "reflect" - "sync" - "google.golang.org/protobuf/internal/flags" "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" ) type fieldInfo struct { @@ -332,79 +329,6 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, } } -func fieldInfoForWeakMessage(fd protoreflect.FieldDescriptor, weakOffset offset) fieldInfo { - if !flags.ProtoLegacyWeak { - panic("no support for proto1 weak fields") - } - - var once sync.Once - var messageType protoreflect.MessageType - lazyInit := func() { - once.Do(func() { - messageName := fd.Message().FullName() - messageType, _ = protoregistry.GlobalTypes.FindMessageByName(messageName) - if messageType == nil { - panic(fmt.Sprintf("weak message %v for field %v is not linked in", messageName, fd.FullName())) - } - }) - } - - num := fd.Number() - return fieldInfo{ - fieldDesc: fd, - has: func(p pointer) bool { - if p.IsNil() { - return false - } - _, ok := p.Apply(weakOffset).WeakFields().get(num) - return ok - }, - clear: func(p pointer) { - p.Apply(weakOffset).WeakFields().clear(num) - }, - get: func(p pointer) protoreflect.Value { - lazyInit() - if p.IsNil() { - return protoreflect.ValueOfMessage(messageType.Zero()) - } - m, ok := p.Apply(weakOffset).WeakFields().get(num) - if !ok { - return protoreflect.ValueOfMessage(messageType.Zero()) - } - return protoreflect.ValueOfMessage(m.ProtoReflect()) - }, - set: func(p pointer, v protoreflect.Value) { - lazyInit() - m := v.Message() - if m.Descriptor() != messageType.Descriptor() { - if got, want := m.Descriptor().FullName(), 
messageType.Descriptor().FullName(); got != want { - panic(fmt.Sprintf("field %v has mismatching message descriptor: got %v, want %v", fd.FullName(), got, want)) - } - panic(fmt.Sprintf("field %v has mismatching message descriptor: %v", fd.FullName(), m.Descriptor().FullName())) - } - p.Apply(weakOffset).WeakFields().set(num, m.Interface()) - }, - mutable: func(p pointer) protoreflect.Value { - lazyInit() - fs := p.Apply(weakOffset).WeakFields() - m, ok := fs.get(num) - if !ok { - m = messageType.New().Interface() - fs.set(num, m) - } - return protoreflect.ValueOfMessage(m.ProtoReflect()) - }, - newMessage: func() protoreflect.Message { - lazyInit() - return messageType.New() - }, - newField: func() protoreflect.Value { - lazyInit() - return protoreflect.ValueOfMessage(messageType.New()) - }, - } -} - func fieldInfoForMessage(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { ft := fs.Type conv := NewConverter(ft, fd) diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index 6bed45e35c..62f8bf663e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -111,7 +111,6 @@ func (p pointer) StringSlice() *[]string { return (*[]string)(p.p func (p pointer) Bytes() *[]byte { return (*[]byte)(p.p) } func (p pointer) BytesPtr() **[]byte { return (**[]byte)(p.p) } func (p pointer) BytesSlice() *[][]byte { return (*[][]byte)(p.p) } -func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.p) } func (p pointer) Extensions() *map[int32]ExtensionField { return (*map[int32]ExtensionField)(p.p) } func (p pointer) LazyInfoPtr() **protolazy.XXX_lazyUnmarshalInfo { return (**protolazy.XXX_lazyUnmarshalInfo)(p.p) diff --git a/vendor/google.golang.org/protobuf/internal/impl/validate.go b/vendor/google.golang.org/protobuf/internal/impl/validate.go index b534a3d6db..7b2995dde5 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/validate.go +++ b/vendor/google.golang.org/protobuf/internal/impl/validate.go @@ -211,9 +211,7 @@ func newValidationInfo(fd protoreflect.FieldDescriptor, ft reflect.Type) validat switch fd.Kind() { case protoreflect.MessageKind: vi.typ = validationTypeMessage - if !fd.IsWeak() { - vi.mi = getMessageInfo(ft) - } + vi.mi = getMessageInfo(ft) case protoreflect.GroupKind: vi.typ = validationTypeGroup vi.mi = getMessageInfo(ft) @@ -320,26 +318,6 @@ State: } if f != nil { vi = f.validation - if vi.typ == validationTypeMessage && vi.mi == nil { - // Probable weak field. - // - // TODO: Consider storing the results of this lookup somewhere - // rather than recomputing it on every validation. - fd := st.mi.Desc.Fields().ByNumber(num) - if fd == nil || !fd.IsWeak() { - break - } - messageName := fd.Message().FullName() - messageType, err := protoregistry.GlobalTypes.FindMessageByName(messageName) - switch err { - case nil: - vi.mi, _ = messageType.(*MessageInfo) - case protoregistry.NotFound: - vi.typ = validationTypeBytes - default: - return out, ValidationUnknown - } - } break } // Possible extension field. diff --git a/vendor/google.golang.org/protobuf/internal/impl/weak.go b/vendor/google.golang.org/protobuf/internal/impl/weak.go deleted file mode 100644 index eb79a7ba94..0000000000 --- a/vendor/google.golang.org/protobuf/internal/impl/weak.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package impl - -import ( - "fmt" - - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" -) - -// weakFields adds methods to the exported WeakFields type for internal use. -// -// The exported type is an alias to an unnamed type, so methods can't be -// defined directly on it. -type weakFields WeakFields - -func (w weakFields) get(num protoreflect.FieldNumber) (protoreflect.ProtoMessage, bool) { - m, ok := w[int32(num)] - return m, ok -} - -func (w *weakFields) set(num protoreflect.FieldNumber, m protoreflect.ProtoMessage) { - if *w == nil { - *w = make(weakFields) - } - (*w)[int32(num)] = m -} - -func (w *weakFields) clear(num protoreflect.FieldNumber) { - delete(*w, int32(num)) -} - -func (Export) HasWeak(w WeakFields, num protoreflect.FieldNumber) bool { - _, ok := w[int32(num)] - return ok -} - -func (Export) ClearWeak(w *WeakFields, num protoreflect.FieldNumber) { - delete(*w, int32(num)) -} - -func (Export) GetWeak(w WeakFields, num protoreflect.FieldNumber, name protoreflect.FullName) protoreflect.ProtoMessage { - if m, ok := w[int32(num)]; ok { - return m - } - mt, _ := protoregistry.GlobalTypes.FindMessageByName(name) - if mt == nil { - panic(fmt.Sprintf("message %v for weak field is not linked in", name)) - } - return mt.Zero().Interface() -} - -func (Export) SetWeak(w *WeakFields, num protoreflect.FieldNumber, name protoreflect.FullName, m protoreflect.ProtoMessage) { - if m != nil { - mt, _ := protoregistry.GlobalTypes.FindMessageByName(name) - if mt == nil { - panic(fmt.Sprintf("message %v for weak field is not linked in", name)) - } - if mt != m.ProtoReflect().Type() { - panic(fmt.Sprintf("invalid message type for weak field: got %T, want %T", m, mt.Zero().Interface())) - } - } - if m == nil || !m.ProtoReflect().IsValid() { - delete(*w, int32(num)) - return - } - if *w == nil { - *w = make(weakFields) - } - (*w)[int32(num)] = m -} diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index f5c06280fe..01efc33030 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -52,7 +52,7 @@ import ( const ( Major = 1 Minor = 36 - Patch = 3 + Patch = 5 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go index e28d7acb37..4cbf1aeaf7 100644 --- a/vendor/google.golang.org/protobuf/proto/decode.go +++ b/vendor/google.golang.org/protobuf/proto/decode.go @@ -8,7 +8,6 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/errors" - "google.golang.org/protobuf/internal/flags" "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/reflect/protoreflect" @@ -172,10 +171,6 @@ func (o UnmarshalOptions) unmarshalMessageSlow(b []byte, m protoreflect.Message) var err error if fd == nil { err = errUnknown - } else if flags.ProtoLegacyWeak { - if fd.IsWeak() && fd.Message().IsPlaceholder() { - err = errUnknown // weak referent is not linked in - } } // Parse the field value. 
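
With the weak-field fallback gone from unmarshalMessageSlow above, bytes for a field the message does not declare simply take the ordinary unknown-field path: they are preserved verbatim and surface through protoreflect. A minimal sketch of what that looks like to a caller, assuming only the protobuf runtime touched in this diff (google.protobuf.Empty declares no fields, so field 1 must land in the unknown-field set):

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	// Hand-encode field number 1 as a varint with value 42.
	b := protowire.AppendTag(nil, 1, protowire.VarintType)
	b = protowire.AppendVarint(b, 42)

	m := &emptypb.Empty{}
	if err := proto.Unmarshal(b, m); err != nil {
		panic(err)
	}
	// The undeclared field is kept as raw unknown bytes, not dropped.
	fmt.Printf("unknown bytes: %x\n", m.ProtoReflect().GetUnknown())
}
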
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go index 69a0505091..823dbf3ba6 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go @@ -132,17 +132,11 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot } f.L2.Imports[i].IsPublic = true } - for _, i := range fd.GetWeakDependency() { - if !(0 <= i && int(i) < len(f.L2.Imports)) || f.L2.Imports[i].IsWeak { - return nil, errors.New("invalid or duplicate weak import index: %d", i) - } - f.L2.Imports[i].IsWeak = true - } imps := importSet{f.Path(): true} for i, path := range fd.GetDependency() { imp := &f.L2.Imports[i] f, err := r.FindFileByPath(path) - if err == protoregistry.NotFound && (o.AllowUnresolvable || imp.IsWeak) { + if err == protoregistry.NotFound && o.AllowUnresolvable { f = filedesc.PlaceholderFile(path) } else if err != nil { return nil, errors.New("could not resolve import %q: %v", path, err) diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go index ebcb4a8ab1..9da34998b1 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go @@ -149,7 +149,6 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc if opts := fd.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.FieldOptions) f.L1.Options = func() protoreflect.ProtoMessage { return opts } - f.L1.IsWeak = opts.GetWeak() f.L1.IsLazy = opts.GetLazy() if opts.Packed != nil { f.L1.EditionFeatures.IsPacked = opts.GetPacked() diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go index f3cebab29c..ff692436e9 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go @@ -43,7 +43,7 @@ func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*desc o.L1.Fields.List = append(o.L1.Fields.List, f) } - if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName()), f.IsWeak()); err != nil { + if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName())); err != nil { return errors.New("message field %q cannot resolve type: %v", f.FullName(), err) } if f.L1.Kind == protoreflect.GroupKind && (f.IsMap() || f.IsMapEntry()) { @@ -73,10 +73,10 @@ func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*desc func (r *resolver) resolveExtensionDependencies(xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) (err error) { for i, xd := range xds { x := &xs[i] - if x.L1.Extendee, err = r.findMessageDescriptor(x.Parent().FullName(), partialName(xd.GetExtendee()), false); err != nil { + if x.L1.Extendee, err = r.findMessageDescriptor(x.Parent().FullName(), partialName(xd.GetExtendee())); err != nil { return errors.New("extension field %q cannot resolve extendee: %v", x.FullName(), err) } - if x.L1.Kind, x.L2.Enum, x.L2.Message, err = r.findTarget(x.Kind(), x.Parent().FullName(), partialName(xd.GetTypeName()), false); err != nil { + if x.L1.Kind, x.L2.Enum, x.L2.Message, err = r.findTarget(x.Kind(), x.Parent().FullName(), 
partialName(xd.GetTypeName())); err != nil { return errors.New("extension field %q cannot resolve type: %v", x.FullName(), err) } if xd.DefaultValue != nil { @@ -95,11 +95,11 @@ func (r *resolver) resolveServiceDependencies(ss []filedesc.Service, sds []*desc s := &ss[i] for j, md := range sd.GetMethod() { m := &s.L2.Methods.List[j] - m.L1.Input, err = r.findMessageDescriptor(m.Parent().FullName(), partialName(md.GetInputType()), false) + m.L1.Input, err = r.findMessageDescriptor(m.Parent().FullName(), partialName(md.GetInputType())) if err != nil { return errors.New("service method %q cannot resolve input: %v", m.FullName(), err) } - m.L1.Output, err = r.findMessageDescriptor(s.FullName(), partialName(md.GetOutputType()), false) + m.L1.Output, err = r.findMessageDescriptor(s.FullName(), partialName(md.GetOutputType())) if err != nil { return errors.New("service method %q cannot resolve output: %v", m.FullName(), err) } @@ -111,16 +111,16 @@ func (r *resolver) resolveServiceDependencies(ss []filedesc.Service, sds []*desc // findTarget finds an enum or message descriptor if k is an enum, message, // group, or unknown. If unknown, and the name could be resolved, the kind // returned kind is set based on the type of the resolved descriptor. -func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.Kind, protoreflect.EnumDescriptor, protoreflect.MessageDescriptor, error) { +func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, ref partialName) (protoreflect.Kind, protoreflect.EnumDescriptor, protoreflect.MessageDescriptor, error) { switch k { case protoreflect.EnumKind: - ed, err := r.findEnumDescriptor(scope, ref, isWeak) + ed, err := r.findEnumDescriptor(scope, ref) if err != nil { return 0, nil, nil, err } return k, ed, nil, nil case protoreflect.MessageKind, protoreflect.GroupKind: - md, err := r.findMessageDescriptor(scope, ref, isWeak) + md, err := r.findMessageDescriptor(scope, ref) if err != nil { return 0, nil, nil, err } @@ -129,7 +129,7 @@ func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, // Handle unspecified kinds (possible with parsers that operate // on a per-file basis without knowledge of dependencies). 
d, err := r.findDescriptor(scope, ref) - if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + if err == protoregistry.NotFound && r.allowUnresolvable { return k, filedesc.PlaceholderEnum(ref.FullName()), filedesc.PlaceholderMessage(ref.FullName()), nil } else if err == protoregistry.NotFound { return 0, nil, nil, errors.New("%q not found", ref.FullName()) @@ -206,9 +206,9 @@ func (r *resolver) findDescriptor(scope protoreflect.FullName, ref partialName) } } -func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.EnumDescriptor, error) { +func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialName) (protoreflect.EnumDescriptor, error) { d, err := r.findDescriptor(scope, ref) - if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + if err == protoregistry.NotFound && r.allowUnresolvable { return filedesc.PlaceholderEnum(ref.FullName()), nil } else if err == protoregistry.NotFound { return nil, errors.New("%q not found", ref.FullName()) @@ -222,9 +222,9 @@ func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialNa return ed, nil } -func (r *resolver) findMessageDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.MessageDescriptor, error) { +func (r *resolver) findMessageDescriptor(scope protoreflect.FullName, ref partialName) (protoreflect.MessageDescriptor, error) { d, err := r.findDescriptor(scope, ref) - if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + if err == protoregistry.NotFound && r.allowUnresolvable { return filedesc.PlaceholderMessage(ref.FullName()), nil } else if err == protoregistry.NotFound { return nil, errors.New("%q not found", ref.FullName()) diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go index 5eaf652176..c343d9227b 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go @@ -149,12 +149,6 @@ func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds return errors.New("message field %q under proto3 optional semantics must be within a single element oneof", f.FullName()) } } - if f.IsWeak() && !flags.ProtoLegacyWeak { - return errors.New("message field %q is a weak field, which is a legacy proto1 feature that is no longer supported", f.FullName()) - } - if f.IsWeak() && (!f.HasPresence() || !isOptionalMessage(f) || f.ContainingOneof() != nil) { - return errors.New("message field %q may only be weak for an optional message", f.FullName()) - } if f.IsPacked() && !isPackable(f) { return errors.New("message field %q is not packable", f.FullName()) } @@ -199,9 +193,6 @@ func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds if f.Cardinality() != protoreflect.Optional { return errors.New("message field %q belongs in a oneof and must be optional", f.FullName()) } - if f.IsWeak() { - return errors.New("message field %q belongs in a oneof and must not be a weak reference", f.FullName()) - } } } @@ -254,9 +245,6 @@ func validateExtensionDeclarations(f *filedesc.File, xs []filedesc.Extension, xd return errors.New("extension field %q has an invalid number: %d", x.FullName(), x.Number()) } } - if xd.GetOptions().GetWeak() { - return errors.New("extension field %q cannot be a weak reference", x.FullName()) - } if x.IsPacked() && !isPackable(x) { 
return errors.New("extension field %q is not packable", x.FullName()) } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go index f55b036959..697a61b290 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go @@ -11,6 +11,7 @@ import ( "google.golang.org/protobuf/internal/editiondefaults" "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/types/descriptorpb" @@ -128,23 +129,39 @@ func mergeEditionFeatures(parentDesc protoreflect.Descriptor, child *descriptorp // We must not use proto.GetExtension(child, gofeaturespb.E_Go) // because that only works for messages we generated, but not for // dynamicpb messages. See golang/protobuf#1669. + // + // Further, we harden this code against adversarial inputs: a + // service which accepts descriptors from a possibly malicious + // source shouldn't crash. goFeatures := child.ProtoReflect().Get(gofeaturespb.E_Go.TypeDescriptor()) if !goFeatures.IsValid() { return parentFS } + gf, ok := goFeatures.Interface().(protoreflect.Message) + if !ok { + return parentFS + } // gf.Interface() could be *dynamicpb.Message or *gofeaturespb.GoFeatures. - gf := goFeatures.Message() fields := gf.Descriptor().Fields() - if fd := fields.ByName("legacy_unmarshal_json_enum"); gf.Has(fd) { + if fd := fields.ByNumber(genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number); fd != nil && + !fd.IsList() && + fd.Kind() == protoreflect.BoolKind && + gf.Has(fd) { parentFS.GenerateLegacyUnmarshalJSON = gf.Get(fd).Bool() } - if fd := fields.ByName("strip_enum_prefix"); gf.Has(fd) { + if fd := fields.ByNumber(genid.GoFeatures_StripEnumPrefix_field_number); fd != nil && + !fd.IsList() && + fd.Kind() == protoreflect.EnumKind && + gf.Has(fd) { parentFS.StripEnumPrefix = int(gf.Get(fd).Enum()) } - if fd := fields.ByName("api_level"); gf.Has(fd) { + if fd := fields.ByNumber(genid.GoFeatures_ApiLevel_field_number); fd != nil && + !fd.IsList() && + fd.Kind() == protoreflect.EnumKind && + gf.Has(fd) { parentFS.APILevel = int(gf.Get(fd).Enum()) } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go index a5de8d4001..9b880aa8c9 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go @@ -32,9 +32,6 @@ func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileD if imp.IsPublic { p.PublicDependency = append(p.PublicDependency, int32(i)) } - if imp.IsWeak { - p.WeakDependency = append(p.WeakDependency, int32(i)) - } } for i, locs := 0, file.SourceLocations(); i < locs.Len(); i++ { loc := locs.Get(i) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go index cd8fadbaf8..cd7fbc87a4 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go @@ -68,7 +68,7 @@ type Descriptor interface { // dependency is not resolved, in which case only name information is known. 
// // Placeholder types may only be returned by the following accessors - // as a result of unresolved dependencies or weak imports: + // as a result of unresolved dependencies: // // ╔═══════════════════════════════════╤═════════════════════╗ // ║ Accessor │ Descriptor ║ @@ -168,11 +168,7 @@ type FileImport struct { // The current file and the imported file must be within proto package. IsPublic bool - // IsWeak reports whether this is a weak import, which does not impose - // a direct dependency on the target file. - // - // Weak imports are a legacy proto1 feature. Equivalent behavior is - // achieved using proto2 extension fields or proto3 Any messages. + // Deprecated: support for weak fields has been removed. IsWeak bool } @@ -325,9 +321,7 @@ type FieldDescriptor interface { // specified in the source .proto file. HasOptionalKeyword() bool - // IsWeak reports whether this is a weak field, which does not impose a - // direct dependency on the target type. - // If true, then Message returns a placeholder type. + // Deprecated: support for weak fields has been removed. IsWeak() bool // IsPacked reports whether repeated primitive numeric kinds should be diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index a551e7ae94..a516337674 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -46,6 +46,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) // The full set of known editions. @@ -4360,7 +4361,7 @@ func (x *GeneratedCodeInfo_Annotation) GetSemantic() GeneratedCodeInfo_Annotatio var File_google_protobuf_descriptor_proto protoreflect.FileDescriptor -var file_google_protobuf_descriptor_proto_rawDesc = []byte{ +var file_google_protobuf_descriptor_proto_rawDesc = string([]byte{ 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, @@ -5130,16 +5131,16 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, -} +}) var ( file_google_protobuf_descriptor_proto_rawDescOnce sync.Once - file_google_protobuf_descriptor_proto_rawDescData = file_google_protobuf_descriptor_proto_rawDesc + file_google_protobuf_descriptor_proto_rawDescData []byte ) func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { file_google_protobuf_descriptor_proto_rawDescOnce.Do(func() { - file_google_protobuf_descriptor_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_descriptor_proto_rawDescData) + file_google_protobuf_descriptor_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_descriptor_proto_rawDesc), len(file_google_protobuf_descriptor_proto_rawDesc))) }) return file_google_protobuf_descriptor_proto_rawDescData } @@ -5292,7 +5293,7 @@ func file_google_protobuf_descriptor_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - 
RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_descriptor_proto_rawDesc), len(file_google_protobuf_descriptor_proto_rawDesc)), NumEnums: 17, NumMessages: 33, NumExtensions: 0, @@ -5304,7 +5305,6 @@ func file_google_protobuf_descriptor_proto_init() { MessageInfos: file_google_protobuf_descriptor_proto_msgTypes, }.Build() File_google_protobuf_descriptor_proto = out.File - file_google_protobuf_descriptor_proto_rawDesc = nil file_google_protobuf_descriptor_proto_goTypes = nil file_google_protobuf_descriptor_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go index e0b72eaf92..28d24bad79 100644 --- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go +++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go @@ -16,6 +16,7 @@ import ( descriptorpb "google.golang.org/protobuf/types/descriptorpb" reflect "reflect" sync "sync" + unsafe "unsafe" ) type GoFeatures_APILevel int32 @@ -227,7 +228,7 @@ var ( var File_google_protobuf_go_features_proto protoreflect.FileDescriptor -var file_google_protobuf_go_features_proto_rawDesc = []byte{ +var file_google_protobuf_go_features_proto_rawDesc = string([]byte{ 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, @@ -283,16 +284,16 @@ var file_google_protobuf_go_features_proto_rawDesc = []byte{ 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x70, 0x62, -} +}) var ( file_google_protobuf_go_features_proto_rawDescOnce sync.Once - file_google_protobuf_go_features_proto_rawDescData = file_google_protobuf_go_features_proto_rawDesc + file_google_protobuf_go_features_proto_rawDescData []byte ) func file_google_protobuf_go_features_proto_rawDescGZIP() []byte { file_google_protobuf_go_features_proto_rawDescOnce.Do(func() { - file_google_protobuf_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_go_features_proto_rawDescData) + file_google_protobuf_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_go_features_proto_rawDesc), len(file_google_protobuf_go_features_proto_rawDesc))) }) return file_google_protobuf_go_features_proto_rawDescData } @@ -326,7 +327,7 @@ func file_google_protobuf_go_features_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_go_features_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_go_features_proto_rawDesc), len(file_google_protobuf_go_features_proto_rawDesc)), NumEnums: 2, NumMessages: 1, NumExtensions: 1, @@ -339,7 +340,6 @@ func file_google_protobuf_go_features_proto_init() { ExtensionInfos: file_google_protobuf_go_features_proto_extTypes, }.Build() File_google_protobuf_go_features_proto = out.File - file_google_protobuf_go_features_proto_rawDesc = nil file_google_protobuf_go_features_proto_goTypes = nil file_google_protobuf_go_features_proto_depIdxs = nil } diff --git 
a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 191552cce0..497da66e91 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -122,6 +122,7 @@ import ( reflect "reflect" strings "strings" sync "sync" + unsafe "unsafe" ) // `Any` contains an arbitrary serialized protocol buffer message along with a @@ -411,7 +412,7 @@ func (x *Any) GetValue() []byte { var File_google_protobuf_any_proto protoreflect.FileDescriptor -var file_google_protobuf_any_proto_rawDesc = []byte{ +var file_google_protobuf_any_proto_rawDesc = string([]byte{ 0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x36, 0x0a, 0x03, @@ -427,16 +428,16 @@ var file_google_protobuf_any_proto_rawDesc = []byte{ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +}) var ( file_google_protobuf_any_proto_rawDescOnce sync.Once - file_google_protobuf_any_proto_rawDescData = file_google_protobuf_any_proto_rawDesc + file_google_protobuf_any_proto_rawDescData []byte ) func file_google_protobuf_any_proto_rawDescGZIP() []byte { file_google_protobuf_any_proto_rawDescOnce.Do(func() { - file_google_protobuf_any_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_any_proto_rawDescData) + file_google_protobuf_any_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_any_proto_rawDesc), len(file_google_protobuf_any_proto_rawDesc))) }) return file_google_protobuf_any_proto_rawDescData } @@ -462,7 +463,7 @@ func file_google_protobuf_any_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_any_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_any_proto_rawDesc), len(file_google_protobuf_any_proto_rawDesc)), NumEnums: 0, NumMessages: 1, NumExtensions: 0, @@ -473,7 +474,6 @@ func file_google_protobuf_any_proto_init() { MessageInfos: file_google_protobuf_any_proto_msgTypes, }.Build() File_google_protobuf_any_proto = out.File - file_google_protobuf_any_proto_rawDesc = nil file_google_protobuf_any_proto_goTypes = nil file_google_protobuf_any_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go index 34d76e6cd9..193880d181 100644 --- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -80,6 +80,7 @@ import ( reflect "reflect" sync "sync" time "time" + unsafe "unsafe" ) // A Duration represents a signed, fixed-length span of time represented @@ -288,7 +289,7 @@ func (x *Duration) GetNanos() int32 { var File_google_protobuf_duration_proto protoreflect.FileDescriptor -var file_google_protobuf_duration_proto_rawDesc = []byte{ +var file_google_protobuf_duration_proto_rawDesc = string([]byte{ 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 
0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, @@ -305,16 +306,16 @@ var file_google_protobuf_duration_proto_rawDesc = []byte{ 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +}) var ( file_google_protobuf_duration_proto_rawDescOnce sync.Once - file_google_protobuf_duration_proto_rawDescData = file_google_protobuf_duration_proto_rawDesc + file_google_protobuf_duration_proto_rawDescData []byte ) func file_google_protobuf_duration_proto_rawDescGZIP() []byte { file_google_protobuf_duration_proto_rawDescOnce.Do(func() { - file_google_protobuf_duration_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_duration_proto_rawDescData) + file_google_protobuf_duration_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_duration_proto_rawDesc), len(file_google_protobuf_duration_proto_rawDesc))) }) return file_google_protobuf_duration_proto_rawDescData } @@ -340,7 +341,7 @@ func file_google_protobuf_duration_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_duration_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_duration_proto_rawDesc), len(file_google_protobuf_duration_proto_rawDesc)), NumEnums: 0, NumMessages: 1, NumExtensions: 0, @@ -351,7 +352,6 @@ func file_google_protobuf_duration_proto_init() { MessageInfos: file_google_protobuf_duration_proto_msgTypes, }.Build() File_google_protobuf_duration_proto = out.File - file_google_protobuf_duration_proto_rawDesc = nil file_google_protobuf_duration_proto_goTypes = nil file_google_protobuf_duration_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go index 7fcdd382cc..a5b8657c4b 100644 --- a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go @@ -38,6 +38,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) // A generic empty message that you can re-use to avoid defining duplicated @@ -85,7 +86,7 @@ func (*Empty) Descriptor() ([]byte, []int) { var File_google_protobuf_empty_proto protoreflect.FileDescriptor -var file_google_protobuf_empty_proto_rawDesc = []byte{ +var file_google_protobuf_empty_proto_rawDesc = string([]byte{ 0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x07, @@ -98,16 +99,16 @@ var file_google_protobuf_empty_proto_rawDesc = []byte{ 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +}) var ( file_google_protobuf_empty_proto_rawDescOnce sync.Once - 
file_google_protobuf_empty_proto_rawDescData = file_google_protobuf_empty_proto_rawDesc + file_google_protobuf_empty_proto_rawDescData []byte ) func file_google_protobuf_empty_proto_rawDescGZIP() []byte { file_google_protobuf_empty_proto_rawDescOnce.Do(func() { - file_google_protobuf_empty_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_empty_proto_rawDescData) + file_google_protobuf_empty_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_empty_proto_rawDesc), len(file_google_protobuf_empty_proto_rawDesc))) }) return file_google_protobuf_empty_proto_rawDescData } @@ -133,7 +134,7 @@ func file_google_protobuf_empty_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_empty_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_empty_proto_rawDesc), len(file_google_protobuf_empty_proto_rawDesc)), NumEnums: 0, NumMessages: 1, NumExtensions: 0, @@ -144,7 +145,6 @@ func file_google_protobuf_empty_proto_init() { MessageInfos: file_google_protobuf_empty_proto_msgTypes, }.Build() File_google_protobuf_empty_proto = out.File - file_google_protobuf_empty_proto_rawDesc = nil file_google_protobuf_empty_proto_goTypes = nil file_google_protobuf_empty_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go index e5d7da38c2..041feb0f3e 100644 --- a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go @@ -83,6 +83,7 @@ import ( sort "sort" strings "strings" sync "sync" + unsafe "unsafe" ) // `FieldMask` represents a set of symbolic field paths, for example: @@ -503,7 +504,7 @@ func (x *FieldMask) GetPaths() []string { var File_google_protobuf_field_mask_proto protoreflect.FileDescriptor -var file_google_protobuf_field_mask_proto_rawDesc = []byte{ +var file_google_protobuf_field_mask_proto_rawDesc = string([]byte{ 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, @@ -519,16 +520,16 @@ var file_google_protobuf_field_mask_proto_rawDesc = []byte{ 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +}) var ( file_google_protobuf_field_mask_proto_rawDescOnce sync.Once - file_google_protobuf_field_mask_proto_rawDescData = file_google_protobuf_field_mask_proto_rawDesc + file_google_protobuf_field_mask_proto_rawDescData []byte ) func file_google_protobuf_field_mask_proto_rawDescGZIP() []byte { file_google_protobuf_field_mask_proto_rawDescOnce.Do(func() { - file_google_protobuf_field_mask_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_field_mask_proto_rawDescData) + file_google_protobuf_field_mask_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_field_mask_proto_rawDesc), len(file_google_protobuf_field_mask_proto_rawDesc))) }) return file_google_protobuf_field_mask_proto_rawDescData } @@ -554,7 +555,7 @@ func 
file_google_protobuf_field_mask_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_field_mask_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_field_mask_proto_rawDesc), len(file_google_protobuf_field_mask_proto_rawDesc)), NumEnums: 0, NumMessages: 1, NumExtensions: 0, @@ -565,7 +566,6 @@ func file_google_protobuf_field_mask_proto_init() { MessageInfos: file_google_protobuf_field_mask_proto_msgTypes, }.Build() File_google_protobuf_field_mask_proto = out.File - file_google_protobuf_field_mask_proto_rawDesc = nil file_google_protobuf_field_mask_proto_goTypes = nil file_google_protobuf_field_mask_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go index f2c53ea337..ecdd31ab53 100644 --- a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go @@ -128,6 +128,7 @@ import ( reflect "reflect" sync "sync" utf8 "unicode/utf8" + unsafe "unsafe" ) // `NullValue` is a singleton enumeration to represent the null value for the @@ -671,7 +672,7 @@ func (x *ListValue) GetValues() []*Value { var File_google_protobuf_struct_proto protoreflect.FileDescriptor -var file_google_protobuf_struct_proto_rawDesc = []byte{ +var file_google_protobuf_struct_proto_rawDesc = string([]byte{ 0x0a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, @@ -719,16 +720,16 @@ var file_google_protobuf_struct_proto_rawDesc = []byte{ 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +}) var ( file_google_protobuf_struct_proto_rawDescOnce sync.Once - file_google_protobuf_struct_proto_rawDescData = file_google_protobuf_struct_proto_rawDesc + file_google_protobuf_struct_proto_rawDescData []byte ) func file_google_protobuf_struct_proto_rawDescGZIP() []byte { file_google_protobuf_struct_proto_rawDescOnce.Do(func() { - file_google_protobuf_struct_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_struct_proto_rawDescData) + file_google_protobuf_struct_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_struct_proto_rawDesc), len(file_google_protobuf_struct_proto_rawDesc))) }) return file_google_protobuf_struct_proto_rawDescData } @@ -773,7 +774,7 @@ func file_google_protobuf_struct_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_struct_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_struct_proto_rawDesc), len(file_google_protobuf_struct_proto_rawDesc)), NumEnums: 1, NumMessages: 4, NumExtensions: 0, @@ -785,7 +786,6 @@ func file_google_protobuf_struct_proto_init() { MessageInfos: file_google_protobuf_struct_proto_msgTypes, }.Build() File_google_protobuf_struct_proto = out.File - file_google_protobuf_struct_proto_rawDesc = nil file_google_protobuf_struct_proto_goTypes = nil 
file_google_protobuf_struct_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go index 9550109aa3..00ac835c0b 100644 --- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -78,6 +78,7 @@ import ( reflect "reflect" sync "sync" time "time" + unsafe "unsafe" ) // A Timestamp represents a point in time independent of any time zone or local @@ -297,7 +298,7 @@ func (x *Timestamp) GetNanos() int32 { var File_google_protobuf_timestamp_proto protoreflect.FileDescriptor -var file_google_protobuf_timestamp_proto_rawDesc = []byte{ +var file_google_protobuf_timestamp_proto_rawDesc = string([]byte{ 0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, @@ -314,16 +315,16 @@ var file_google_protobuf_timestamp_proto_rawDesc = []byte{ 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +}) var ( file_google_protobuf_timestamp_proto_rawDescOnce sync.Once - file_google_protobuf_timestamp_proto_rawDescData = file_google_protobuf_timestamp_proto_rawDesc + file_google_protobuf_timestamp_proto_rawDescData []byte ) func file_google_protobuf_timestamp_proto_rawDescGZIP() []byte { file_google_protobuf_timestamp_proto_rawDescOnce.Do(func() { - file_google_protobuf_timestamp_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_timestamp_proto_rawDescData) + file_google_protobuf_timestamp_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_timestamp_proto_rawDesc), len(file_google_protobuf_timestamp_proto_rawDesc))) }) return file_google_protobuf_timestamp_proto_rawDescData } @@ -349,7 +350,7 @@ func file_google_protobuf_timestamp_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_timestamp_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_timestamp_proto_rawDesc), len(file_google_protobuf_timestamp_proto_rawDesc)), NumEnums: 0, NumMessages: 1, NumExtensions: 0, @@ -360,7 +361,6 @@ func file_google_protobuf_timestamp_proto_init() { MessageInfos: file_google_protobuf_timestamp_proto_msgTypes, }.Build() File_google_protobuf_timestamp_proto = out.File - file_google_protobuf_timestamp_proto_rawDesc = nil file_google_protobuf_timestamp_proto_goTypes = nil file_google_protobuf_timestamp_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go index 15b424ec12..5de5301063 100644 --- a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go @@ -48,6 +48,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) // Wrapper message for `double`. 
@@ -529,7 +530,7 @@ func (x *BytesValue) GetValue() []byte { var File_google_protobuf_wrappers_proto protoreflect.FileDescriptor -var file_google_protobuf_wrappers_proto_rawDesc = []byte{ +var file_google_protobuf_wrappers_proto_rawDesc = string([]byte{ 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, @@ -563,16 +564,16 @@ var file_google_protobuf_wrappers_proto_rawDesc = []byte{ 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +}) var ( file_google_protobuf_wrappers_proto_rawDescOnce sync.Once - file_google_protobuf_wrappers_proto_rawDescData = file_google_protobuf_wrappers_proto_rawDesc + file_google_protobuf_wrappers_proto_rawDescData []byte ) func file_google_protobuf_wrappers_proto_rawDescGZIP() []byte { file_google_protobuf_wrappers_proto_rawDescOnce.Do(func() { - file_google_protobuf_wrappers_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_wrappers_proto_rawDescData) + file_google_protobuf_wrappers_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_wrappers_proto_rawDesc), len(file_google_protobuf_wrappers_proto_rawDesc))) }) return file_google_protobuf_wrappers_proto_rawDescData } @@ -606,7 +607,7 @@ func file_google_protobuf_wrappers_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_wrappers_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_wrappers_proto_rawDesc), len(file_google_protobuf_wrappers_proto_rawDesc)), NumEnums: 0, NumMessages: 9, NumExtensions: 0, @@ -617,7 +618,6 @@ func file_google_protobuf_wrappers_proto_init() { MessageInfos: file_google_protobuf_wrappers_proto_msgTypes, }.Build() File_google_protobuf_wrappers_proto = out.File - file_google_protobuf_wrappers_proto_rawDesc = nil file_google_protobuf_wrappers_proto_goTypes = nil file_google_protobuf_wrappers_proto_depIdxs = nil } diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/doc.go b/vendor/k8s.io/apimachinery/pkg/api/errors/doc.go index 167baf680d..58751ed0ec 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/errors/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/doc.go @@ -15,4 +15,4 @@ limitations under the License. */ // Package errors provides detailed error types for api field validation. -package errors // import "k8s.io/apimachinery/pkg/api/errors" +package errors diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/doc.go b/vendor/k8s.io/apimachinery/pkg/api/meta/doc.go index b6d42acf8f..a3b18a5c9a 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/doc.go @@ -16,4 +16,4 @@ limitations under the License. 
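All of the regenerated well-known-type files above make the same mechanical change: the raw descriptor moves from a []byte variable to a string (which the toolchain can keep in read-only memory), and a byte view is recovered on demand with unsafe. A minimal standalone sketch of that conversion pattern, assuming Go 1.20+ for unsafe.StringData; the descriptor bytes below are invented stand-ins:

    package main

    import (
        "fmt"
        "unsafe"
    )

    // bytesOf returns a []byte view over s without copying. The slice aliases
    // the string's backing storage, so it must be treated as read-only;
    // mutating it would break Go's string immutability guarantees.
    func bytesOf(s string) []byte {
        return unsafe.Slice(unsafe.StringData(s), len(s))
    }

    func main() {
        rawDesc := string([]byte{0x0a, 0x1b, 0x67, 0x6f}) // invented stand-in for a descriptor
        b := bytesOf(rawDesc)
        fmt.Println(len(b), b[2]) // 4 103 ('g')
    }

This is presumably also why the init functions above no longer nil out rawDesc: a string global can simply stay put.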
// Package meta provides functions for retrieving API metadata from objects // belonging to the Kubernetes API -package meta // import "k8s.io/apimachinery/pkg/api/meta" +package meta diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/help.go b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go index 1fdd32c4ba..468afd0e9e 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/help.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go @@ -221,6 +221,9 @@ func extractList(obj runtime.Object, allocNew bool) ([]runtime.Object, error) { if err != nil { return nil, err } + if items.IsNil() { + return nil, nil + } list := make([]runtime.Object, items.Len()) if len(list) == 0 { return list, nil diff --git a/vendor/k8s.io/apimachinery/pkg/api/operation/operation.go b/vendor/k8s.io/apimachinery/pkg/api/operation/operation.go new file mode 100644 index 0000000000..9f5ae7a9d4 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/operation/operation.go @@ -0,0 +1,56 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package operation + +import "k8s.io/apimachinery/pkg/util/sets" + +// Operation provides contextual information about a validation request and the API +// operation being validated. +// This type is intended for use with generated validation code and may be enhanced +// in the future to include other information needed to validate requests. +type Operation struct { + // Type is the category of operation being validated. This does not + // differentiate between HTTP verbs like PUT and PATCH, but rather merges + // those into a single "Update" category. + Type Type + + // Options declare the options enabled for validation. + // + // Options should be set according to a resource validation strategy before validation + // is performed, and must be treated as read-only during validation. + // + // Options are identified by string names. Option string names may match the name of a feature + // gate, in which case the presence of the name in the set indicates that the feature is + // considered enabled for the resource being validated. Note that a resource may have a + // feature enabled even when the feature gate is disabled. This can happen when a feature is + // already in use by a resource, often because the feature gate was enabled when the + // resource first began using the feature. + // + // Unset options are disabled/false. + Options sets.Set[string] +} + +// Type is the request operation to be validated. +type Type uint32 + +const ( + // Create indicates the request being validated is for a resource create operation. + Create Type = iota + + // Update indicates the request being validated is for a resource update operation. + Update +) diff --git a/vendor/k8s.io/apimachinery/pkg/api/validation/doc.go b/vendor/k8s.io/apimachinery/pkg/api/validation/doc.go index 9f20152e45..9e305b0b18 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/validation/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/api/validation/doc.go @@ -15,4 +15,4 @@ limitations under the License.
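The new operation package above is the context handed to generated declarative validation: whether the request is a create or an update, plus the set of enabled option names. A hedged sketch of building and inspecting one; the option name is invented for illustration:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/api/operation"
        "k8s.io/apimachinery/pkg/util/sets"
    )

    func main() {
        // An update request with one (hypothetical) validation option enabled.
        op := operation.Operation{
            Type:    operation.Update,
            Options: sets.New[string]("ExampleFeatureOption"),
        }
        if op.Type == operation.Update {
            fmt.Println("update validation; options:", op.Options.UnsortedList())
        }
    }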
*/ // Package validation contains generic api type validation functions. -package validation // import "k8s.io/apimachinery/pkg/api/validation" +package validation diff --git a/vendor/k8s.io/apimachinery/pkg/api/validation/generic.go b/vendor/k8s.io/apimachinery/pkg/api/validation/generic.go index e0b5b14900..f9cada1f0f 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/validation/generic.go +++ b/vendor/k8s.io/apimachinery/pkg/api/validation/generic.go @@ -82,7 +82,7 @@ func maskTrailingDash(name string) string { func ValidateNonnegativeField(value int64, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if value < 0 { - allErrs = append(allErrs, field.Invalid(fldPath, value, IsNegativeErrorMsg)) + allErrs = append(allErrs, field.Invalid(fldPath, value, IsNegativeErrorMsg).WithOrigin("minimum")) } return allErrs } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/doc.go index 2741ee2c80..1e85c5c43d 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/doc.go @@ -17,4 +17,4 @@ limitations under the License. // +k8s:deepcopy-gen=package // +k8s:conversion-gen=k8s.io/apimachinery/pkg/apis/meta/v1 -package internalversion // import "k8s.io/apimachinery/pkg/apis/meta/internalversion" +package internalversion diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/doc.go index a45fa2a8a5..b5ce956ee5 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/doc.go @@ -14,4 +14,4 @@ See the License for the specific language governing permissions and limitations under the License. */ -package scheme // import "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" +package scheme diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go index 00d2b8c689..8c60e7d2a8 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go @@ -41,8 +41,6 @@ type ListOptions struct { // assume bookmarks are returned at any specific interval, nor may they // assume the server will send any BOOKMARK event during a session. // If this is not a watch, this field is ignored. - // If the feature gate WatchBookmarks is not enabled in apiserver, - // this field is ignored. AllowWatchBookmarks bool // resourceVersion sets a constraint on what resource versions a request may be served from. // See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go index 7736753d66..617b9a5d30 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go @@ -21,4 +21,4 @@ limitations under the License. 
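The WithOrigin calls added in this patch ("minimum" here, "labelKey" further down) tag a field.Error with the name of the declarative rule that produced it, so tooling can correlate handwritten and generated validation errors. A small sketch of the same pattern; the field path and value are invented:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/util/validation/field"
    )

    func main() {
        path := field.NewPath("spec", "replicas") // invented field path
        err := field.Invalid(path, int64(-1), "must be greater than or equal to 0").WithOrigin("minimum")
        fmt.Println(err.Error()) // spec.replicas: Invalid value: -1: must be greater than or equal to 0
    }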
// +groupName=meta.k8s.io -package v1 // import "k8s.io/apimachinery/pkg/apis/meta/v1" +package v1 diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_fuzz.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_fuzz.go index 3cf9d48e96..a5f437b4b3 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_fuzz.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_fuzz.go @@ -20,21 +20,22 @@ limitations under the License. package v1 import ( + "math/rand" "time" - fuzz "github.com/google/gofuzz" + "sigs.k8s.io/randfill" ) -// Fuzz satisfies fuzz.Interface. -func (t *MicroTime) Fuzz(c fuzz.Continue) { +// Fuzz satisfies randfill.SimpleSelfFiller. +func (t *MicroTime) RandFill(r *rand.Rand) { if t == nil { return } // Allow for about 1000 years of randomness. Accurate to a tenth of // micro second. Leave off nanoseconds because JSON doesn't // represent them so they can't round-trip properly. - t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60), 1000*c.Rand.Int63n(1000000)) + t.Time = time.Unix(r.Int63n(1000*365*24*60*60), 1000*r.Int63n(1000000)) } -// ensure MicroTime implements fuzz.Interface -var _ fuzz.Interface = &MicroTime{} +// ensure MicroTime implements randfill.Interface +var _ randfill.SimpleSelfFiller = &MicroTime{} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_fuzz.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_fuzz.go index bf9e21b5bd..48fb978450 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_fuzz.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_fuzz.go @@ -20,21 +20,22 @@ limitations under the License. package v1 import ( + "math/rand" "time" - fuzz "github.com/google/gofuzz" + "sigs.k8s.io/randfill" ) -// Fuzz satisfies fuzz.Interface. -func (t *Time) Fuzz(c fuzz.Continue) { +// Fuzz satisfies randfill.SimpleSelfFiller. +func (t *Time) RandFill(r *rand.Rand) { if t == nil { return } // Allow for about 1000 years of randomness. Leave off nanoseconds // because JSON doesn't represent them so they can't round-trip // properly. - t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60), 0) + t.Time = time.Unix(r.Int63n(1000*365*24*60*60), 0) } -// ensure Time implements fuzz.Interface -var _ fuzz.Interface = &Time{} +// ensure Time implements randfill.SimpleSelfFiller +var _ randfill.SimpleSelfFiller = &Time{} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go index 71f7b163a1..59f43b7bff 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go @@ -188,7 +188,7 @@ func NestedSlice(obj map[string]interface{}, fields ...string) ([]interface{}, b // NestedStringMap returns a copy of map[string]string value of a nested field. // Returns false if value is not found and an error if not a map[string]interface{} or contains non-string values in the map. func NestedStringMap(obj map[string]interface{}, fields ...string) (map[string]string, bool, error) { - m, found, err := nestedMapNoCopy(obj, fields...) + m, found, err := nestedMapNoCopy(obj, false, fields...) if !found || err != nil { return nil, found, err } @@ -203,10 +203,32 @@ func NestedStringMap(obj map[string]interface{}, fields ...string) (map[string]s return strMap, true, nil } +// NestedNullCoercingStringMap returns a copy of map[string]string value of a nested field. +// Returns `nil, true, nil` if the value exists and is explicitly null. 
+// Returns `nil, false, err` if the value is not a map or a null value, or is a map and contains non-string non-null values. +// Null values in the map are coerced to "" to match json decoding behavior. +func NestedNullCoercingStringMap(obj map[string]interface{}, fields ...string) (map[string]string, bool, error) { + m, found, err := nestedMapNoCopy(obj, true, fields...) + if !found || err != nil || m == nil { + return nil, found, err + } + strMap := make(map[string]string, len(m)) + for k, v := range m { + if str, ok := v.(string); ok { + strMap[k] = str + } else if v == nil { + strMap[k] = "" + } else { + return nil, false, fmt.Errorf("%v accessor error: contains non-string value in the map under key %q: %v is of the type %T, expected string", jsonPath(fields), k, v, v) + } + } + return strMap, true, nil +} + // NestedMap returns a deep copy of map[string]interface{} value of a nested field. // Returns false if value is not found and an error if not a map[string]interface{}. func NestedMap(obj map[string]interface{}, fields ...string) (map[string]interface{}, bool, error) { - m, found, err := nestedMapNoCopy(obj, fields...) + m, found, err := nestedMapNoCopy(obj, false, fields...) if !found || err != nil { return nil, found, err } @@ -215,11 +237,14 @@ func NestedMap(obj map[string]interface{}, fields ...string) (map[string]interfa // nestedMapNoCopy returns a map[string]interface{} value of a nested field. // Returns false if value is not found and an error if not a map[string]interface{}. -func nestedMapNoCopy(obj map[string]interface{}, fields ...string) (map[string]interface{}, bool, error) { +func nestedMapNoCopy(obj map[string]interface{}, tolerateNil bool, fields ...string) (map[string]interface{}, bool, error) { val, found, err := NestedFieldNoCopy(obj, fields...) 
if !found || err != nil { return nil, found, err } + if val == nil && tolerateNil { + return nil, true, nil + } m, ok := val.(map[string]interface{}) if !ok { return nil, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected map[string]interface{}", jsonPath(fields), val, val) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go index 5e36a91ee4..fdb0c86297 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go @@ -397,7 +397,7 @@ func (u *Unstructured) SetDeletionGracePeriodSeconds(deletionGracePeriodSeconds } func (u *Unstructured) GetLabels() map[string]string { - m, _, _ := NestedStringMap(u.Object, "metadata", "labels") + m, _, _ := NestedNullCoercingStringMap(u.Object, "metadata", "labels") return m } @@ -410,7 +410,7 @@ func (u *Unstructured) SetLabels(labels map[string]string) { } func (u *Unstructured) GetAnnotations() map[string]string { - m, _, _ := NestedStringMap(u.Object, "metadata", "annotations") + m, _, _ := NestedNullCoercingStringMap(u.Object, "metadata", "annotations") return m } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go index b1eb1bbfc4..95240b74d2 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation/validation.go @@ -104,7 +104,7 @@ func ValidateLabelSelectorRequirement(sr metav1.LabelSelectorRequirement, opts L func ValidateLabelName(labelName string, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} for _, msg := range validation.IsQualifiedName(labelName) { - allErrs = append(allErrs, field.Invalid(fldPath, labelName, msg)) + allErrs = append(allErrs, field.Invalid(fldPath, labelName, msg).WithOrigin("labelKey")) } return allErrs } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go index 20c9d2ec73..46b0e133c3 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go @@ -20,4 +20,4 @@ limitations under the License. // +groupName=meta.k8s.io -package v1beta1 // import "k8s.io/apimachinery/pkg/apis/meta/v1beta1" +package v1beta1 diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/doc.go b/vendor/k8s.io/apimachinery/pkg/conversion/doc.go index 7415d81646..0c46ef2d16 100644 --- a/vendor/k8s.io/apimachinery/pkg/conversion/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/conversion/doc.go @@ -21,4 +21,4 @@ limitations under the License. // but for the fields which did not change, copying is automated. This makes it // easy to modify the structures you use in memory without affecting the format // you store on disk or respond to in your external API calls. -package conversion // import "k8s.io/apimachinery/pkg/conversion" +package conversion diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go index 7b763de6f0..4c1002a4c1 100644 --- a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go @@ -16,4 +16,4 @@ limitations under the License. 
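With NestedNullCoercingStringMap wired into GetLabels and GetAnnotations above, a label whose value is explicitly null now comes back as "" instead of the whole map being dropped, matching how encoding/json decodes into map[string]string. A sketch; the object contents are invented:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    )

    func main() {
        u := &unstructured.Unstructured{Object: map[string]interface{}{
            "metadata": map[string]interface{}{
                "labels": map[string]interface{}{
                    "app":  "demo",
                    "tier": nil, // explicit null, as JSON decoding can produce
                },
            },
        }}
        // The null value is coerced to "" rather than failing the accessor.
        fmt.Println(u.GetLabels()) // map[app:demo tier:]
    }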
// Package queryparams provides conversion from versioned // runtime objects to URL query values -package queryparams // import "k8s.io/apimachinery/pkg/conversion/queryparams" +package queryparams diff --git a/vendor/k8s.io/apimachinery/pkg/fields/doc.go b/vendor/k8s.io/apimachinery/pkg/fields/doc.go index c39b8039ae..49059e2635 100644 --- a/vendor/k8s.io/apimachinery/pkg/fields/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/fields/doc.go @@ -16,4 +16,4 @@ limitations under the License. // Package fields implements a simple field system, parsing and matching // selectors with sets of fields. -package fields // import "k8s.io/apimachinery/pkg/fields" +package fields diff --git a/vendor/k8s.io/apimachinery/pkg/labels/doc.go b/vendor/k8s.io/apimachinery/pkg/labels/doc.go index 82de0051bd..35ba788094 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/doc.go @@ -16,4 +16,4 @@ limitations under the License. // Package labels implements a simple label system, parsing and matching // selectors with sets of labels. -package labels // import "k8s.io/apimachinery/pkg/labels" +package labels diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/doc.go b/vendor/k8s.io/apimachinery/pkg/runtime/doc.go index 89feb40103..b54429bd89 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/doc.go @@ -48,4 +48,4 @@ limitations under the License. // // As a bonus, a few common types useful from all api objects and versions // are provided in types.go. -package runtime // import "k8s.io/apimachinery/pkg/runtime" +package runtime diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go index 2703300cd5..202bf4f01a 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go @@ -259,6 +259,7 @@ type ObjectDefaulter interface { type ObjectVersioner interface { ConvertToVersion(in Object, gv GroupVersioner) (out Object, err error) + PrioritizedVersionsForGroup(group string) []schema.GroupVersion } // ObjectConvertor converts an object to a different version. diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go index a5b116718d..fde87f1a13 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go @@ -17,15 +17,18 @@ limitations under the License. package runtime import ( + "context" "fmt" "reflect" "strings" + "k8s.io/apimachinery/pkg/api/operation" "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/naming" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation/field" ) // Scheme defines methods for serializing and deserializing API objects, a type @@ -68,6 +71,12 @@ type Scheme struct { // the provided object must be a pointer. defaulterFuncs map[reflect.Type]func(interface{}) + // validationFuncs is a map to funcs to be called with an object to perform validation. + // The provided object must be a pointer. + // If oldObject is non-nil, update validation is performed and may perform additional + // validation such as transition rules and immutability checks. 
+ validationFuncs map[reflect.Type]func(ctx context.Context, op operation.Operation, object, oldObject interface{}, subresources ...string) field.ErrorList + + // converter stores all registered conversion functions. It also has // default converting behavior. converter *conversion.Converter @@ -96,6 +105,7 @@ func NewScheme() *Scheme { unversionedKinds: map[string]reflect.Type{}, fieldLabelConversionFuncs: map[schema.GroupVersionKind]FieldLabelConversionFunc{}, defaulterFuncs: map[reflect.Type]func(interface{}){}, + validationFuncs: map[reflect.Type]func(ctx context.Context, op operation.Operation, object, oldObject interface{}, subresource ...string) field.ErrorList{}, versionPriority: map[string][]string{}, schemeName: naming.GetNameFromCallsite(internalPackages...), } @@ -347,6 +357,35 @@ func (s *Scheme) Default(src Object) { } } +// AddValidationFunc registers a function that can validate the object, and +// oldObject. These functions will be invoked when Validate() or ValidateUpdate() +// is called. The function will never be called unless the validated object +// matches srcType. If this function is invoked twice with the same srcType, the +// fn passed to the later call will be used instead. +func (s *Scheme) AddValidationFunc(srcType Object, fn func(ctx context.Context, op operation.Operation, object, oldObject interface{}, subresources ...string) field.ErrorList) { + s.validationFuncs[reflect.TypeOf(srcType)] = fn +} + +// Validate validates the provided Object according to the generated declarative validation code. +// WARNING: This does not validate all objects! The handwritten validation code in validation.go +// is not run when this is called. Only the generated zz_generated.validations.go validation code is run. +func (s *Scheme) Validate(ctx context.Context, options sets.Set[string], object Object, subresources ...string) field.ErrorList { + if fn, ok := s.validationFuncs[reflect.TypeOf(object)]; ok { + return fn(ctx, operation.Operation{Type: operation.Create, Options: options}, object, nil, subresources...) + } + return nil +} + +// ValidateUpdate validates the provided object and oldObject according to the generated declarative validation code. +// WARNING: This does not validate all objects! The handwritten validation code in validation.go +// is not run when this is called. Only the generated zz_generated.validations.go validation code is run. +func (s *Scheme) ValidateUpdate(ctx context.Context, options sets.Set[string], object, oldObject Object, subresources ...string) field.ErrorList { + if fn, ok := s.validationFuncs[reflect.TypeOf(object)]; ok { + return fn(ctx, operation.Operation{Type: operation.Update, Options: options}, object, oldObject, subresources...) + } + return nil +} + // Convert will attempt to convert in into out. Both must be pointers. For easy // testing of conversion functions. Returns an error if the conversion isn't // possible.
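Taken together, these hooks form a small per-type registry: AddValidationFunc installs a function and Validate/ValidateUpdate dispatch to it with the appropriate operation.Operation. A hedged sketch; metav1.Status stands in only because it already implements runtime.Object, and the rule itself is invented:

    package main

    import (
        "context"
        "fmt"

        "k8s.io/apimachinery/pkg/api/operation"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/runtime"
        "k8s.io/apimachinery/pkg/util/sets"
        "k8s.io/apimachinery/pkg/util/validation/field"
    )

    func main() {
        scheme := runtime.NewScheme()

        // Hypothetical "generated" validation for metav1.Status.
        scheme.AddValidationFunc(&metav1.Status{}, func(ctx context.Context, op operation.Operation, object, oldObject interface{}, subresources ...string) field.ErrorList {
            if op.Type == operation.Create && object.(*metav1.Status).Message == "" {
                return field.ErrorList{field.Required(field.NewPath("message"), "")}
            }
            return nil
        })

        errs := scheme.Validate(context.Background(), sets.New[string](), &metav1.Status{})
        fmt.Println(errs) // [message: Required value]
    }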
You can call this with types that haven't been registered (for example, diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/custom.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/custom.go index 858529e958..e550ea348d 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/custom.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/cbor/internal/modes/custom.go @@ -140,7 +140,7 @@ func (cache *checkers) getCheckerInternal(rt reflect.Type, parent *path) (c chec var wg sync.WaitGroup wg.Add(1) defer wg.Done() - c = checker{ + placeholder := checker{ safe: func() bool { wg.Wait() return c.safe() @@ -150,7 +150,7 @@ func (cache *checkers) getCheckerInternal(rt reflect.Type, parent *path) (c chec return c.check(rv, depth) }, } - if actual, loaded := cache.m.LoadOrStore(rt, &c); loaded { + if actual, loaded := cache.m.LoadOrStore(rt, &placeholder); loaded { // Someone else stored an entry for this type, use it. return *actual.(*checker) } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go index 77bb307452..81286fccb4 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go @@ -28,7 +28,7 @@ import ( func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, options CodecFactoryOptions) []runtime.SerializerInfo { jsonSerializer := json.NewSerializerWithOptions( mf, scheme, scheme, - json.SerializerOptions{Yaml: false, Pretty: false, Strict: options.Strict}, + json.SerializerOptions{Yaml: false, Pretty: false, Strict: options.Strict, StreamingCollectionsEncoding: options.StreamingCollectionsEncodingToJSON}, ) jsonSerializerType := runtime.SerializerInfo{ MediaType: runtime.ContentTypeJSON, @@ -38,7 +38,7 @@ func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, option Serializer: jsonSerializer, StrictSerializer: json.NewSerializerWithOptions( mf, scheme, scheme, - json.SerializerOptions{Yaml: false, Pretty: false, Strict: true}, + json.SerializerOptions{Yaml: false, Pretty: false, Strict: true, StreamingCollectionsEncoding: options.StreamingCollectionsEncodingToJSON}, ), StreamSerializer: &runtime.StreamSerializerInfo{ EncodesAsText: true, @@ -61,7 +61,9 @@ func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, option mf, scheme, scheme, json.SerializerOptions{Yaml: true, Pretty: false, Strict: true}, ) - protoSerializer := protobuf.NewSerializer(scheme, scheme) + protoSerializer := protobuf.NewSerializerWithOptions(scheme, scheme, protobuf.SerializerOptions{ + StreamingCollectionsEncoding: options.StreamingCollectionsEncodingToProtobuf, + }) protoRawSerializer := protobuf.NewRawSerializer(scheme, scheme) serializers := []runtime.SerializerInfo{ @@ -113,6 +115,9 @@ type CodecFactoryOptions struct { // Pretty includes a pretty serializer along with the non-pretty one Pretty bool + StreamingCollectionsEncodingToJSON bool + StreamingCollectionsEncodingToProtobuf bool + serializers []func(runtime.ObjectCreater, runtime.ObjectTyper) runtime.SerializerInfo } @@ -147,6 +152,18 @@ func WithSerializer(f func(runtime.ObjectCreater, runtime.ObjectTyper) runtime.S } } +func WithStreamingCollectionEncodingToJSON() CodecFactoryOptionsMutator { + return func(options *CodecFactoryOptions) { + options.StreamingCollectionsEncodingToJSON = true + } +} + +func 
WithStreamingCollectionEncodingToProtobuf() CodecFactoryOptionsMutator { + return func(options *CodecFactoryOptions) { + options.StreamingCollectionsEncodingToProtobuf = true + } +} + // NewCodecFactory provides methods for retrieving serializers for the supported wire formats // and conversion wrappers to define preferred internal and external versions. In the future, // as the internal version is used less, callers may instead use a defaulting serializer and diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/collections.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/collections.go new file mode 100644 index 0000000000..075163ddd8 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/collections.go @@ -0,0 +1,230 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package json + +import ( + "encoding/json" + "fmt" + "io" + "maps" + "slices" + "sort" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/conversion" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" +) + +func streamEncodeCollections(obj runtime.Object, w io.Writer) (bool, error) { + list, ok := obj.(*unstructured.UnstructuredList) + if ok { + return true, streamingEncodeUnstructuredList(w, list) + } + if _, ok := obj.(json.Marshaler); ok { + return false, nil + } + typeMeta, listMeta, items, err := getListMeta(obj) + if err == nil { + return true, streamingEncodeList(w, typeMeta, listMeta, items) + } + return false, nil +} + +// getListMeta implements list extraction logic for json stream serialization. +// +// Reason for a custom logic instead of reusing accessors from meta package: +// * Validate json tags to prevent incompatibility with json standard package. +// * ListMetaAccessor doesn't distinguish empty from nil value. 
+// * TypeAccessor reparsing "apiVersion" and serializing it with "{group}/{version}" +func getListMeta(list runtime.Object) (metav1.TypeMeta, metav1.ListMeta, []runtime.Object, error) { + listValue, err := conversion.EnforcePtr(list) + if err != nil { + return metav1.TypeMeta{}, metav1.ListMeta{}, nil, err + } + listType := listValue.Type() + if listType.NumField() != 3 { + return metav1.TypeMeta{}, metav1.ListMeta{}, nil, fmt.Errorf("expected ListType to have 3 fields") + } + // TypeMeta + typeMeta, ok := listValue.Field(0).Interface().(metav1.TypeMeta) + if !ok { + return metav1.TypeMeta{}, metav1.ListMeta{}, nil, fmt.Errorf("expected TypeMeta field to have TypeMeta type") + } + if listType.Field(0).Tag.Get("json") != ",inline" { + return metav1.TypeMeta{}, metav1.ListMeta{}, nil, fmt.Errorf(`expected TypeMeta json field tag to be ",inline"`) + } + // ListMeta + listMeta, ok := listValue.Field(1).Interface().(metav1.ListMeta) + if !ok { + return metav1.TypeMeta{}, metav1.ListMeta{}, nil, fmt.Errorf("expected ListMeta field to have ListMeta type") + } + if listType.Field(1).Tag.Get("json") != "metadata,omitempty" { + return metav1.TypeMeta{}, metav1.ListMeta{}, nil, fmt.Errorf(`expected ListMeta json field tag to be "metadata,omitempty"`) + } + // Items + items, err := meta.ExtractList(list) + if err != nil { + return metav1.TypeMeta{}, metav1.ListMeta{}, nil, err + } + if listType.Field(2).Tag.Get("json") != "items" { + return metav1.TypeMeta{}, metav1.ListMeta{}, nil, fmt.Errorf(`expected Items json field tag to be "items"`) + } + return typeMeta, listMeta, items, nil +} + +func streamingEncodeList(w io.Writer, typeMeta metav1.TypeMeta, listMeta metav1.ListMeta, items []runtime.Object) error { + // Start + if _, err := w.Write([]byte(`{`)); err != nil { + return err + } + + // TypeMeta + if typeMeta.Kind != "" { + if err := encodeKeyValuePair(w, "kind", typeMeta.Kind, []byte(",")); err != nil { + return err + } + } + if typeMeta.APIVersion != "" { + if err := encodeKeyValuePair(w, "apiVersion", typeMeta.APIVersion, []byte(",")); err != nil { + return err + } + } + + // ListMeta + if err := encodeKeyValuePair(w, "metadata", listMeta, []byte(",")); err != nil { + return err + } + + // Items + if err := encodeItemsObjectSlice(w, items); err != nil { + return err + } + + // End + _, err := w.Write([]byte("}\n")) + return err +} + +func encodeItemsObjectSlice(w io.Writer, items []runtime.Object) (err error) { + if items == nil { + err := encodeKeyValuePair(w, "items", nil, nil) + return err + } + _, err = w.Write([]byte(`"items":[`)) + if err != nil { + return err + } + suffix := []byte(",") + for i, item := range items { + if i == len(items)-1 { + suffix = nil + } + err := encodeValue(w, item, suffix) + if err != nil { + return err + } + } + _, err = w.Write([]byte("]")) + if err != nil { + return err + } + return err +} + +func streamingEncodeUnstructuredList(w io.Writer, list *unstructured.UnstructuredList) error { + _, err := w.Write([]byte(`{`)) + if err != nil { + return err + } + keys := slices.Collect(maps.Keys(list.Object)) + if _, exists := list.Object["items"]; !exists { + keys = append(keys, "items") + } + sort.Strings(keys) + + suffix := []byte(",") + for i, key := range keys { + if i == len(keys)-1 { + suffix = nil + } + if key == "items" { + err = encodeItemsUnstructuredSlice(w, list.Items, suffix) + } else { + err = encodeKeyValuePair(w, key, list.Object[key], suffix) + } + if err != nil { + return err + } + } + _, err = w.Write([]byte("}\n")) + return err +} + +func
encodeItemsUnstructuredSlice(w io.Writer, items []unstructured.Unstructured, suffix []byte) (err error) { + _, err = w.Write([]byte(`"items":[`)) + if err != nil { + return err + } + comma := []byte(",") + for i, item := range items { + if i == len(items)-1 { + comma = nil + } + err := encodeValue(w, item.Object, comma) + if err != nil { + return err + } + } + _, err = w.Write([]byte("]")) + if err != nil { + return err + } + if len(suffix) > 0 { + _, err = w.Write(suffix) + } + return err +} + +func encodeKeyValuePair(w io.Writer, key string, value any, suffix []byte) (err error) { + err = encodeValue(w, key, []byte(":")) + if err != nil { + return err + } + err = encodeValue(w, value, suffix) + if err != nil { + return err + } + return err +} + +func encodeValue(w io.Writer, value any, suffix []byte) error { + data, err := json.Marshal(value) + if err != nil { + return err + } + _, err = w.Write(data) + if err != nil { + return err + } + if len(suffix) > 0 { + _, err = w.Write(suffix) + } + return err +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go index 1ae4a32eb7..24f66a1017 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go @@ -36,7 +36,7 @@ import ( // is not nil, the object has the group, version, and kind fields set. // Deprecated: use NewSerializerWithOptions instead. func NewSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, pretty bool) *Serializer { - return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{false, pretty, false}) + return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{false, pretty, false, false}) } // NewYAMLSerializer creates a YAML serializer that handles encoding versioned objects into the proper YAML form. If typer @@ -44,7 +44,7 @@ func NewSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtim // matches JSON, and will error if constructs are used that do not serialize to JSON. // Deprecated: use NewSerializerWithOptions instead. func NewYAMLSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Serializer { - return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{true, false, false}) + return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{true, false, false, false}) } // NewSerializerWithOptions creates a JSON/YAML serializer that handles encoding versioned objects into the proper JSON/YAML @@ -93,6 +93,9 @@ type SerializerOptions struct { // Strict: configures the Serializer to return strictDecodingError's when duplicate fields are present decoding JSON or YAML. // Note that enabling this option is not as performant as the non-strict variant, and should not be used in fast paths. Strict bool + + // StreamingCollectionsEncoding enables encoding collection, one item at the time, drastically reducing memory needed. 
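The StreamingCollectionsEncoding option whose declaration this comment introduces (the bool field follows just below) can be exercised directly against the json serializer. A minimal sketch with an invented UnstructuredList; for typed lists the serializer additionally validates the struct's json tags via getListMeta before taking the streaming path:

    package main

    import (
        "os"

        "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
        "k8s.io/apimachinery/pkg/runtime"
        "k8s.io/apimachinery/pkg/runtime/serializer/json"
    )

    func main() {
        scheme := runtime.NewScheme()
        s := json.NewSerializerWithOptions(json.DefaultMetaFactory, scheme, scheme,
            json.SerializerOptions{StreamingCollectionsEncoding: true})

        list := &unstructured.UnstructuredList{}
        list.SetAPIVersion("v1")
        list.SetKind("ConfigMapList")
        list.Items = []unstructured.Unstructured{{Object: map[string]interface{}{
            "apiVersion": "v1",
            "kind":       "ConfigMap",
            "metadata":   map[string]interface{}{"name": "demo"}, // invented object
        }}}

        // Items are written to the stream one at a time instead of
        // marshaling the entire list into a single buffer first.
        _ = s.Encode(list, os.Stdout)
    }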
+ StreamingCollectionsEncoding bool } // Serializer handles encoding versioned objects into the proper JSON form @@ -242,6 +245,15 @@ func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error { _, err = w.Write(data) return err } + if s.options.StreamingCollectionsEncoding { + ok, err := streamEncodeCollections(obj, w) + if err != nil { + return err + } + if ok { + return nil + } + } encoder := json.NewEncoder(w) return encoder.Encode(obj) } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/collections.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/collections.go new file mode 100644 index 0000000000..754a80820b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/collections.go @@ -0,0 +1,174 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package protobuf + +import ( + "errors" + "io" + "math/bits" + + "github.com/gogo/protobuf/proto" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" +) + +var ( + errFieldCount = errors.New("expected ListType to have 3 fields") + errTypeMetaField = errors.New("expected TypeMeta field to have TypeMeta type") + errTypeMetaProtobufTag = errors.New(`expected TypeMeta protobuf field tag to be ""`) + errListMetaField = errors.New("expected ListMeta field to have ListMeta type") + errListMetaProtobufTag = errors.New(`expected ListMeta protobuf field tag to be "bytes,1,opt,name=metadata"`) + errItemsProtobufTag = errors.New(`expected Items protobuf field tag to be "bytes,2,rep,name=items"`) + errItemsSizer = errors.New(`expected Items elements to implement proto.Sizer`) +) + +// getStreamingListData implements list extraction logic for protobuf stream serialization. +// +// Reason for a custom logic instead of reusing accessors from meta package: +// * Validate proto tags to prevent incompatibility with proto standard package. +// * ListMetaAccessor doesn't distinguish empty from nil value. +// * TypeAccessor reparsing "apiVersion" and serializing it with "{group}/{version}" +func getStreamingListData(list runtime.Object) (data streamingListData, err error) { + listValue, err := conversion.EnforcePtr(list) + if err != nil { + return data, err + } + listType := listValue.Type() + if listType.NumField() != 3 { + return data, errFieldCount + } + // TypeMeta: validated, but not returned as is not serialized. + _, ok := listValue.Field(0).Interface().(metav1.TypeMeta) + if !ok { + return data, errTypeMetaField + } + if listType.Field(0).Tag.Get("protobuf") != "" { + return data, errTypeMetaProtobufTag + } + // ListMeta + listMeta, ok := listValue.Field(1).Interface().(metav1.ListMeta) + if !ok { + return data, errListMetaField + } + // if we were ever to relax the protobuf tag check we should update the hardcoded `0xa` below when writing ListMeta. 
+ if listType.Field(1).Tag.Get("protobuf") != "bytes,1,opt,name=metadata" { + return data, errListMetaProtobufTag + } + data.listMeta = listMeta + // Items; if we were ever to relax the protobuf tag check we should update the hardcoded `0x12` below when writing Items. + if listType.Field(2).Tag.Get("protobuf") != "bytes,2,rep,name=items" { + return data, errItemsProtobufTag + } + items, err := meta.ExtractList(list) + if err != nil { + return data, err + } + data.items = items + data.totalSize, data.listMetaSize, data.itemsSizes, err = listSize(listMeta, items) + return data, err +} + +type streamingListData struct { + // totalSize is the total size of the serialized List object, including their proto headers/size bytes + totalSize int + + // listMetaSize caches results from .Size() call to listMeta, doesn't include header bytes (field identifier, size) + listMetaSize int + listMeta metav1.ListMeta + + // itemsSizes caches results from .Size() call to items, doesn't include header bytes (field identifier, size) + itemsSizes []int + items []runtime.Object +} + +// listSize return size of ListMeta and items to be later used for preallocations. +// listMetaSize and itemSizes do not include header bytes (field identifier, size). +func listSize(listMeta metav1.ListMeta, items []runtime.Object) (totalSize, listMetaSize int, itemSizes []int, err error) { + // ListMeta + listMetaSize = listMeta.Size() + totalSize += 1 + sovGenerated(uint64(listMetaSize)) + listMetaSize + // Items + itemSizes = make([]int, len(items)) + for i, item := range items { + sizer, ok := item.(proto.Sizer) + if !ok { + return totalSize, listMetaSize, nil, errItemsSizer + } + n := sizer.Size() + itemSizes[i] = n + totalSize += 1 + sovGenerated(uint64(n)) + n + } + return totalSize, listMetaSize, itemSizes, nil +} + +func streamingEncodeUnknownList(w io.Writer, unk runtime.Unknown, listData streamingListData, memAlloc runtime.MemoryAllocator) error { + _, err := w.Write(protoEncodingPrefix) + if err != nil { + return err + } + // encodeList is responsible for encoding the List into the unknown Raw. + encodeList := func(writer io.Writer) (int, error) { + return streamingEncodeList(writer, listData, memAlloc) + } + _, err = unk.MarshalToWriter(w, listData.totalSize, encodeList) + return err +} + +func streamingEncodeList(w io.Writer, listData streamingListData, memAlloc runtime.MemoryAllocator) (size int, err error) { + // ListMeta; 0xa = (1 << 3) | 2; field number: 1, type: 2 (LEN). https://protobuf.dev/programming-guides/encoding/#structure + n, err := doEncodeWithHeader(&listData.listMeta, w, 0xa, listData.listMetaSize, memAlloc) + size += n + if err != nil { + return size, err + } + // Items; 0x12 = (2 << 3) | 2; field number: 2, type: 2 (LEN). https://protobuf.dev/programming-guides/encoding/#structure + for i, item := range listData.items { + n, err := doEncodeWithHeader(item, w, 0x12, listData.itemsSizes[i], memAlloc) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + +func writeVarintGenerated(w io.Writer, v int) (int, error) { + buf := make([]byte, sovGenerated(uint64(v))) + encodeVarintGenerated(buf, len(buf), uint64(v)) + return w.Write(buf) +} + +// sovGenerated is copied from `generated.pb.go` returns size of varint. +func sovGenerated(v uint64) int { + return (bits.Len64(v|1) + 6) / 7 +} + +// encodeVarintGenerated is copied from `generated.pb.go` encodes varint. 
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go index 72d0ac79b3..381748d69f 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go @@ -15,4 +15,4 @@ limitations under the License. */ // Package protobuf provides a Kubernetes serializer for the protobuf format. -package protobuf // import "k8s.io/apimachinery/pkg/runtime/serializer/protobuf" +package protobuf diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go index c63e6dc63f..c66c49ac4c 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go @@ -72,10 +72,18 @@ func IsNotMarshalable(err error) bool { // is passed, the encoded object will have group, version, and kind fields set. If typer is nil, the objects will be written // as-is (any type info passed with the object will be used). func NewSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Serializer { + return NewSerializerWithOptions(creater, typer, SerializerOptions{}) +} + +// NewSerializerWithOptions creates a Protobuf serializer that handles encoding versioned objects into the proper wire form. If a typer +// is passed, the encoded object will have group, version, and kind fields set. If typer is nil, the objects will be written +// as-is (any type info passed with the object will be used). +func NewSerializerWithOptions(creater runtime.ObjectCreater, typer runtime.ObjectTyper, opts SerializerOptions) *Serializer { return &Serializer{ prefix: protoEncodingPrefix, creater: creater, typer: typer, + options: opts, } } @@ -84,6 +92,14 @@ type Serializer struct { prefix []byte creater runtime.ObjectCreater typer runtime.ObjectTyper + + options SerializerOptions +} + +// SerializerOptions holds the options which are used to configure a Proto serializer. +type SerializerOptions struct { + // StreamingCollectionsEncoding enables encoding collection, one item at the time, drastically reducing memory needed. + StreamingCollectionsEncoding bool } var _ runtime.Serializer = &Serializer{} @@ -209,6 +225,13 @@ func (s *Serializer) doEncode(obj runtime.Object, w io.Writer, memAlloc runtime. }, } } + if s.options.StreamingCollectionsEncoding { + listData, err := getStreamingListData(obj) + if err == nil { + // Doesn't honor custom proto marshaling methods (like json streaming), because all proto objects implement proto methods. 
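The hardcoded 0xa and 0x12 bytes and the varint helpers above are standard protobuf wire-format plumbing: each LEN field is a one-byte header ((field_number << 3) | wire type 2) followed by a varint length and the payload. A self-contained sketch of that header math, reusing the two helpers copied from generated.pb.go; the payload stands in for one marshalled list item:

    package main

    import (
        "bytes"
        "fmt"
        "math/bits"
    )

    // sovGenerated returns the size in bytes of v encoded as a protobuf varint.
    func sovGenerated(v uint64) int {
        return (bits.Len64(v|1) + 6) / 7
    }

    // encodeVarintGenerated writes v as a varint ending at offset, returning the start index.
    func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
        offset -= sovGenerated(v)
        base := offset
        for v >= 1<<7 {
            dAtA[offset] = uint8(v&0x7f | 0x80)
            v >>= 7
            offset++
        }
        dAtA[offset] = uint8(v)
        return base
    }

    func main() {
        var buf bytes.Buffer
        payload := []byte("hello") // invented stand-in for a marshalled item

        // Field 2, wire type 2 (LEN): (2 << 3) | 2 == 0x12, then the length varint.
        buf.WriteByte(0x12)
        varint := make([]byte, sovGenerated(uint64(len(payload))))
        encodeVarintGenerated(varint, len(varint), uint64(len(payload)))
        buf.Write(varint)
        buf.Write(payload)

        fmt.Printf("% x\n", buf.Bytes()) // 12 05 68 65 6c 6c 6f
    }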
+ return streamingEncodeUnknownList(w, unk, listData, memAlloc) + } + } switch t := obj.(type) { case bufferedMarshaller: @@ -428,6 +451,39 @@ func (s *RawSerializer) encode(obj runtime.Object, w io.Writer, memAlloc runtime } func (s *RawSerializer) doEncode(obj runtime.Object, w io.Writer, memAlloc runtime.MemoryAllocator) error { + _, err := doEncode(obj, w, nil, memAlloc) + return err +} + +func doEncodeWithHeader(obj any, w io.Writer, field byte, precomputedSize int, memAlloc runtime.MemoryAllocator) (size int, err error) { + // Field identifier + n, err := w.Write([]byte{field}) + size += n + if err != nil { + return size, err + } + // Size + n, err = writeVarintGenerated(w, precomputedSize) + size += n + if err != nil { + return size, err + } + // Obj + n, err = doEncode(obj, w, &precomputedSize, memAlloc) + size += n + if err != nil { + return size, err + } + if n != precomputedSize { + return size, fmt.Errorf("the size value was %d, but doEncode wrote %d bytes to data", precomputedSize, n) + } + return size, nil +} + +// doEncode encodes the provided object into the writer using an allocator if possible. +// Avoids calling Size on the object if precomputedObjSize is provided. +// precomputedObjSize should not include header bytes (field identifier, size). +func doEncode(obj any, w io.Writer, precomputedObjSize *int, memAlloc runtime.MemoryAllocator) (int, error) { if memAlloc == nil { klog.Error("a mandatory memory allocator wasn't provided, this might have a negative impact on performance, check invocations of EncodeWithAllocator method, falling back on runtime.SimpleAllocator") memAlloc = &runtime.SimpleAllocator{} } @@ -436,40 +492,43 @@ func (s *RawSerializer) doEncode(obj runtime.Object, w io.Writer, memAlloc runti case bufferedReverseMarshaller: // this path performs a single allocation during write only when the Allocator wasn't provided // it also requires the caller to implement the more efficient Size and MarshalToSizedBuffer methods - encodedSize := uint64(t.Size()) - data := memAlloc.Allocate(encodedSize) + if precomputedObjSize == nil { + s := t.Size() + precomputedObjSize = &s + } + data := memAlloc.Allocate(uint64(*precomputedObjSize)) n, err := t.MarshalToSizedBuffer(data) if err != nil { - return err + return 0, err } - _, err = w.Write(data[:n]) - return err + return w.Write(data[:n]) case bufferedMarshaller: // this path performs a single allocation during write only when the Allocator wasn't provided // it also requires the caller to implement the more efficient Size and MarshalTo methods - encodedSize := uint64(t.Size()) - data := memAlloc.Allocate(encodedSize) + if precomputedObjSize == nil { + s := t.Size() + precomputedObjSize = &s + } + data := memAlloc.Allocate(uint64(*precomputedObjSize)) n, err := t.MarshalTo(data) if err != nil { - return err + return 0, err } - _, err = w.Write(data[:n]) - return err + return w.Write(data[:n]) case proto.Marshaler: // this path performs extra allocations data, err := t.Marshal() if err != nil { - return err + return 0, err } - _, err = w.Write(data) - return err + return w.Write(data) default: - return errNotMarshalable{reflect.TypeOf(obj)} + return 0, errNotMarshalable{reflect.TypeOf(obj)} } } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go b/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go index a82227b239..27a2064c41 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go @@ -18,6 +18,7 @@ package runtime import ( "fmt" + "io" ) type
ProtobufMarshaller interface { @@ -28,6 +29,124 @@ type ProtobufReverseMarshaller interface { MarshalToSizedBuffer(data []byte) (int, error) } +const ( + typeMetaTag = 0xa + rawTag = 0x12 + contentEncodingTag = 0x1a + contentTypeTag = 0x22 + + // max length of a varint for a uint64 + maxUint64VarIntLength = 10 +) + +// MarshalToWriter allows a caller to provide a streaming writer for raw bytes, +// instead of populating them inside the Unknown struct. +// rawSize is the number of bytes rawWriter will write in a success case. +// writeRaw is called when it is time to write the raw bytes. It must return `rawSize, nil` or an error. +func (m *Unknown) MarshalToWriter(w io.Writer, rawSize int, writeRaw func(io.Writer) (int, error)) (int, error) { + size := 0 + + // reuse the buffer for varint marshaling + varintBuffer := make([]byte, maxUint64VarIntLength) + writeVarint := func(i int) (int, error) { + offset := encodeVarintGenerated(varintBuffer, len(varintBuffer), uint64(i)) + return w.Write(varintBuffer[offset:]) + } + + // TypeMeta + { + n, err := w.Write([]byte{typeMetaTag}) + size += n + if err != nil { + return size, err + } + + typeMetaBytes, err := m.TypeMeta.Marshal() + if err != nil { + return size, err + } + + n, err = writeVarint(len(typeMetaBytes)) + size += n + if err != nil { + return size, err + } + + n, err = w.Write(typeMetaBytes) + size += n + if err != nil { + return size, err + } + } + + // Raw, delegating write to writeRaw() + { + n, err := w.Write([]byte{rawTag}) + size += n + if err != nil { + return size, err + } + + n, err = writeVarint(rawSize) + size += n + if err != nil { + return size, err + } + + n, err = writeRaw(w) + size += n + if err != nil { + return size, err + } + if n != int(rawSize) { + return size, fmt.Errorf("the size value was %d, but encoding wrote %d bytes to data", rawSize, n) + } + } + + // ContentEncoding + { + n, err := w.Write([]byte{contentEncodingTag}) + size += n + if err != nil { + return size, err + } + + n, err = writeVarint(len(m.ContentEncoding)) + size += n + if err != nil { + return size, err + } + + n, err = w.Write([]byte(m.ContentEncoding)) + size += n + if err != nil { + return size, err + } + } + + // ContentEncoding + { + n, err := w.Write([]byte{contentTypeTag}) + size += n + if err != nil { + return size, err + } + + n, err = writeVarint(len(m.ContentType)) + size += n + if err != nil { + return size, err + } + + n, err = w.Write([]byte(m.ContentType)) + size += n + if err != nil { + return size, err + } + } + return size, nil +} + // NestedMarshalTo allows a caller to avoid extra allocations during serialization of an Unknown // that will contain an object that implements ProtobufMarshaller or ProtobufReverseMarshaller. 
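//
// A hedged usage sketch of the streaming counterpart, MarshalToWriter (above),
// where listSize and writeItems are assumed caller-side helpers:
//
//	unk := Unknown{ContentType: "application/vnd.kubernetes.protobuf"}
//	n, err := unk.MarshalToWriter(w, listSize, func(w io.Writer) (int, error) {
//		return writeItems(w) // must write exactly listSize bytes
//	})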
func (m *Unknown) NestedMarshalTo(data []byte, b ProtobufMarshaller, size uint64) (int, error) { @@ -43,12 +162,12 @@ func (m *Unknown) NestedMarshalTo(data []byte, b ProtobufMarshaller, size uint64 copy(data[i:], m.ContentType) i = encodeVarintGenerated(data, i, uint64(len(m.ContentType))) i-- - data[i] = 0x22 + data[i] = contentTypeTag i -= len(m.ContentEncoding) copy(data[i:], m.ContentEncoding) i = encodeVarintGenerated(data, i, uint64(len(m.ContentEncoding))) i-- - data[i] = 0x1a + data[i] = contentEncodingTag if b != nil { if r, ok := b.(ProtobufReverseMarshaller); ok { n1, err := r.MarshalToSizedBuffer(data[:i]) @@ -75,7 +194,7 @@ func (m *Unknown) NestedMarshalTo(data []byte, b ProtobufMarshaller, size uint64 } i = encodeVarintGenerated(data, i, size) i-- - data[i] = 0x12 + data[i] = rawTag } n2, err := m.TypeMeta.MarshalToSizedBuffer(data[:i]) if err != nil { @@ -84,6 +203,6 @@ func (m *Unknown) NestedMarshalTo(data []byte, b ProtobufMarshaller, size uint64 i -= n2 i = encodeVarintGenerated(data, i, uint64(n2)) i-- - data[i] = 0xa + data[i] = typeMetaTag return msgSize - i, nil } diff --git a/vendor/k8s.io/apimachinery/pkg/types/doc.go b/vendor/k8s.io/apimachinery/pkg/types/doc.go index 5667fa9921..783cbcdc8d 100644 --- a/vendor/k8s.io/apimachinery/pkg/types/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/types/doc.go @@ -15,4 +15,4 @@ limitations under the License. */ // Package types implements various generic types used throughout kubernetes. -package types // import "k8s.io/apimachinery/pkg/types" +package types diff --git a/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go b/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go index fc03018449..38b666ef59 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go +++ b/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go @@ -23,7 +23,7 @@ import ( "strings" "text/tabwriter" - "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp" //nolint:depguard "k8s.io/apimachinery/pkg/util/dump" ) diff --git a/vendor/k8s.io/apimachinery/pkg/util/errors/doc.go b/vendor/k8s.io/apimachinery/pkg/util/errors/doc.go index 5d4d6250a3..b3b39bc388 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/errors/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/util/errors/doc.go @@ -15,4 +15,4 @@ limitations under the License. */ // Package errors implements various utility functions and types around errors. -package errors // import "k8s.io/apimachinery/pkg/util/errors" +package errors diff --git a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go index 1ab8fd396e..f18845a417 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go +++ b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go @@ -91,12 +91,12 @@ func (r *lengthDelimitedFrameReader) Read(data []byte) (int, error) { } n, err := io.ReadAtLeast(r.r, data[:max], int(max)) r.remaining -= n - if err == io.ErrShortBuffer || r.remaining > 0 { - return n, io.ErrShortBuffer - } if err != nil { return n, err } + if r.remaining > 0 { + return n, io.ErrShortBuffer + } if n != expect { return n, io.ErrUnexpectedEOF } diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go index 5893df5bd2..1da83f14b1 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go @@ -16,4 +16,4 @@ limitations under the License. 
// Package httpstream adds multiplexed streaming support to HTTP requests and // responses via connection upgrades. -package httpstream // import "k8s.io/apimachinery/pkg/util/httpstream" +package httpstream diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/wsstream/doc.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/wsstream/doc.go index 3dd6f828b7..a57e8df60a 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/httpstream/wsstream/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/wsstream/doc.go @@ -66,4 +66,4 @@ limitations under the License. // WRITE []byte{0, 102, 111, 111, 10} # send "foo\n" on channel 0 (STDIN) // WRITE []byte{255, 0} # send CLOSE signal (STDIN) // CLOSE -package wsstream // import "k8s.io/apimachinery/pkg/util/httpstream/wsstream" +package wsstream diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go index a502b5adb6..2d6f6a0ccc 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/instr_fuzz.go @@ -20,24 +20,24 @@ limitations under the License. package intstr import ( - fuzz "github.com/google/gofuzz" + "sigs.k8s.io/randfill" ) -// Fuzz satisfies fuzz.Interface -func (intstr *IntOrString) Fuzz(c fuzz.Continue) { +// RandFill satisfies randfill.NativeSelfFiller +func (intstr *IntOrString) RandFill(c randfill.Continue) { if intstr == nil { return } - if c.RandBool() { + if c.Bool() { intstr.Type = Int - c.Fuzz(&intstr.IntVal) + c.Fill(&intstr.IntVal) intstr.StrVal = "" } else { intstr.Type = String intstr.IntVal = 0 - c.Fuzz(&intstr.StrVal) + c.Fill(&intstr.StrVal) } } // ensure IntOrString implements fuzz.Interface -var _ fuzz.Interface = &IntOrString{} +var _ randfill.NativeSelfFiller = &IntOrString{} diff --git a/vendor/k8s.io/apimachinery/pkg/util/proxy/doc.go b/vendor/k8s.io/apimachinery/pkg/util/proxy/doc.go index d14ecfad54..ea710f6b15 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/proxy/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/util/proxy/doc.go @@ -15,4 +15,4 @@ limitations under the License. */ // Package proxy provides transport and upgrade support for proxies. -package proxy // import "k8s.io/apimachinery/pkg/util/proxy" +package proxy diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go index df374949dd..de97deae03 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go +++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go @@ -36,6 +36,11 @@ var ( ) // PanicHandlers is a list of functions which will be invoked when a panic happens. +// +// The code invoking these handlers prepares a contextual logger so that +// klog.FromContext(ctx) already skips over the panic handler itself and +// several other intermediate functions, ideally such that the log output +// is attributed to the code which triggered the panic. var PanicHandlers = []func(context.Context, interface{}){logPanic} // HandleCrash simply catches a crash and logs an error. Meant to be called via @@ -45,7 +50,7 @@ var PanicHandlers = []func(context.Context, interface{}){logPanic} // // E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully. // -// Contextual logging: HandleCrashWithContext should be used instead of HandleCrash in code which supports contextual logging. 
+// Contextual logging: HandleCrashWithContext or HandleCrashWithLogger should be used instead of HandleCrash in code which supports contextual logging.
 func HandleCrash(additionalHandlers ...func(interface{})) {
 	if r := recover(); r != nil {
 		additionalHandlersWithContext := make([]func(context.Context, interface{}), len(additionalHandlers))
@@ -74,10 +79,30 @@ func HandleCrashWithContext(ctx context.Context, additionalHandlers ...func(cont
 	}
 }
 
-// handleCrash is the common implementation of HandleCrash and HandleCrash.
+// HandleCrashWithLogger simply catches a crash and logs an error. Meant to be called via
+// defer. Additional context-specific handlers can be provided, and will be
+// called in case of panic. HandleCrash actually crashes, after calling the
+// handlers and logging the panic message.
+//
+// E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully.
+func HandleCrashWithLogger(logger klog.Logger, additionalHandlers ...func(context.Context, interface{})) {
+	if r := recover(); r != nil {
+		ctx := klog.NewContext(context.Background(), logger)
+		handleCrash(ctx, r, additionalHandlers...)
+	}
+}
+
+// handleCrash is the common implementation of the HandleCrash* variants.
 // Having those call a common implementation ensures that the stack depth
 // is the same regardless through which path the handlers get invoked.
 func handleCrash(ctx context.Context, r any, additionalHandlers ...func(context.Context, interface{})) {
+	// We don't really know how many call frames to skip because the Go
+	// panic handler is between us and the code where the panic occurred.
+	// If it's one function (as in Go 1.21), then skipping four levels
+	// gets us to the function which called the `defer HandleCrashWithContext(...)`.
+	logger := klog.FromContext(ctx).WithCallDepth(4)
+	ctx = klog.NewContext(ctx, logger)
+
 	for _, fn := range PanicHandlers {
 		fn(ctx, r)
 	}
@@ -106,11 +131,7 @@ func logPanic(ctx context.Context, r interface{}) {
 	stacktrace := make([]byte, size)
 	stacktrace = stacktrace[:runtime.Stack(stacktrace, false)]
 
-	// We don't really know how many call frames to skip because the Go
-	// panic handler is between us and the code where the panic occurred.
-	// If it's one function (as in Go 1.21), then skipping four levels
-	// gets us to the function which called the `defer HandleCrashWithontext(...)`.
-	logger := klog.FromContext(ctx).WithCallDepth(4)
+	logger := klog.FromContext(ctx)
 
 	// For backwards compatibility, conversion to string
 	// is handled here instead of defering to the logging
@@ -176,12 +197,19 @@ func HandleError(err error) {
 // and key/value pairs.
 //
 // This variant should be used instead of HandleError because it supports
-// structured, contextual logging.
+// structured, contextual logging. Alternatively, [HandleErrorWithLogger] can
+// be used if a logger is available instead of a context.
 func HandleErrorWithContext(ctx context.Context, err error, msg string, keysAndValues ...interface{}) {
 	handleError(ctx, err, msg, keysAndValues...)
 }
 
-// handleError is the common implementation of HandleError and HandleErrorWithContext.
+// HandleErrorWithLogger is an alternative to [HandleErrorWithContext] which accepts
+// a logger for contextual logging.
+func HandleErrorWithLogger(logger klog.Logger, err error, msg string, keysAndValues ...interface{}) {
+	handleError(klog.NewContext(context.Background(), logger), err, msg, keysAndValues...)
+}
+
+// handleError is the common implementation of the HandleError* variants.
// Using this common implementation ensures that the stack depth
// is the same regardless through which path the handlers get invoked.
func handleError(ctx context.Context, err error, msg string, keysAndValues ...interface{}) {
diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go b/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go
index fd281bdb88..194883390c 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go
@@ -16,4 +16,4 @@ limitations under the License.
 
 // Package sets has generic set and specified sets. Generic set will
 // replace specified ones over time. And specific ones are deprecated.
-package sets // import "k8s.io/apimachinery/pkg/util/sets"
+package sets
diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/error_matcher.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/error_matcher.go
new file mode 100644
index 0000000000..1d15deae86
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/error_matcher.go
@@ -0,0 +1,212 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package field
+
+import (
+	"fmt"
+	"reflect"
+	"regexp"
+	"strings"
+)
+
+// ErrorMatcher is a helper for comparing Error objects.
+type ErrorMatcher struct {
+	// TODO(thockin): consider whether type is ever NOT required, maybe just
+	// assume it.
+	matchType bool
+	// TODO(thockin): consider whether field could be assumed - if the
+	// "want" error has a nil field, don't match on field.
+	matchField bool
+	// TODO(thockin): consider whether value could be assumed - if the
+	// "want" error has a nil value, don't match on value.
+	matchValue               bool
+	matchOrigin              bool
+	matchDetail              func(want, got string) bool
+	requireOriginWhenInvalid bool
+}
+
+// Matches returns true if the two Error objects match according to the
+// configured criteria.
+func (m ErrorMatcher) Matches(want, got *Error) bool {
+	if m.matchType && want.Type != got.Type {
+		return false
+	}
+	if m.matchField && want.Field != got.Field {
+		return false
+	}
+	if m.matchValue && !reflect.DeepEqual(want.BadValue, got.BadValue) {
+		return false
+	}
+	if m.matchOrigin {
+		if want.Origin != got.Origin {
+			return false
+		}
+		if m.requireOriginWhenInvalid && want.Type == ErrorTypeInvalid {
+			if want.Origin == "" || got.Origin == "" {
+				return false
+			}
+		}
+	}
+	if m.matchDetail != nil && !m.matchDetail(want.Detail, got.Detail) {
+		return false
+	}
+	return true
+}
+
+// Render returns a string representation of the specified Error object,
+// according to the criteria configured in the ErrorMatcher.
+func (m ErrorMatcher) Render(e *Error) string { + buf := strings.Builder{} + + comma := func() { + if buf.Len() > 0 { + buf.WriteString(", ") + } + } + + if m.matchType { + comma() + buf.WriteString(fmt.Sprintf("Type=%q", e.Type)) + } + if m.matchField { + comma() + buf.WriteString(fmt.Sprintf("Field=%q", e.Field)) + } + if m.matchValue { + comma() + buf.WriteString(fmt.Sprintf("Value=%v", e.BadValue)) + } + if m.matchOrigin || m.requireOriginWhenInvalid && e.Type == ErrorTypeInvalid { + comma() + buf.WriteString(fmt.Sprintf("Origin=%q", e.Origin)) + } + if m.matchDetail != nil { + comma() + buf.WriteString(fmt.Sprintf("Detail=%q", e.Detail)) + } + return "{" + buf.String() + "}" +} + +// Exactly returns a derived ErrorMatcher which matches all fields exactly. +func (m ErrorMatcher) Exactly() ErrorMatcher { + return m.ByType().ByField().ByValue().ByOrigin().ByDetailExact() +} + +// ByType returns a derived ErrorMatcher which also matches by type. +func (m ErrorMatcher) ByType() ErrorMatcher { + m.matchType = true + return m +} + +// ByField returns a derived ErrorMatcher which also matches by field path. +func (m ErrorMatcher) ByField() ErrorMatcher { + m.matchField = true + return m +} + +// ByValue returns a derived ErrorMatcher which also matches by the errant +// value. +func (m ErrorMatcher) ByValue() ErrorMatcher { + m.matchValue = true + return m +} + +// ByOrigin returns a derived ErrorMatcher which also matches by the origin. +func (m ErrorMatcher) ByOrigin() ErrorMatcher { + m.matchOrigin = true + return m +} + +// RequireOriginWhenInvalid returns a derived ErrorMatcher which also requires +// the Origin field to be set when the Type is Invalid and the matcher is +// matching by Origin. +func (m ErrorMatcher) RequireOriginWhenInvalid() ErrorMatcher { + m.requireOriginWhenInvalid = true + return m +} + +// ByDetailExact returns a derived ErrorMatcher which also matches errors by +// the exact detail string. +func (m ErrorMatcher) ByDetailExact() ErrorMatcher { + m.matchDetail = func(want, got string) bool { + return got == want + } + return m +} + +// ByDetailSubstring returns a derived ErrorMatcher which also matches errors +// by a substring of the detail string. +func (m ErrorMatcher) ByDetailSubstring() ErrorMatcher { + m.matchDetail = func(want, got string) bool { + return strings.Contains(got, want) + } + return m +} + +// ByDetailRegexp returns a derived ErrorMatcher which also matches errors by a +// regular expression of the detail string, where the "want" string is assumed +// to be a valid regular expression. +func (m ErrorMatcher) ByDetailRegexp() ErrorMatcher { + m.matchDetail = func(want, got string) bool { + return regexp.MustCompile(want).MatchString(got) + } + return m +} + +// TestIntf lets users pass a testing.T while not coupling this package to Go's +// testing package. +type TestIntf interface { + Helper() + Errorf(format string, args ...any) + Logf(format string, args ...any) +} + +// Test compares two ErrorLists by the criteria configured in this matcher, and +// fails the test if they don't match. If a given "want" error matches multiple +// "got" errors, they will all be consumed. This might be OK (e.g. if there are +// multiple errors on the same field from the same origin) or it might be an +// insufficiently specific matcher, so these will be logged. 
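+//
+// A hedged usage sketch (the matcher configuration shown is illustrative):
+//
+//	matcher := ErrorMatcher{}.ByType().ByField().ByDetailSubstring()
+//	matcher.Test(t, wantErrs, gotErrs) // a *testing.T satisfies TestIntf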
+func (m ErrorMatcher) Test(tb TestIntf, want, got ErrorList) {
+	tb.Helper()
+
+	remaining := got
+	for _, w := range want {
+		tmp := make(ErrorList, 0, len(remaining))
+		n := 0
+		for _, g := range remaining {
+			if m.Matches(w, g) {
+				n++
+			} else {
+				tmp = append(tmp, g)
+			}
+		}
+		if n == 0 {
+			tb.Errorf("expected an error matching:\n%s", m.Render(w))
+		} else if n > 1 {
+			// This is not necessarily an error, but it's worth logging in
+			// case it's not what the test author intended.
+			tb.Logf("multiple errors matched:\n%s", m.Render(w))
+		}
+		remaining = tmp
+	}
+	if len(remaining) > 0 {
+		for _, e := range remaining {
+			exactly := m.Exactly() // makes a copy
+			tb.Errorf("unmatched error:\n%s", exactly.Render(e))
+		}
+	}
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go
index f1634bc0df..840d645ee3 100644
--- a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go
+++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go
@@ -33,13 +33,35 @@ type Error struct {
 	Field    string
 	BadValue interface{}
 	Detail   string
+
+	// Origin uniquely identifies where this error was generated from. It is used in testing to
+	// compare expected errors against actual errors without relying on exact detail string matching.
+	// This allows tests to verify the correct validation logic triggered the error
+	// regardless of how the error message might be formatted or localized.
+	//
+	// The value should be either:
+	//   - A simple camelCase identifier (e.g., "maximum", "maxItems")
+	//   - A structured format using "format=" for validation errors related to specific formats
+	//     (e.g., "format=dns-label", "format=qualified-name")
+	//
+	// If the Origin corresponds to an existing declarative validation tag or JSON Schema keyword,
+	// use that same name for consistency.
+	//
+	// Origin should be set in the most deeply nested validation function that
+	// can still identify the unique source of the error.
+	Origin string
+
+	// CoveredByDeclarative is true when this error is covered by declarative
+	// validation. This field is used to identify errors from imperative validation
+	// that should also be caught by declarative validation.
+	CoveredByDeclarative bool
 }
 
 var _ error = &Error{}
 
 // Error implements the error interface.
-func (v *Error) Error() string {
-	return fmt.Sprintf("%s: %s", v.Field, v.ErrorBody())
+func (e *Error) Error() string {
+	return fmt.Sprintf("%s: %s", e.Field, e.ErrorBody())
 }
 
 type OmitValueType struct{}
 
@@ -48,21 +70,21 @@ var omitValue = OmitValueType{}
 
 // ErrorBody returns the error message without the field name. This is useful
 // for building nice-looking higher-level error reporting.
-func (v *Error) ErrorBody() string { +func (e *Error) ErrorBody() string { var s string switch { - case v.Type == ErrorTypeRequired: - s = v.Type.String() - case v.Type == ErrorTypeForbidden: - s = v.Type.String() - case v.Type == ErrorTypeTooLong: - s = v.Type.String() - case v.Type == ErrorTypeInternal: - s = v.Type.String() - case v.BadValue == omitValue: - s = v.Type.String() + case e.Type == ErrorTypeRequired: + s = e.Type.String() + case e.Type == ErrorTypeForbidden: + s = e.Type.String() + case e.Type == ErrorTypeTooLong: + s = e.Type.String() + case e.Type == ErrorTypeInternal: + s = e.Type.String() + case e.BadValue == omitValue: + s = e.Type.String() default: - value := v.BadValue + value := e.BadValue valueType := reflect.TypeOf(value) if value == nil || valueType == nil { value = "null" @@ -76,26 +98,38 @@ func (v *Error) ErrorBody() string { switch t := value.(type) { case int64, int32, float64, float32, bool: // use simple printer for simple types - s = fmt.Sprintf("%s: %v", v.Type, value) + s = fmt.Sprintf("%s: %v", e.Type, value) case string: - s = fmt.Sprintf("%s: %q", v.Type, t) + s = fmt.Sprintf("%s: %q", e.Type, t) case fmt.Stringer: // anything that defines String() is better than raw struct - s = fmt.Sprintf("%s: %s", v.Type, t.String()) + s = fmt.Sprintf("%s: %s", e.Type, t.String()) default: // fallback to raw struct // TODO: internal types have panic guards against json.Marshalling to prevent // accidental use of internal types in external serialized form. For now, use // %#v, although it would be better to show a more expressive output in the future - s = fmt.Sprintf("%s: %#v", v.Type, value) + s = fmt.Sprintf("%s: %#v", e.Type, value) } } - if len(v.Detail) != 0 { - s += fmt.Sprintf(": %s", v.Detail) + if len(e.Detail) != 0 { + s += fmt.Sprintf(": %s", e.Detail) } return s } +// WithOrigin adds origin information to the FieldError +func (e *Error) WithOrigin(o string) *Error { + e.Origin = o + return e +} + +// MarkCoveredByDeclarative marks the error as covered by declarative validation. +func (e *Error) MarkCoveredByDeclarative() *Error { + e.CoveredByDeclarative = true + return e +} + // ErrorType is a machine readable value providing more detail about why // a field is invalid. These values are expected to match 1-1 with // CauseType in api/types.go. @@ -169,32 +203,32 @@ func (t ErrorType) String() string { // TypeInvalid returns a *Error indicating "type is invalid" func TypeInvalid(field *Path, value interface{}, detail string) *Error { - return &Error{ErrorTypeTypeInvalid, field.String(), value, detail} + return &Error{ErrorTypeTypeInvalid, field.String(), value, detail, "", false} } // NotFound returns a *Error indicating "value not found". This is // used to report failure to find a requested value (e.g. looking up an ID). func NotFound(field *Path, value interface{}) *Error { - return &Error{ErrorTypeNotFound, field.String(), value, ""} + return &Error{ErrorTypeNotFound, field.String(), value, "", "", false} } // Required returns a *Error indicating "value required". This is used // to report required values that are not provided (e.g. empty strings, null // values, or empty arrays). func Required(field *Path, detail string) *Error { - return &Error{ErrorTypeRequired, field.String(), "", detail} + return &Error{ErrorTypeRequired, field.String(), "", detail, "", false} } // Duplicate returns a *Error indicating "duplicate value". This is // used to report collisions of values that must be unique (e.g. names or IDs). 
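//
// For example (illustrative): Duplicate(NewPath("spec", "ports").Index(1), 8080)
// reports that the value 8080 appears more than once.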
func Duplicate(field *Path, value interface{}) *Error { - return &Error{ErrorTypeDuplicate, field.String(), value, ""} + return &Error{ErrorTypeDuplicate, field.String(), value, "", "", false} } // Invalid returns a *Error indicating "invalid value". This is used // to report malformed values (e.g. failed regex match, too long, out of bounds). func Invalid(field *Path, value interface{}, detail string) *Error { - return &Error{ErrorTypeInvalid, field.String(), value, detail} + return &Error{ErrorTypeInvalid, field.String(), value, detail, "", false} } // NotSupported returns a *Error indicating "unsupported value". @@ -209,7 +243,7 @@ func NotSupported[T ~string](field *Path, value interface{}, validValues []T) *E } detail = "supported values: " + strings.Join(quotedValues, ", ") } - return &Error{ErrorTypeNotSupported, field.String(), value, detail} + return &Error{ErrorTypeNotSupported, field.String(), value, detail, "", false} } // Forbidden returns a *Error indicating "forbidden". This is used to @@ -217,7 +251,7 @@ func NotSupported[T ~string](field *Path, value interface{}, validValues []T) *E // some conditions, but which are not permitted by current conditions (e.g. // security policy). func Forbidden(field *Path, detail string) *Error { - return &Error{ErrorTypeForbidden, field.String(), "", detail} + return &Error{ErrorTypeForbidden, field.String(), "", detail, "", false} } // TooLong returns a *Error indicating "too long". This is used to report that @@ -231,7 +265,7 @@ func TooLong(field *Path, value interface{}, maxLength int) *Error { } else { msg = "value is too long" } - return &Error{ErrorTypeTooLong, field.String(), "", msg} + return &Error{ErrorTypeTooLong, field.String(), "", msg, "", false} } // TooLongMaxLength returns a *Error indicating "too long". @@ -259,14 +293,14 @@ func TooMany(field *Path, actualQuantity, maxQuantity int) *Error { actual = omitValue } - return &Error{ErrorTypeTooMany, field.String(), actual, msg} + return &Error{ErrorTypeTooMany, field.String(), actual, msg, "", false} } // InternalError returns a *Error indicating "internal error". This is used // to signal that an error was found that was not directly related to user // input. The err argument must be non-nil. func InternalError(field *Path, err error) *Error { - return &Error{ErrorTypeInternal, field.String(), nil, err.Error()} + return &Error{ErrorTypeInternal, field.String(), nil, err.Error(), "", false} } // ErrorList holds a set of Errors. It is plausible that we might one day have @@ -285,6 +319,22 @@ func NewErrorTypeMatcher(t ErrorType) utilerrors.Matcher { } } +// WithOrigin sets the origin for all errors in the list and returns the updated list. +func (list ErrorList) WithOrigin(origin string) ErrorList { + for _, err := range list { + err.Origin = origin + } + return list +} + +// MarkCoveredByDeclarative marks all errors in the list as covered by declarative validation. +func (list ErrorList) MarkCoveredByDeclarative() ErrorList { + for _, err := range list { + err.CoveredByDeclarative = true + } + return list +} + // ToAggregate converts the ErrorList into an errors.Aggregate. 
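//
// A hedged sketch of typical use:
//
//	var errs ErrorList
//	errs = append(errs, Invalid(NewPath("spec", "replicas"), -1, "must be greater than or equal to 0"))
//	if err := errs.ToAggregate(); err != nil {
//		return err // a single error wrapping every field error
//	}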
func (list ErrorList) ToAggregate() utilerrors.Aggregate { if len(list) == 0 { @@ -321,3 +371,25 @@ func (list ErrorList) Filter(fns ...utilerrors.Matcher) ErrorList { // FilterOut takes an Aggregate and returns an Aggregate return fromAggregate(err.(utilerrors.Aggregate)) } + +// ExtractCoveredByDeclarative returns a new ErrorList containing only the errors that should be covered by declarative validation. +func (list ErrorList) ExtractCoveredByDeclarative() ErrorList { + newList := ErrorList{} + for _, err := range list { + if err.CoveredByDeclarative { + newList = append(newList, err) + } + } + return newList +} + +// RemoveCoveredByDeclarative returns a new ErrorList containing only the errors that should not be covered by declarative validation. +func (list ErrorList) RemoveCoveredByDeclarative() ErrorList { + newList := ErrorList{} + for _, err := range list { + if !err.CoveredByDeclarative { + newList = append(newList, err) + } + } + return newList +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/ip.go b/vendor/k8s.io/apimachinery/pkg/util/validation/ip.go new file mode 100644 index 0000000000..6e947c74c9 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/ip.go @@ -0,0 +1,278 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "fmt" + "net" + "net/netip" + "slices" + + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/klog/v2" + netutils "k8s.io/utils/net" +) + +func parseIP(fldPath *field.Path, value string, strictValidation bool) (net.IP, field.ErrorList) { + var allErrors field.ErrorList + + ip := netutils.ParseIPSloppy(value) + if ip == nil { + allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IP address, (e.g. 10.9.8.7 or 2001:db8::ffff)")) + return nil, allErrors + } + + if strictValidation { + addr, err := netip.ParseAddr(value) + if err != nil { + // If netutils.ParseIPSloppy parsed it, but netip.ParseAddr + // doesn't, then it must have illegal leading 0s. + allErrors = append(allErrors, field.Invalid(fldPath, value, "must not have leading 0s")) + } + if addr.Is4In6() { + allErrors = append(allErrors, field.Invalid(fldPath, value, "must not be an IPv4-mapped IPv6 address")) + } + } + + return ip, allErrors +} + +// IsValidIPForLegacyField tests that the argument is a valid IP address for a "legacy" +// API field that predates strict IP validation. In particular, this allows IPs that are +// not in canonical form (e.g., "FE80:0:0:0:0:0:0:0abc" instead of "fe80::abc"). +// +// If strictValidation is false, this also allows IPs in certain invalid or ambiguous +// formats: +// +// 1. IPv4 IPs are allowed to have leading "0"s in octets (e.g. "010.002.003.004"). 
+// Historically, net.ParseIP (and later netutils.ParseIPSloppy) simply ignored leading +// "0"s in IPv4 addresses, but most libc-based software treats 0-prefixed IPv4 octets +// as octal, meaning different software might interpret the same string as two +// different IPs, potentially leading to security issues. (Current net.ParseIP and +// netip.ParseAddr simply reject inputs with leading "0"s.) +// +// 2. IPv4-mapped IPv6 IPs (e.g. "::ffff:1.2.3.4") are allowed. These can also lead to +// different software interpreting the value in different ways, because they may be +// treated as IPv4 by some software and IPv6 by other software. (net.ParseIP and +// netip.ParseAddr both allow these, but there are no use cases for representing IPv4 +// addresses as IPv4-mapped IPv6 addresses in Kubernetes.) +// +// Alternatively, when validating an update to an existing field, you can pass a list of +// IP values from the old object that should be accepted if they appear in the new object +// even if they are not valid. +// +// This function should only be used to validate the existing fields that were +// historically validated in this way, and strictValidation should be true unless the +// StrictIPCIDRValidation feature gate is disabled. Use IsValidIP for parsing new fields. +func IsValidIPForLegacyField(fldPath *field.Path, value string, strictValidation bool, validOldIPs []string) field.ErrorList { + if slices.Contains(validOldIPs, value) { + return nil + } + _, allErrors := parseIP(fldPath, value, strictValidation) + return allErrors.WithOrigin("format=ip-sloppy") +} + +// IsValidIP tests that the argument is a valid IP address, according to current +// Kubernetes standards for IP address validation. +func IsValidIP(fldPath *field.Path, value string) field.ErrorList { + ip, allErrors := parseIP(fldPath, value, true) + if len(allErrors) != 0 { + return allErrors.WithOrigin("format=ip-strict") + } + + if value != ip.String() { + allErrors = append(allErrors, field.Invalid(fldPath, value, fmt.Sprintf("must be in canonical form (%q)", ip.String()))) + } + return allErrors.WithOrigin("format=ip-strict") +} + +// GetWarningsForIP returns warnings for IP address values in non-standard forms. This +// should only be used with fields that are validated with IsValidIPForLegacyField(). +func GetWarningsForIP(fldPath *field.Path, value string) []string { + ip := netutils.ParseIPSloppy(value) + if ip == nil { + klog.ErrorS(nil, "GetWarningsForIP called on value that was not validated with IsValidIPForLegacyField", "field", fldPath, "value", value) + return nil + } + + addr, _ := netip.ParseAddr(value) + if !addr.IsValid() || addr.Is4In6() { + // This catches 2 cases: leading 0s (if ParseIPSloppy() accepted it but + // ParseAddr() doesn't) or IPv4-mapped IPv6 (.Is4In6()). Either way, + // re-stringifying the net.IP value will give the preferred form. + return []string{ + fmt.Sprintf("%s: non-standard IP address %q will be considered invalid in a future Kubernetes release: use %q", fldPath, value, ip.String()), + } + } + + // If ParseIPSloppy() and ParseAddr() both accept it then it's fully valid, though + // it may be non-canonical. 
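+	// For illustration (assumed examples): "2001:DB8::1" and "2001:db8:0:0:0:0:0:1"
+	// both parse cleanly but canonicalize to "2001:db8::1", so they warn here.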
+ if addr.Is6() && addr.String() != value { + return []string{ + fmt.Sprintf("%s: IPv6 address %q should be in RFC 5952 canonical format (%q)", fldPath, value, addr.String()), + } + } + + return nil +} + +func parseCIDR(fldPath *field.Path, value string, strictValidation bool) (*net.IPNet, field.ErrorList) { + var allErrors field.ErrorList + + _, ipnet, err := netutils.ParseCIDRSloppy(value) + if err != nil { + allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid CIDR value, (e.g. 10.9.8.0/24 or 2001:db8::/64)")) + return nil, allErrors + } + + if strictValidation { + prefix, err := netip.ParsePrefix(value) + if err != nil { + // If netutils.ParseCIDRSloppy parsed it, but netip.ParsePrefix + // doesn't, then it must have illegal leading 0s (either in the + // IP part or the prefix). + allErrors = append(allErrors, field.Invalid(fldPath, value, "must not have leading 0s in IP or prefix length")) + } else if prefix.Addr().Is4In6() { + allErrors = append(allErrors, field.Invalid(fldPath, value, "must not have an IPv4-mapped IPv6 address")) + } else if prefix.Addr() != prefix.Masked().Addr() { + allErrors = append(allErrors, field.Invalid(fldPath, value, "must not have bits set beyond the prefix length")) + } + } + + return ipnet, allErrors +} + +// IsValidCIDRForLegacyField tests that the argument is a valid CIDR value for a "legacy" +// API field that predates strict IP validation. In particular, this allows IPs that are +// not in canonical form (e.g., "FE80:0abc:0:0:0:0:0:0/64" instead of "fe80:abc::/64"). +// +// If strictValidation is false, this also allows CIDR values in certain invalid or +// ambiguous formats: +// +// 1. The IP part of the CIDR value is parsed as with IsValidIPForLegacyField with +// strictValidation=false. +// +// 2. The CIDR value is allowed to be either a "subnet"/"mask" (with the lower bits after +// the prefix length all being 0), or an "interface address" as with `ip addr` (with a +// complete IP address and associated subnet length). With strict validation, the +// value is required to be in "subnet"/"mask" form. +// +// 3. The prefix length is allowed to have leading 0s. +// +// Alternatively, when validating an update to an existing field, you can pass a list of +// CIDR values from the old object that should be accepted if they appear in the new +// object even if they are not valid. +// +// This function should only be used to validate the existing fields that were +// historically validated in this way, and strictValidation should be true unless the +// StrictIPCIDRValidation feature gate is disabled. Use IsValidCIDR or +// IsValidInterfaceAddress for parsing new fields. +func IsValidCIDRForLegacyField(fldPath *field.Path, value string, strictValidation bool, validOldCIDRs []string) field.ErrorList { + if slices.Contains(validOldCIDRs, value) { + return nil + } + + _, allErrors := parseCIDR(fldPath, value, strictValidation) + return allErrors +} + +// IsValidCIDR tests that the argument is a valid CIDR value, according to current +// Kubernetes standards for CIDR validation. This function is only for +// "subnet"/"mask"-style CIDR values (e.g., "192.168.1.0/24", with no bits set beyond the +// prefix length). Use IsValidInterfaceAddress for "ifaddr"-style CIDR values. 
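+//
+// For example (illustrative): "192.168.1.0/24" and "2001:db8::/64" pass, while
+// "192.168.1.5/24" (bits set beyond the prefix), "192.168.001.0/24" (leading
+// 0s), and "::ffff:1.2.3.0/120" (IPv4-mapped IPv6) are rejected.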
+func IsValidCIDR(fldPath *field.Path, value string) field.ErrorList { + ipnet, allErrors := parseCIDR(fldPath, value, true) + if len(allErrors) != 0 { + return allErrors + } + + if value != ipnet.String() { + allErrors = append(allErrors, field.Invalid(fldPath, value, fmt.Sprintf("must be in canonical form (%q)", ipnet.String()))) + } + return allErrors +} + +// GetWarningsForCIDR returns warnings for CIDR values in non-standard forms. This should +// only be used with fields that are validated with IsValidCIDRForLegacyField(). +func GetWarningsForCIDR(fldPath *field.Path, value string) []string { + ip, ipnet, err := netutils.ParseCIDRSloppy(value) + if err != nil { + klog.ErrorS(err, "GetWarningsForCIDR called on value that was not validated with IsValidCIDRForLegacyField", "field", fldPath, "value", value) + return nil + } + + var warnings []string + + // Check for bits set after prefix length + if !ip.Equal(ipnet.IP) { + _, addrlen := ipnet.Mask.Size() + singleIPCIDR := fmt.Sprintf("%s/%d", ip.String(), addrlen) + warnings = append(warnings, + fmt.Sprintf("%s: CIDR value %q is ambiguous in this context (should be %q or %q?)", fldPath, value, ipnet.String(), singleIPCIDR), + ) + } + + prefix, _ := netip.ParsePrefix(value) + addr := prefix.Addr() + if !prefix.IsValid() || addr.Is4In6() { + // This catches 2 cases: leading 0s (if ParseCIDRSloppy() accepted it but + // ParsePrefix() doesn't) or IPv4-mapped IPv6 (.Is4In6()). Either way, + // re-stringifying the net.IPNet value will give the preferred form. + warnings = append(warnings, + fmt.Sprintf("%s: non-standard CIDR value %q will be considered invalid in a future Kubernetes release: use %q", fldPath, value, ipnet.String()), + ) + } + + // If ParseCIDRSloppy() and ParsePrefix() both accept it then it's fully valid, + // though it may be non-canonical. But only check this if there are no other + // warnings, since either of the other warnings would also cause a round-trip + // failure. + if len(warnings) == 0 && addr.Is6() && prefix.String() != value { + warnings = append(warnings, + fmt.Sprintf("%s: IPv6 CIDR value %q should be in RFC 5952 canonical format (%q)", fldPath, value, prefix.String()), + ) + } + + return warnings +} + +// IsValidInterfaceAddress tests that the argument is a valid "ifaddr"-style CIDR value in +// canonical form (e.g., "192.168.1.5/24", with a complete IP address and associated +// subnet length). Use IsValidCIDR for "subnet"/"mask"-style CIDR values (e.g., +// "192.168.1.0/24"). +func IsValidInterfaceAddress(fldPath *field.Path, value string) field.ErrorList { + var allErrors field.ErrorList + ip, ipnet, err := netutils.ParseCIDRSloppy(value) + if err != nil { + allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid address in CIDR form, (e.g. 10.9.8.7/24 or 2001:db8::1/64)")) + return allErrors + } + + // The canonical form of `value` is not `ipnet.String()`, because `ipnet` doesn't + // include the bits after the prefix. We need to construct the canonical form + // ourselves from `ip` and `ipnet.Mask`. 
+ maskSize, _ := ipnet.Mask.Size() + if netutils.IsIPv4(ip) && maskSize > net.IPv4len*8 { + // "::ffff:192.168.0.1/120" -> "192.168.0.1/24" + maskSize -= (net.IPv6len - net.IPv4len) * 8 + } + canonical := fmt.Sprintf("%s/%d", ip.String(), maskSize) + if value != canonical { + allErrors = append(allErrors, field.Invalid(fldPath, value, fmt.Sprintf("must be in canonical form (%q)", canonical))) + } + return allErrors +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go index 9bc393cf58..b6be7af1b1 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go @@ -24,7 +24,6 @@ import ( "unicode" "k8s.io/apimachinery/pkg/util/validation/field" - netutils "k8s.io/utils/net" ) const qnameCharFmt string = "[A-Za-z0-9]" @@ -369,45 +368,6 @@ func IsValidPortName(port string) []string { return errs } -// IsValidIP tests that the argument is a valid IP address. -func IsValidIP(fldPath *field.Path, value string) field.ErrorList { - var allErrors field.ErrorList - if netutils.ParseIPSloppy(value) == nil { - allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IP address, (e.g. 10.9.8.7 or 2001:db8::ffff)")) - } - return allErrors -} - -// IsValidIPv4Address tests that the argument is a valid IPv4 address. -func IsValidIPv4Address(fldPath *field.Path, value string) field.ErrorList { - var allErrors field.ErrorList - ip := netutils.ParseIPSloppy(value) - if ip == nil || ip.To4() == nil { - allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IPv4 address")) - } - return allErrors -} - -// IsValidIPv6Address tests that the argument is a valid IPv6 address. -func IsValidIPv6Address(fldPath *field.Path, value string) field.ErrorList { - var allErrors field.ErrorList - ip := netutils.ParseIPSloppy(value) - if ip == nil || ip.To4() != nil { - allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IPv6 address")) - } - return allErrors -} - -// IsValidCIDR tests that the argument is a valid CIDR value. -func IsValidCIDR(fldPath *field.Path, value string) field.ErrorList { - var allErrors field.ErrorList - _, _, err := netutils.ParseCIDRSloppy(value) - if err != nil { - allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid CIDR value, (e.g. 10.9.8.0/24 or 2001:db8::/64)")) - } - return allErrors -} - const percentFmt string = "[0-9]+%" const percentErrMsg string = "a valid percent string must be a numeric string followed by an ending '%'" diff --git a/vendor/k8s.io/apimachinery/pkg/util/version/doc.go b/vendor/k8s.io/apimachinery/pkg/util/version/doc.go index 5b2b22b6d0..da88813da2 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/version/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/util/version/doc.go @@ -15,4 +15,4 @@ limitations under the License. 
*/ // Package version provides utilities for version number comparisons -package version // import "k8s.io/apimachinery/pkg/util/version" +package version diff --git a/vendor/k8s.io/apimachinery/pkg/util/version/version.go b/vendor/k8s.io/apimachinery/pkg/util/version/version.go index b7812ff2d1..72c0769e6c 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/version/version.go +++ b/vendor/k8s.io/apimachinery/pkg/util/version/version.go @@ -33,7 +33,6 @@ type Version struct { semver bool preRelease string buildMetadata string - info apimachineryversion.Info } var ( @@ -456,27 +455,28 @@ func (v *Version) Compare(other string) (int, error) { return v.compareInternal(ov), nil } -// WithInfo returns copy of the version object with requested info +// WithInfo returns copy of the version object. +// Deprecated: The Info field has been removed from the Version struct. This method no longer modifies the Version object. func (v *Version) WithInfo(info apimachineryversion.Info) *Version { result := *v - result.info = info return &result } +// Info returns the version information of a component. +// Deprecated: Use Info() from effective version instead. func (v *Version) Info() *apimachineryversion.Info { if v == nil { return nil } // in case info is empty, or the major and minor in info is different from the actual major and minor - v.info.Major = itoa(v.Major()) - v.info.Minor = itoa(v.Minor()) - if v.info.GitVersion == "" { - v.info.GitVersion = v.String() + return &apimachineryversion.Info{ + Major: Itoa(v.Major()), + Minor: Itoa(v.Minor()), + GitVersion: v.String(), } - return &v.info } -func itoa(i uint) string { +func Itoa(i uint) string { if i == 0 { return "" } diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/backoff.go b/vendor/k8s.io/apimachinery/pkg/util/wait/backoff.go index 4187619256..177be09a95 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/wait/backoff.go +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/backoff.go @@ -157,6 +157,8 @@ func (b Backoff) DelayWithReset(c clock.Clock, resetInterval time.Duration) Dela // Until is syntactic sugar on top of JitterUntil with zero jitter factor and // with sliding = true (which means the timer for period starts after the f // completes). +// +// Contextual logging: UntilWithContext should be used instead of Until in code which supports contextual logging. func Until(f func(), period time.Duration, stopCh <-chan struct{}) { JitterUntil(f, period, 0.0, true, stopCh) } @@ -176,6 +178,8 @@ func UntilWithContext(ctx context.Context, f func(context.Context), period time. // NonSlidingUntil is syntactic sugar on top of JitterUntil with zero jitter // factor, with sliding = false (meaning the timer for period starts at the same // time as the function starts). +// +// Contextual logging: NonSlidingUntilWithContext should be used instead of NonSlidingUntil in code which supports contextual logging. func NonSlidingUntil(f func(), period time.Duration, stopCh <-chan struct{}) { JitterUntil(f, period, 0.0, false, stopCh) } @@ -200,19 +204,44 @@ func NonSlidingUntilWithContext(ctx context.Context, f func(context.Context), pe // // Close stopCh to stop. f may not be invoked if stop channel is already // closed. Pass NeverStop to if you don't want it stop. +// +// Contextual logging: JitterUntilWithContext should be used instead of JitterUntil in code which supports contextual logging. 
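+//
+// A minimal sketch of the contextual variant (pollOnce is an assumed
+// func(context.Context)):
+//
+//	ctx, cancel := context.WithCancel(context.Background())
+//	defer cancel()
+//	JitterUntilWithContext(ctx, pollOnce, time.Second, 0.1, true)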
func JitterUntil(f func(), period time.Duration, jitterFactor float64, sliding bool, stopCh <-chan struct{}) { BackoffUntil(f, NewJitteredBackoffManager(period, jitterFactor, &clock.RealClock{}), sliding, stopCh) } +// JitterUntilWithContext loops until context is done, running f every period. +// +// If jitterFactor is positive, the period is jittered before every run of f. +// If jitterFactor is not positive, the period is unchanged and not jittered. +// +// If sliding is true, the period is computed after f runs. If it is false then +// period includes the runtime for f. +// +// Cancel context to stop. f may not be invoked if context is already done. +func JitterUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration, jitterFactor float64, sliding bool) { + BackoffUntilWithContext(ctx, f, NewJitteredBackoffManager(period, jitterFactor, &clock.RealClock{}), sliding) +} + // BackoffUntil loops until stop channel is closed, run f every duration given by BackoffManager. // // If sliding is true, the period is computed after f runs. If it is false then // period includes the runtime for f. +// +// Contextual logging: BackoffUntilWithContext should be used instead of BackoffUntil in code which supports contextual logging. func BackoffUntil(f func(), backoff BackoffManager, sliding bool, stopCh <-chan struct{}) { + BackoffUntilWithContext(ContextForChannel(stopCh), func(context.Context) { f() }, backoff, sliding) +} + +// BackoffUntilWithContext loops until context is done, run f every duration given by BackoffManager. +// +// If sliding is true, the period is computed after f runs. If it is false then +// period includes the runtime for f. +func BackoffUntilWithContext(ctx context.Context, f func(ctx context.Context), backoff BackoffManager, sliding bool) { var t clock.Timer for { select { - case <-stopCh: + case <-ctx.Done(): return default: } @@ -222,8 +251,8 @@ func BackoffUntil(f func(), backoff BackoffManager, sliding bool, stopCh <-chan } func() { - defer runtime.HandleCrash() - f() + defer runtime.HandleCrashWithContext(ctx) + f(ctx) }() if sliding { @@ -236,7 +265,7 @@ func BackoffUntil(f func(), backoff BackoffManager, sliding bool, stopCh <-chan // In order to mitigate we re-check stopCh at the beginning // of every loop to prevent extra executions of f(). select { - case <-stopCh: + case <-ctx.Done(): if !t.Stop() { <-t.C() } @@ -246,19 +275,6 @@ func BackoffUntil(f func(), backoff BackoffManager, sliding bool, stopCh <-chan } } -// JitterUntilWithContext loops until context is done, running f every period. -// -// If jitterFactor is positive, the period is jittered before every run of f. -// If jitterFactor is not positive, the period is unchanged and not jittered. -// -// If sliding is true, the period is computed after f runs. If it is false then -// period includes the runtime for f. -// -// Cancel context to stop. f may not be invoked if context is already expired. -func JitterUntilWithContext(ctx context.Context, f func(context.Context), period time.Duration, jitterFactor float64, sliding bool) { - JitterUntil(func() { f(ctx) }, period, jitterFactor, sliding, ctx.Done()) -} - // backoffManager provides simple backoff behavior in a threadsafe manner to a caller. 
type backoffManager struct { backoff Backoff diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/doc.go b/vendor/k8s.io/apimachinery/pkg/util/wait/doc.go index 3f0c968ec9..ff89dc170e 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/wait/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/doc.go @@ -16,4 +16,4 @@ limitations under the License. // Package wait provides tools for polling or listening for changes // to a condition. -package wait // import "k8s.io/apimachinery/pkg/util/wait" +package wait diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go b/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go index 107bfc132f..9f9b929ffa 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/loop.go @@ -49,7 +49,7 @@ func loopConditionUntilContext(ctx context.Context, t Timer, immediate, sliding // if we haven't requested immediate execution, delay once if immediate { if ok, err := func() (bool, error) { - defer runtime.HandleCrash() + defer runtime.HandleCrashWithContext(ctx) return condition(ctx) }(); err != nil || ok { return err @@ -83,7 +83,7 @@ func loopConditionUntilContext(ctx context.Context, t Timer, immediate, sliding t.Next() } if ok, err := func() (bool, error) { - defer runtime.HandleCrash() + defer runtime.HandleCrashWithContext(ctx) return condition(ctx) }(); err != nil || ok { return err diff --git a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go index 6805e8cf94..7379a8d5ac 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go +++ b/vendor/k8s.io/apimachinery/pkg/util/wait/wait.go @@ -80,6 +80,10 @@ func Forever(f func(), period time.Duration) { Until(f, period, NeverStop) } +// jitterRand is a dedicated random source for jitter calculations. +// It defaults to rand.Float64, but is a package variable so it can be overridden to make unit tests deterministic. +var jitterRand = rand.Float64 + // Jitter returns a time.Duration between duration and duration + maxFactor * // duration. // @@ -89,7 +93,7 @@ func Jitter(duration time.Duration, maxFactor float64) time.Duration { if maxFactor <= 0.0 { maxFactor = 1.0 } - wait := duration + time.Duration(rand.Float64()*maxFactor*float64(duration)) + wait := duration + time.Duration(jitterRand()*maxFactor*float64(duration)) return wait } @@ -141,6 +145,7 @@ func (c channelContext) Value(key any) any { return nil } // // Deprecated: Will be removed when the legacy polling methods are removed. func runConditionWithCrashProtection(condition ConditionFunc) (bool, error) { + //nolint:logcheck // Already deprecated. defer runtime.HandleCrash() return condition() } @@ -150,7 +155,7 @@ func runConditionWithCrashProtection(condition ConditionFunc) (bool, error) { // // Deprecated: Will be removed when the legacy polling methods are removed. func runConditionWithCrashProtectionWithContext(ctx context.Context, condition ConditionWithContextFunc) (bool, error) { - defer runtime.HandleCrash() + defer runtime.HandleCrashWithContext(ctx) return condition(ctx) } diff --git a/vendor/k8s.io/apimachinery/pkg/util/waitgroup/doc.go b/vendor/k8s.io/apimachinery/pkg/util/waitgroup/doc.go index a6f29cd7c4..6eb7903a73 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/waitgroup/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/util/waitgroup/doc.go @@ -16,4 +16,4 @@ limitations under the License. // Package waitgroup implements SafeWaitGroup wrap of sync.WaitGroup. 
// Add with positive delta when waiting will fail, to prevent sync.WaitGroup race issue. -package waitgroup // import "k8s.io/apimachinery/pkg/util/waitgroup" +package waitgroup diff --git a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go index 9837b3df28..bd91a189e6 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go +++ b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go @@ -20,10 +20,12 @@ import ( "bufio" "bytes" "encoding/json" + "errors" "fmt" "io" "strings" "unicode" + "unicode/utf8" jsonutil "k8s.io/apimachinery/pkg/util/json" @@ -92,7 +94,7 @@ func UnmarshalStrict(data []byte, v interface{}) error { // YAML decoding path is not used (so that error messages are // JSON specific). func ToJSON(data []byte) ([]byte, error) { - if hasJSONPrefix(data) { + if IsJSONBuffer(data) { return data, nil } return yaml.YAMLToJSON(data) @@ -102,7 +104,8 @@ func ToJSON(data []byte) ([]byte, error) { // separating individual documents. It first converts the YAML // body to JSON, then unmarshals the JSON. type YAMLToJSONDecoder struct { - reader Reader + reader Reader + inputOffset int } // NewYAMLToJSONDecoder decodes YAML documents from the provided @@ -121,7 +124,7 @@ func NewYAMLToJSONDecoder(r io.Reader) *YAMLToJSONDecoder { // yaml.Unmarshal. func (d *YAMLToJSONDecoder) Decode(into interface{}) error { bytes, err := d.reader.Read() - if err != nil && err != io.EOF { + if err != nil && err != io.EOF { //nolint:errorlint return err } @@ -131,9 +134,14 @@ func (d *YAMLToJSONDecoder) Decode(into interface{}) error { return YAMLSyntaxError{err} } } + d.inputOffset += len(bytes) return err } +func (d *YAMLToJSONDecoder) InputOffset() int { + return d.inputOffset +} + // YAMLDecoder reads chunks of objects and returns ErrShortBuffer if // the data is not sufficient. type YAMLDecoder struct { @@ -229,18 +237,22 @@ func splitYAMLDocument(data []byte, atEOF bool) (advance int, token []byte, err return 0, nil, nil } -// decoder is a convenience interface for Decode. -type decoder interface { - Decode(into interface{}) error -} - -// YAMLOrJSONDecoder attempts to decode a stream of JSON documents or -// YAML documents by sniffing for a leading { character. +// YAMLOrJSONDecoder attempts to decode a stream of JSON or YAML documents. +// While JSON is YAML, the way Go's JSON decode defines a multi-document stream +// is a series of JSON objects (e.g. {}{}), but YAML defines a multi-document +// stream as a series of documents separated by "---". +// +// This decoder will attempt to decode the stream as JSON first, and if that +// fails, it will switch to YAML. Once it determines the stream is JSON (by +// finding a non-YAML-delimited series of objects), it will not switch to YAML. +// Once it switches to YAML it will not switch back to JSON. type YAMLOrJSONDecoder struct { - r io.Reader - bufferSize int - - decoder decoder + json *json.Decoder + jsonConsumed int64 // of the stream total, how much was JSON? + yaml *YAMLToJSONDecoder + yamlConsumed int64 // of the stream total, how much was YAML? + stream *StreamReader + count int // how many objects have been decoded } type JSONSyntaxError struct { @@ -265,31 +277,113 @@ func (e YAMLSyntaxError) Error() string { // how far into the stream the decoder will look to figure out whether this // is a JSON stream (has whitespace followed by an open brace). 
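//
// A hedged usage sketch:
//
//	d := NewYAMLOrJSONDecoder(strings.NewReader("a: 1\n---\na: 2\n"), 4096)
//	var obj map[string]interface{}
//	for d.Decode(&obj) == nil {
//		// obj holds one decoded document per iteration
//	}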
func NewYAMLOrJSONDecoder(r io.Reader, bufferSize int) *YAMLOrJSONDecoder { - return &YAMLOrJSONDecoder{ - r: r, - bufferSize: bufferSize, + d := &YAMLOrJSONDecoder{} + + reader, _, mightBeJSON := GuessJSONStream(r, bufferSize) + d.stream = reader + if mightBeJSON { + d.json = json.NewDecoder(reader) + } else { + d.yaml = NewYAMLToJSONDecoder(reader) } + return d } // Decode unmarshals the next object from the underlying stream into the // provide object, or returns an error. func (d *YAMLOrJSONDecoder) Decode(into interface{}) error { - if d.decoder == nil { - buffer, _, isJSON := GuessJSONStream(d.r, d.bufferSize) - if isJSON { - d.decoder = json.NewDecoder(buffer) + // Because we don't know if this is a JSON or YAML stream, a failure from + // both decoders is ambiguous. When in doubt, it will return the error from + // the JSON decoder. Unfortunately, this means that if the first document + // is invalid YAML, the error won't be awesome. + // TODO: the errors from YAML are not great, we could improve them a lot. + var firstErr error + if d.json != nil { + err := d.json.Decode(into) + if err == nil { + d.count++ + consumed := d.json.InputOffset() - d.jsonConsumed + d.stream.Consume(int(consumed)) + d.jsonConsumed += consumed + return nil + } + if err == io.EOF { //nolint:errorlint + return err + } + var syntax *json.SyntaxError + if ok := errors.As(err, &syntax); ok { + firstErr = JSONSyntaxError{ + Offset: syntax.Offset, + Err: syntax, + } } else { - d.decoder = NewYAMLToJSONDecoder(buffer) + firstErr = err + } + if d.count > 1 { + // If we found 0 or 1 JSON object(s), this stream is still + // ambiguous. But if we found more than 1 JSON object, then this + // is an unambiguous JSON stream, and we should not switch to YAML. + return err + } + // If JSON decoding hits the end of one object and then fails on the + // next, it leaves any leading whitespace in the buffer, which can + // confuse the YAML decoder. We just eat any whitespace we find, up to + // and including the first newline. 
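The JSON-first, fall-back-to-YAML behavior described in the comment above can be exercised as in the following sketch (assuming the bumped vendor; stream contents are arbitrary): a brace-led stream stays on the JSON path, while a "---"-separated stream switches to YAML.

```go
package main

import (
	"fmt"
	"strings"

	utilyaml "k8s.io/apimachinery/pkg/util/yaml"
)

func main() {
	// Two JSON objects back to back: the decoder stays in JSON mode.
	jsonStream := `{"a":1}{"b":2}`
	dec := utilyaml.NewYAMLOrJSONDecoder(strings.NewReader(jsonStream), 4096)
	for {
		var obj map[string]interface{}
		if err := dec.Decode(&obj); err != nil {
			break
		}
		fmt.Println(obj)
	}

	// "---"-separated documents: the decoder falls back to YAML.
	yamlStream := "a: 1\n---\nb: 2\n"
	dec = utilyaml.NewYAMLOrJSONDecoder(strings.NewReader(yamlStream), 4096)
	for {
		var obj map[string]interface{}
		if err := dec.Decode(&obj); err != nil {
			break
		}
		fmt.Println(obj)
	}
}
```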
+ d.stream.Rewind() + if err := d.consumeWhitespace(); err == nil { + d.yaml = NewYAMLToJSONDecoder(d.stream) + } + d.json = nil + } + if d.yaml != nil { + err := d.yaml.Decode(into) + if err == nil { + consumed := int64(d.yaml.InputOffset()) - d.yamlConsumed + d.stream.Consume(int(consumed)) + d.yamlConsumed += consumed + d.count++ + return nil + } + if err == io.EOF { //nolint:errorlint + return err + } + if firstErr == nil { + firstErr = err } } - err := d.decoder.Decode(into) - if syntax, ok := err.(*json.SyntaxError); ok { - return JSONSyntaxError{ - Offset: syntax.Offset, - Err: syntax, + if firstErr != nil { + return firstErr + } + return fmt.Errorf("decoding failed as both JSON and YAML") +} + +func (d *YAMLOrJSONDecoder) consumeWhitespace() error { + consumed := 0 + for { + buf, err := d.stream.ReadN(4) + if err != nil && err == io.EOF { //nolint:errorlint + return err + } + r, sz := utf8.DecodeRune(buf) + if r == utf8.RuneError || sz == 0 { + return fmt.Errorf("invalid utf8 rune") } + d.stream.RewindN(len(buf) - sz) + if !unicode.IsSpace(r) { + d.stream.RewindN(sz) + d.stream.Consume(consumed) + return nil + } + if r == '\n' { + d.stream.Consume(consumed) + return nil + } + if err == io.EOF { //nolint:errorlint + break + } + consumed += sz } - return err + return io.EOF } type Reader interface { @@ -311,7 +405,7 @@ func (r *YAMLReader) Read() ([]byte, error) { var buffer bytes.Buffer for { line, err := r.reader.Read() - if err != nil && err != io.EOF { + if err != nil && err != io.EOF { //nolint:errorlint return nil, err } @@ -329,11 +423,11 @@ func (r *YAMLReader) Read() ([]byte, error) { if buffer.Len() != 0 { return buffer.Bytes(), nil } - if err == io.EOF { + if err == io.EOF { //nolint:errorlint return nil, err } } - if err == io.EOF { + if err == io.EOF { //nolint:errorlint if buffer.Len() != 0 { // If we're at EOF, we have a final, non-terminated line. Return it. return buffer.Bytes(), nil @@ -369,26 +463,20 @@ func (r *LineReader) Read() ([]byte, error) { // GuessJSONStream scans the provided reader up to size, looking // for an open brace indicating this is JSON. It will return the // bufio.Reader it creates for the consumer. -func GuessJSONStream(r io.Reader, size int) (io.Reader, []byte, bool) { - buffer := bufio.NewReaderSize(r, size) +func GuessJSONStream(r io.Reader, size int) (*StreamReader, []byte, bool) { + buffer := NewStreamReader(r, size) b, _ := buffer.Peek(size) - return buffer, b, hasJSONPrefix(b) + return buffer, b, IsJSONBuffer(b) } // IsJSONBuffer scans the provided buffer, looking // for an open brace indicating this is JSON. func IsJSONBuffer(buf []byte) bool { - return hasJSONPrefix(buf) + return hasPrefix(buf, jsonPrefix) } var jsonPrefix = []byte("{") -// hasJSONPrefix returns true if the provided buffer appears to start with -// a JSON open brace. -func hasJSONPrefix(buf []byte) bool { - return hasPrefix(buf, jsonPrefix) -} - // Return true if the first non-whitespace bytes in buf is // prefix. func hasPrefix(buf []byte, prefix []byte) bool { diff --git a/vendor/k8s.io/apimachinery/pkg/util/yaml/stream_reader.go b/vendor/k8s.io/apimachinery/pkg/util/yaml/stream_reader.go new file mode 100644 index 0000000000..d06991057f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/yaml/stream_reader.go @@ -0,0 +1,130 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package yaml
+
+import "io"
+
+// StreamReader is a reader designed for consuming streams of variable-length
+// messages. It buffers data until it is explicitly consumed, and can be
+// rewound to re-read previous data.
+type StreamReader struct {
+	r           io.Reader
+	buf         []byte
+	head        int // current read offset into buf
+	ttlConsumed int // number of bytes which have been consumed
+}
+
+// NewStreamReader creates a new StreamReader wrapping the provided
+// io.Reader.
+func NewStreamReader(r io.Reader, size int) *StreamReader {
+	if size == 0 {
+		size = 4096
+	}
+	return &StreamReader{
+		r:   r,
+		buf: make([]byte, 0, size), // Start with a reasonable capacity
+	}
+}
+
+// Read implements io.Reader. It first returns any buffered data after the
+// current offset, and if that's exhausted, reads from the underlying reader
+// and buffers the data. The returned data is not considered consumed until the
+// Consume method is called.
+func (r *StreamReader) Read(p []byte) (n int, err error) {
+	// If we have buffered data, return it
+	if r.head < len(r.buf) {
+		n = copy(p, r.buf[r.head:])
+		r.head += n
+		return n, nil
+	}
+
+	// If we've already hit EOF, return it
+	if r.r == nil {
+		return 0, io.EOF
+	}
+
+	// Read from the underlying reader
+	n, err = r.r.Read(p)
+	if n > 0 {
+		r.buf = append(r.buf, p[:n]...)
+		r.head += n
+	}
+	if err == nil {
+		return n, nil
+	}
+	if err == io.EOF {
+		// Store that we've hit EOF by setting r to nil
+		r.r = nil
+	}
+	return n, err
+}
+
+// ReadN reads exactly n bytes from the reader, blocking until all bytes are
+// read or an error occurs. If an error occurs, the number of bytes read is
+// returned along with the error. If EOF is hit before n bytes are read, this
+// will return the bytes read so far, along with io.EOF. The returned data is
+// not considered consumed until the Consume method is called.
+func (r *StreamReader) ReadN(want int) ([]byte, error) {
+	ret := make([]byte, want)
+	off := 0
+	for off < want {
+		n, err := r.Read(ret[off:])
+		if err != nil {
+			return ret[:off+n], err
+		}
+		off += n
+	}
+	return ret, nil
+}
+
+// Peek returns the next n bytes without advancing the reader. The returned
+// bytes are valid until the next call to Consume.
+func (r *StreamReader) Peek(n int) ([]byte, error) {
+	buf, err := r.ReadN(n)
+	r.RewindN(len(buf))
+	if err != nil {
+		return buf, err
+	}
+	return buf, nil
+}
+
+// Rewind resets the reader to the beginning of the buffered data.
+func (r *StreamReader) Rewind() {
+	r.head = 0
+}
+
+// RewindN rewinds the reader by n bytes. If n is greater than the current
+// buffer, the reader is rewound to the beginning of the buffer.
+func (r *StreamReader) RewindN(n int) {
+	r.head -= min(n, r.head)
+}
+
+// Consume discards up to n bytes of previously read data from the beginning of
+// the buffer. Once consumed, that data is no longer available for rewinding.
+// If n is greater than the current buffer, the buffer is cleared. Consume
+// never consumes data from the underlying reader.
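A usage sketch of the StreamReader helpers defined above (Peek, ReadN, Rewind, Consume), assuming the bumped vendor; the input string and sizes are arbitrary. Peek is what GuessJSONStream uses to sniff the stream without advancing it.

```go
package main

import (
	"fmt"
	"strings"

	utilyaml "k8s.io/apimachinery/pkg/util/yaml"
)

func main() {
	sr := utilyaml.NewStreamReader(strings.NewReader(`{"a":1}rest`), 0)

	// Peek sniffs without advancing the reader.
	head, _ := sr.Peek(1)
	fmt.Printf("looks like JSON: %v\n", head[0] == '{')

	// Read a chunk, rewind it, and read it again: nothing is lost until
	// Consume discards it from the front of the buffer.
	chunk, _ := sr.ReadN(7)
	fmt.Printf("first read: %s\n", chunk)
	sr.Rewind()
	again, _ := sr.ReadN(7)
	fmt.Printf("after Rewind: %s\n", again)

	sr.Consume(7) // the JSON object is done; drop it for good
	fmt.Println("consumed so far:", sr.Consumed())
}
```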
+func (r *StreamReader) Consume(n int) { + n = min(n, len(r.buf)) + r.buf = r.buf[n:] + r.head -= n + r.ttlConsumed += n +} + +// Consumed returns the number of bytes consumed from the input reader. +func (r *StreamReader) Consumed() int { + return r.ttlConsumed +} diff --git a/vendor/k8s.io/apimachinery/pkg/version/doc.go b/vendor/k8s.io/apimachinery/pkg/version/doc.go index 29574fd6d5..5f446a4f4a 100644 --- a/vendor/k8s.io/apimachinery/pkg/version/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/version/doc.go @@ -16,5 +16,5 @@ limitations under the License. // +k8s:openapi-gen=true -// Package version supplies the type for version information collected at build time. -package version // import "k8s.io/apimachinery/pkg/version" +// Package version supplies the type for version information. +package version diff --git a/vendor/k8s.io/apimachinery/pkg/version/types.go b/vendor/k8s.io/apimachinery/pkg/version/types.go index 72727b503b..6a18f9e91d 100644 --- a/vendor/k8s.io/apimachinery/pkg/version/types.go +++ b/vendor/k8s.io/apimachinery/pkg/version/types.go @@ -20,15 +20,25 @@ package version // TODO: Add []string of api versions supported? It's still unclear // how we'll want to distribute that information. type Info struct { - Major string `json:"major"` - Minor string `json:"minor"` - GitVersion string `json:"gitVersion"` - GitCommit string `json:"gitCommit"` - GitTreeState string `json:"gitTreeState"` - BuildDate string `json:"buildDate"` - GoVersion string `json:"goVersion"` - Compiler string `json:"compiler"` - Platform string `json:"platform"` + // Major is the major version of the binary version + Major string `json:"major"` + // Minor is the minor version of the binary version + Minor string `json:"minor"` + // EmulationMajor is the major version of the emulation version + EmulationMajor string `json:"emulationMajor,omitempty"` + // EmulationMinor is the minor version of the emulation version + EmulationMinor string `json:"emulationMinor,omitempty"` + // MinCompatibilityMajor is the major version of the minimum compatibility version + MinCompatibilityMajor string `json:"minCompatibilityMajor,omitempty"` + // MinCompatibilityMinor is the minor version of the minimum compatibility version + MinCompatibilityMinor string `json:"minCompatibilityMinor,omitempty"` + GitVersion string `json:"gitVersion"` + GitCommit string `json:"gitCommit"` + GitTreeState string `json:"gitTreeState"` + BuildDate string `json:"buildDate"` + GoVersion string `json:"goVersion"` + Compiler string `json:"compiler"` + Platform string `json:"platform"` } // String returns info as a human-friendly version string. diff --git a/vendor/k8s.io/apimachinery/pkg/watch/doc.go b/vendor/k8s.io/apimachinery/pkg/watch/doc.go index 7e6bf3fb95..5fde5e7427 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/doc.go @@ -16,4 +16,4 @@ limitations under the License. // Package watch contains a generic watchable interface, and a fake for // testing code that uses the watch interface. -package watch // import "k8s.io/apimachinery/pkg/watch" +package watch diff --git a/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go index 42dcac2b9e..b422ca9f55 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go @@ -51,6 +51,7 @@ type Reporter interface { // StreamWatcher turns any stream for which you can write a Decoder interface // into a watch.Interface. 
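Because the new emulation and minimum-compatibility fields on version.Info are tagged omitempty, the addition is wire-compatible for consumers that leave them unset. A small sketch (the values are made up):

```go
package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/apimachinery/pkg/version"
)

func main() {
	// Unset emulation fields are omitted, so the JSON shape is unchanged
	// for existing consumers.
	info := version.Info{Major: "1", Minor: "33"}
	b, _ := json.Marshal(info)
	fmt.Println(string(b))

	// Once set, the new fields appear alongside the old ones.
	info.EmulationMajor, info.EmulationMinor = "1", "32"
	b, _ = json.Marshal(info)
	fmt.Println(string(b))
}
```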
type StreamWatcher struct { + logger klog.Logger sync.Mutex source Decoder reporter Reporter @@ -59,8 +60,16 @@ type StreamWatcher struct { } // NewStreamWatcher creates a StreamWatcher from the given decoder. +// +// Contextual logging: NewStreamWatcherWithLogger should be used instead of NewStreamWatcher in code which supports contextual logging. func NewStreamWatcher(d Decoder, r Reporter) *StreamWatcher { + return NewStreamWatcherWithLogger(klog.Background(), d, r) +} + +// NewStreamWatcherWithLogger creates a StreamWatcher from the given decoder and logger. +func NewStreamWatcherWithLogger(logger klog.Logger, d Decoder, r Reporter) *StreamWatcher { sw := &StreamWatcher{ + logger: logger, source: d, reporter: r, // It's easy for a consumer to add buffering via an extra @@ -98,7 +107,7 @@ func (sw *StreamWatcher) Stop() { // receive reads result from the decoder in a loop and sends down the result channel. func (sw *StreamWatcher) receive() { - defer utilruntime.HandleCrash() + defer utilruntime.HandleCrashWithLogger(sw.logger) defer close(sw.result) defer sw.Stop() for { @@ -108,10 +117,10 @@ func (sw *StreamWatcher) receive() { case io.EOF: // watch closed normally case io.ErrUnexpectedEOF: - klog.V(1).Infof("Unexpected EOF during watch stream event decoding: %v", err) + sw.logger.V(1).Info("Unexpected EOF during watch stream event decoding", "err", err) default: if net.IsProbableEOF(err) || net.IsTimeout(err) { - klog.V(5).Infof("Unable to decode an event from the watch stream: %v", err) + sw.logger.V(5).Info("Unable to decode an event from the watch stream", "err", err) } else { select { case <-sw.done: diff --git a/vendor/k8s.io/apimachinery/pkg/watch/watch.go b/vendor/k8s.io/apimachinery/pkg/watch/watch.go index ce37fd8c18..251459834b 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/watch.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/watch.go @@ -23,6 +23,7 @@ import ( "k8s.io/klog/v2" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/ptr" ) // Interface can be implemented by anything that knows how to watch and report changes. @@ -103,29 +104,42 @@ func (w emptyWatch) ResultChan() <-chan Event { // FakeWatcher lets you test anything that consumes a watch.Interface; threadsafe. type FakeWatcher struct { + logger klog.Logger result chan Event stopped bool sync.Mutex } +var _ Interface = &FakeWatcher{} + +// Contextual logging: NewFakeWithOptions and a logger in the FakeOptions should be used instead in code which supports contextual logging. func NewFake() *FakeWatcher { - return &FakeWatcher{ - result: make(chan Event), - } + return NewFakeWithOptions(FakeOptions{}) } +// Contextual logging: NewFakeWithOptions and a logger in the FakeOptions should be used instead in code which supports contextual logging. func NewFakeWithChanSize(size int, blocking bool) *FakeWatcher { + return NewFakeWithOptions(FakeOptions{ChannelSize: size}) +} + +func NewFakeWithOptions(options FakeOptions) *FakeWatcher { return &FakeWatcher{ - result: make(chan Event, size), + logger: ptr.Deref(options.Logger, klog.Background()), + result: make(chan Event, options.ChannelSize), } } +type FakeOptions struct { + Logger *klog.Logger + ChannelSize int +} + // Stop implements Interface.Stop(). 
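A sketch of the new options-based FakeWatcher constructor with an injected logger, assuming the bumped vendor; the event object and channel size are arbitrary, and ptr.To comes from k8s.io/utils/ptr, which this repo already vendors.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/klog/v2"
	"k8s.io/utils/ptr"
)

func main() {
	// NewFakeWithOptions subsumes NewFake/NewFakeWithChanSize and lets
	// callers supply a contextual logger.
	fw := watch.NewFakeWithOptions(watch.FakeOptions{
		Logger:      ptr.To(klog.Background()),
		ChannelSize: 1,
	})
	fw.Add(nil) // buffered: does not block with ChannelSize 1
	evt := <-fw.ResultChan()
	fmt.Println(evt.Type) // ADDED
	fw.Stop()
}
```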
func (f *FakeWatcher) Stop() { f.Lock() defer f.Unlock() if !f.stopped { - klog.V(4).Infof("Stopping fake watcher.") + f.logger.V(4).Info("Stopping fake watcher") close(f.result) f.stopped = true } @@ -176,13 +190,22 @@ func (f *FakeWatcher) Action(action EventType, obj runtime.Object) { // RaceFreeFakeWatcher lets you test anything that consumes a watch.Interface; threadsafe. type RaceFreeFakeWatcher struct { + logger klog.Logger result chan Event Stopped bool sync.Mutex } +var _ Interface = &RaceFreeFakeWatcher{} + +// Contextual logging: RaceFreeFakeWatcherWithLogger should be used instead of NewRaceFreeFake in code which supports contextual logging. func NewRaceFreeFake() *RaceFreeFakeWatcher { + return NewRaceFreeFakeWithLogger(klog.Background()) +} + +func NewRaceFreeFakeWithLogger(logger klog.Logger) *RaceFreeFakeWatcher { return &RaceFreeFakeWatcher{ + logger: logger, result: make(chan Event, DefaultChanSize), } } @@ -192,7 +215,7 @@ func (f *RaceFreeFakeWatcher) Stop() { f.Lock() defer f.Unlock() if !f.Stopped { - klog.V(4).Infof("Stopping fake watcher.") + f.logger.V(4).Info("Stopping fake watcher") close(f.result) f.Stopped = true } diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go b/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go index 08b6246ceb..25e4fd09eb 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/fuzz.go @@ -4,7 +4,7 @@ import ( "math/rand" "strings" - fuzz "github.com/google/gofuzz" + "sigs.k8s.io/randfill" "k8s.io/kube-openapi/pkg/validation/spec" ) @@ -25,15 +25,15 @@ func randAlphanumString() string { } var OpenAPIV3FuzzFuncs []interface{} = []interface{}{ - func(s *string, c fuzz.Continue) { + func(s *string, c randfill.Continue) { // All OpenAPI V3 map keys must follow the corresponding // regex. Note that this restricts the range for all other // string values as well. 
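The renames in this fuzz.go hunk are mechanical: fuzz.New becomes randfill.New, c.Fuzz becomes c.Fill, c.FuzzNoCustom becomes c.FillNoCustom, c.RandString() becomes c.String(0), and c.RandBool() becomes c.Bool(). A small self-contained sketch of a migrated custom function (the Config type is invented for illustration):

```go
package main

import (
	"fmt"

	"sigs.k8s.io/randfill"
)

type Config struct {
	Name    string
	Retries int
}

func main() {
	f := randfill.New().NilChance(0).Funcs(
		// Custom funcs take a pointer and a randfill.Continue, exactly
		// as the gofuzz equivalents took a fuzz.Continue.
		func(c *Config, cont randfill.Continue) {
			c.Name = "cfg-" + cont.String(0) // was c.RandString()
			cont.Fill(&c.Retries)            // was c.Fuzz(...)
		},
	)
	var cfg Config
	f.Fill(&cfg)
	fmt.Printf("%+v\n", cfg)
}
```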
str := randAlphanumString() *s = str }, - func(o *OpenAPI, c fuzz.Continue) { - c.FuzzNoCustom(o) + func(o *OpenAPI, c randfill.Continue) { + c.FillNoCustom(o) o.Version = "3.0.0" for i, val := range o.SecurityRequirement { if val == nil { @@ -48,45 +48,45 @@ var OpenAPIV3FuzzFuncs []interface{} = []interface{}{ } }, - func(r *interface{}, c fuzz.Continue) { + func(r *interface{}, c randfill.Continue) { switch c.Intn(3) { case 0: *r = nil case 1: - n := c.RandString() + "x" + n := c.String(0) + "x" *r = n case 2: n := c.Float64() *r = n } }, - func(v **spec.Info, c fuzz.Continue) { + func(v **spec.Info, c randfill.Continue) { // Info is never nil *v = &spec.Info{} - c.FuzzNoCustom(*v) - (*v).Title = c.RandString() + "x" + c.FillNoCustom(*v) + (*v).Title = c.String(0) + "x" }, - func(v *Paths, c fuzz.Continue) { - c.Fuzz(&v.VendorExtensible) + func(v *Paths, c randfill.Continue) { + c.Fill(&v.VendorExtensible) num := c.Intn(5) if num > 0 { v.Paths = make(map[string]*Path) } for i := 0; i < num; i++ { val := Path{} - c.Fuzz(&val) - v.Paths["/"+c.RandString()] = &val + c.Fill(&val) + v.Paths["/"+c.String(0)] = &val } }, - func(v *SecurityScheme, c fuzz.Continue) { + func(v *SecurityScheme, c randfill.Continue) { if c.Intn(refChance) == 0 { - c.Fuzz(&v.Refable) + c.Fill(&v.Refable) return } switch c.Intn(4) { case 0: v.Type = "apiKey" - v.Name = c.RandString() + "x" + v.Name = c.String(0) + "x" switch c.Intn(3) { case 0: v.In = "query" @@ -101,17 +101,17 @@ var OpenAPIV3FuzzFuncs []interface{} = []interface{}{ v.Type = "oauth2" v.Flows = make(map[string]*OAuthFlow) flow := OAuthFlow{} - flow.AuthorizationUrl = c.RandString() + "x" + flow.AuthorizationUrl = c.String(0) + "x" v.Flows["implicit"] = &flow flow.Scopes = make(map[string]string) flow.Scopes["foo"] = "bar" case 3: v.Type = "openIdConnect" - v.OpenIdConnectUrl = "https://" + c.RandString() + v.OpenIdConnectUrl = "https://" + c.String(0) } v.Scheme = "basic" }, - func(v *spec.Ref, c fuzz.Continue) { + func(v *spec.Ref, c randfill.Continue) { switch c.Intn(7) { case 0: *v = spec.MustCreateRef("#/components/schemas/" + randAlphanumString()) @@ -127,13 +127,13 @@ var OpenAPIV3FuzzFuncs []interface{} = []interface{}{ *v = spec.MustCreateRef("#/components/requestBodies/" + randAlphanumString()) } }, - func(v *Parameter, c fuzz.Continue) { + func(v *Parameter, c randfill.Continue) { if c.Intn(refChance) == 0 { - c.Fuzz(&v.Refable) + c.Fill(&v.Refable) return } - c.Fuzz(&v.ParameterProps) - c.Fuzz(&v.VendorExtensible) + c.Fill(&v.ParameterProps) + c.Fill(&v.VendorExtensible) switch c.Intn(3) { case 0: @@ -145,44 +145,44 @@ var OpenAPIV3FuzzFuncs []interface{} = []interface{}{ v.In = "cookie" } }, - func(v *RequestBody, c fuzz.Continue) { + func(v *RequestBody, c randfill.Continue) { if c.Intn(refChance) == 0 { - c.Fuzz(&v.Refable) + c.Fill(&v.Refable) return } - c.Fuzz(&v.RequestBodyProps) - c.Fuzz(&v.VendorExtensible) + c.Fill(&v.RequestBodyProps) + c.Fill(&v.VendorExtensible) }, - func(v *Header, c fuzz.Continue) { + func(v *Header, c randfill.Continue) { if c.Intn(refChance) == 0 { - c.Fuzz(&v.Refable) + c.Fill(&v.Refable) return } - c.Fuzz(&v.HeaderProps) - c.Fuzz(&v.VendorExtensible) + c.Fill(&v.HeaderProps) + c.Fill(&v.VendorExtensible) }, - func(v *ResponsesProps, c fuzz.Continue) { - c.Fuzz(&v.Default) + func(v *ResponsesProps, c randfill.Continue) { + c.Fill(&v.Default) n := c.Intn(5) for i := 0; i < n; i++ { r2 := Response{} - c.Fuzz(&r2) + c.Fill(&r2) // HTTP Status code in 100-599 Range code := c.Intn(500) + 100 
v.StatusCodeResponses = make(map[int]*Response) v.StatusCodeResponses[code] = &r2 } }, - func(v *Response, c fuzz.Continue) { + func(v *Response, c randfill.Continue) { if c.Intn(refChance) == 0 { - c.Fuzz(&v.Refable) + c.Fill(&v.Refable) return } - c.Fuzz(&v.ResponseProps) - c.Fuzz(&v.VendorExtensible) + c.Fill(&v.ResponseProps) + c.Fill(&v.VendorExtensible) }, - func(v *Operation, c fuzz.Continue) { - c.FuzzNoCustom(v) + func(v *Operation, c randfill.Continue) { + c.FillNoCustom(v) // Do not fuzz null values into the array. for i, val := range v.SecurityRequirement { if val == nil { @@ -196,85 +196,85 @@ var OpenAPIV3FuzzFuncs []interface{} = []interface{}{ } } }, - func(v *spec.Extensions, c fuzz.Continue) { + func(v *spec.Extensions, c randfill.Continue) { numChildren := c.Intn(5) for i := 0; i < numChildren; i++ { if *v == nil { *v = spec.Extensions{} } - (*v)["x-"+c.RandString()] = c.RandString() + (*v)["x-"+c.String(0)] = c.String(0) } }, - func(v *spec.ExternalDocumentation, c fuzz.Continue) { - c.Fuzz(&v.Description) + func(v *spec.ExternalDocumentation, c randfill.Continue) { + c.Fill(&v.Description) v.URL = "https://" + randAlphanumString() }, - func(v *spec.SchemaURL, c fuzz.Continue) { + func(v *spec.SchemaURL, c randfill.Continue) { *v = spec.SchemaURL("https://" + randAlphanumString()) }, - func(v *spec.SchemaOrBool, c fuzz.Continue) { + func(v *spec.SchemaOrBool, c randfill.Continue) { *v = spec.SchemaOrBool{} - if c.RandBool() { - v.Allows = c.RandBool() + if c.Bool() { + v.Allows = c.Bool() } else { v.Schema = &spec.Schema{} v.Allows = true - c.Fuzz(&v.Schema) + c.Fill(&v.Schema) } }, - func(v *spec.SchemaOrArray, c fuzz.Continue) { + func(v *spec.SchemaOrArray, c randfill.Continue) { *v = spec.SchemaOrArray{} - if c.RandBool() { + if c.Bool() { schema := spec.Schema{} - c.Fuzz(&schema) + c.Fill(&schema) v.Schema = &schema } else { v.Schemas = []spec.Schema{} numChildren := c.Intn(5) for i := 0; i < numChildren; i++ { schema := spec.Schema{} - c.Fuzz(&schema) + c.Fill(&schema) v.Schemas = append(v.Schemas, schema) } } }, - func(v *spec.SchemaOrStringArray, c fuzz.Continue) { - if c.RandBool() { + func(v *spec.SchemaOrStringArray, c randfill.Continue) { + if c.Bool() { *v = spec.SchemaOrStringArray{} - if c.RandBool() { - c.Fuzz(&v.Property) + if c.Bool() { + c.Fill(&v.Property) } else { - c.Fuzz(&v.Schema) + c.Fill(&v.Schema) } } }, - func(v *spec.Schema, c fuzz.Continue) { + func(v *spec.Schema, c randfill.Continue) { if c.Intn(refChance) == 0 { - c.Fuzz(&v.Ref) + c.Fill(&v.Ref) return } - if c.RandBool() { + if c.Bool() { // file schema - c.Fuzz(&v.Default) - c.Fuzz(&v.Description) - c.Fuzz(&v.Example) - c.Fuzz(&v.ExternalDocs) + c.Fill(&v.Default) + c.Fill(&v.Description) + c.Fill(&v.Example) + c.Fill(&v.ExternalDocs) - c.Fuzz(&v.Format) - c.Fuzz(&v.ReadOnly) - c.Fuzz(&v.Required) - c.Fuzz(&v.Title) + c.Fill(&v.Format) + c.Fill(&v.ReadOnly) + c.Fill(&v.Required) + c.Fill(&v.Title) v.Type = spec.StringOrArray{"file"} } else { // normal schema - c.Fuzz(&v.SchemaProps) - c.Fuzz(&v.SwaggerSchemaProps) - c.Fuzz(&v.VendorExtensible) - c.Fuzz(&v.ExtraProps) + c.Fill(&v.SchemaProps) + c.Fill(&v.SwaggerSchemaProps) + c.Fill(&v.VendorExtensible) + c.Fill(&v.ExtraProps) } }, diff --git a/vendor/modules.txt b/vendor/modules.txt index 1cec217cc1..2d0f976a14 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -337,18 +337,14 @@ github.com/google/gnostic-models/extensions github.com/google/gnostic-models/jsonschema github.com/google/gnostic-models/openapiv2 
github.com/google/gnostic-models/openapiv3 -# github.com/google/go-cmp v0.6.0 -## explicit; go 1.13 +# github.com/google/go-cmp v0.7.0 +## explicit; go 1.21 github.com/google/go-cmp/cmp github.com/google/go-cmp/cmp/cmpopts github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags github.com/google/go-cmp/cmp/internal/function github.com/google/go-cmp/cmp/internal/value -# github.com/google/gofuzz v1.2.0 -## explicit; go 1.12 -github.com/google/gofuzz -github.com/google/gofuzz/bytesource # github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db ## explicit; go 1.22 github.com/google/pprof/profile @@ -519,8 +515,8 @@ github.com/openshift-eng/openshift-tests-extension/pkg/flags github.com/openshift-eng/openshift-tests-extension/pkg/ginkgo github.com/openshift-eng/openshift-tests-extension/pkg/util/sets github.com/openshift-eng/openshift-tests-extension/pkg/version -# github.com/openshift/api v0.0.0-20250320170726-75d64d71980b -## explicit; go 1.23.0 +# github.com/openshift/api v0.0.0-20250916150132-83b017b06367 +## explicit; go 1.24.0 github.com/openshift/api github.com/openshift/api/annotations github.com/openshift/api/apiserver @@ -536,6 +532,7 @@ github.com/openshift/api/cloudnetwork/v1 github.com/openshift/api/config github.com/openshift/api/config/v1 github.com/openshift/api/config/v1alpha1 +github.com/openshift/api/config/v1alpha2 github.com/openshift/api/console github.com/openshift/api/console/v1 github.com/openshift/api/features @@ -811,9 +808,10 @@ github.com/spf13/pflag # github.com/stoewer/go-strcase v1.3.0 ## explicit; go 1.11 github.com/stoewer/go-strcase -# github.com/stretchr/testify v1.9.0 +# github.com/stretchr/testify v1.10.0 ## explicit; go 1.17 github.com/stretchr/testify/assert +github.com/stretchr/testify/assert/yaml github.com/stretchr/testify/require # github.com/tidwall/gjson v1.18.0 ## explicit; go 1.12 @@ -938,8 +936,8 @@ go.uber.org/zap/internal/pool go.uber.org/zap/internal/stacktrace go.uber.org/zap/zapcore go.uber.org/zap/zapgrpc -# golang.org/x/crypto v0.32.0 -## explicit; go 1.20 +# golang.org/x/crypto v0.36.0 +## explicit; go 1.23.0 golang.org/x/crypto/blowfish golang.org/x/crypto/chacha20 golang.org/x/crypto/cryptobyte @@ -958,8 +956,8 @@ golang.org/x/crypto/ssh/internal/bcrypt_pbkdf ## explicit; go 1.22.0 golang.org/x/exp/constraints golang.org/x/exp/slices -# golang.org/x/net v0.34.0 -## explicit; go 1.18 +# golang.org/x/net v0.38.0 +## explicit; go 1.23.0 golang.org/x/net/context golang.org/x/net/html golang.org/x/net/html/atom @@ -969,6 +967,7 @@ golang.org/x/net/http/httpproxy golang.org/x/net/http2 golang.org/x/net/http2/hpack golang.org/x/net/idna +golang.org/x/net/internal/httpcommon golang.org/x/net/internal/socks golang.org/x/net/internal/timeseries golang.org/x/net/proxy @@ -979,22 +978,22 @@ golang.org/x/net/websocket golang.org/x/oauth2 golang.org/x/oauth2/clientcredentials golang.org/x/oauth2/internal -# golang.org/x/sync v0.10.0 -## explicit; go 1.18 +# golang.org/x/sync v0.12.0 +## explicit; go 1.23.0 golang.org/x/sync/errgroup golang.org/x/sync/singleflight -# golang.org/x/sys v0.29.0 -## explicit; go 1.18 +# golang.org/x/sys v0.31.0 +## explicit; go 1.23.0 golang.org/x/sys/cpu golang.org/x/sys/plan9 golang.org/x/sys/unix golang.org/x/sys/windows golang.org/x/sys/windows/registry -# golang.org/x/term v0.28.0 -## explicit; go 1.18 +# golang.org/x/term v0.30.0 +## explicit; go 1.23.0 golang.org/x/term -# golang.org/x/text v0.21.0 -## explicit; go 1.18 +# golang.org/x/text v0.23.0 +## explicit; go 1.23.0 
golang.org/x/text/cases golang.org/x/text/encoding golang.org/x/text/encoding/charmap @@ -1102,7 +1101,7 @@ google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status google.golang.org/grpc/tap -# google.golang.org/protobuf v1.36.3 +# google.golang.org/protobuf v1.36.5 ## explicit; go 1.21 google.golang.org/protobuf/encoding/protodelim google.golang.org/protobuf/encoding/protojson @@ -1162,7 +1161,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.32.2 => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20250704150419-38c60a516ecb +# k8s.io/api v0.33.2 => github.com/openshift/kubernetes/staging/src/k8s.io/api v0.0.0-20250704150419-38c60a516ecb ## explicit; go 1.23.0 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -1235,12 +1234,13 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1 k8s.io/apiextensions-apiserver/pkg/features -# k8s.io/apimachinery v0.32.2 -## explicit; go 1.23.0 +# k8s.io/apimachinery v0.33.2 +## explicit; go 1.24.0 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors k8s.io/apimachinery/pkg/api/meta k8s.io/apimachinery/pkg/api/meta/testrestmapper +k8s.io/apimachinery/pkg/api/operation k8s.io/apimachinery/pkg/api/resource k8s.io/apimachinery/pkg/api/validation k8s.io/apimachinery/pkg/api/validation/path @@ -1928,7 +1928,7 @@ k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1 k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1beta1 -# k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 +# k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff ## explicit; go 1.21 k8s.io/kube-openapi/pkg/builder k8s.io/kube-openapi/pkg/builder3 @@ -2181,6 +2181,10 @@ sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1 sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/scheme sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1 +# sigs.k8s.io/randfill v1.0.0 +## explicit; go 1.18 +sigs.k8s.io/randfill +sigs.k8s.io/randfill/bytesource # sigs.k8s.io/structured-merge-diff/v4 v4.6.0 ## explicit; go 1.13 sigs.k8s.io/structured-merge-diff/v4/fieldpath diff --git a/vendor/sigs.k8s.io/randfill/CONTRIBUTING.md b/vendor/sigs.k8s.io/randfill/CONTRIBUTING.md new file mode 100644 index 0000000000..7566c879ce --- /dev/null +++ b/vendor/sigs.k8s.io/randfill/CONTRIBUTING.md @@ -0,0 +1,43 @@ +# Contributing Guidelines + +Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://git.k8s.io/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). 
Here is an excerpt:
+
+_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._
+
+## Getting Started
+
+We have full documentation on how to get started contributing here:
+
+
+
+- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests
+- [Kubernetes Contributor Guide](https://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](https://git.k8s.io/community/contributors/guide#contributing)
+- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet) - Common resources for existing developers
+
+## Mentorship
+
+- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers!
+
+
+
+## Project Management
+
+The [maintainers](https://github.com/kubernetes-sigs/randfill/blob/main/OWNERS_ALIASES#L12) of this project (and often others who have official positions on the [contributor ladder](https://github.com/kubernetes-sigs/randfill/blob/main/OWNERS_ALIASES)) are responsible for performing project management, which oversees development and maintenance of the API, tests, tools, etc. While we try to be generally flexible when it comes to the management of individual pieces (such as Issues or PRs), we have some rules and guidelines which help us plan, coordinate and reduce waste. In this section you'll find some rules/guidelines for contributors related to project management which may extend or go beyond what you would find in the standard [Kubernetes Contributor Guide](https://git.k8s.io/community/contributors/guide).
+
+### Bumping stale and closed Issues & PRs
+
+Maintainers are ultimately responsible for triaging new issues and PRs, accepting or declining them, deciding priority and fitting them into milestones intended for future releases. Bots are responsible for marking issues and PRs which stagnate as stale, or closing them if progress does not continue for a long period of time. Due to the nature of this community-driven development effort (we do not have dedicated engineering resources, we rely on the community, which is effectively "volunteer time") **not all issues can be accepted, prioritized or completed**.
+
+You may find times when an issue you're subscribed to and interested in seems to stagnate, or perhaps gets auto-closed. Prior to bumping or directly re-opening issues yourself, we generally ask that you bring these up for discussion on the agenda for one of our community syncs if possible, or bring them up for discussion in Slack or the mailing list, as this gives us a better opportunity to discuss the issue and determine viability and logistics. If feasible we **highly recommend being ready to contribute directly** to any stale or unprioritized effort that you want to see move forward, as **the best way to ensure progress is to engage with the community and personally invest time**.
+ +We (the community) aren't opposed to making exceptions in some cases, but when in doubt please follow the above guidelines before bumping closed or stale issues if you're not ready to personally invest time in them. We are responsible for managing these and without further context or engagement we may set these back to how they were previously organized. diff --git a/vendor/github.com/google/gofuzz/LICENSE b/vendor/sigs.k8s.io/randfill/LICENSE similarity index 99% rename from vendor/github.com/google/gofuzz/LICENSE rename to vendor/sigs.k8s.io/randfill/LICENSE index d645695673..9dd29274c3 100644 --- a/vendor/github.com/google/gofuzz/LICENSE +++ b/vendor/sigs.k8s.io/randfill/LICENSE @@ -1,4 +1,3 @@ - Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -179,7 +178,7 @@ APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" + boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -187,7 +186,8 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright 2014 The gofuzz Authors + Copyright 2025 The Kubernetes Authors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/sigs.k8s.io/randfill/NOTICE b/vendor/sigs.k8s.io/randfill/NOTICE new file mode 100644 index 0000000000..6984e71f65 --- /dev/null +++ b/vendor/sigs.k8s.io/randfill/NOTICE @@ -0,0 +1,24 @@ +When donating the randfill project to the CNCF, we could not reach all the +gofuzz contributors to sign the CNCF CLA. As such, according to the CNCF rules +to donate a repository, we must add a NOTICE referencing section 7 of the CLA +with a list of developers who could not be reached. + +`7. 
Should You wish to submit work that is not Your original creation, You may +submit it to the Foundation separately from any Contribution, identifying the +complete details of its source and of any license or other restriction +(including, but not limited to, related patents, trademarks, and license +agreements) of which you are personally aware, and conspicuously marking the +work as "Submitted on behalf of a third-party: [named here]".` + +Submitted on behalf of a third-party: @dnephin (Daniel Nephin) +Submitted on behalf of a third-party: @AlekSi (Alexey Palazhchenko) +Submitted on behalf of a third-party: @bbigras (Bruno Bigras) +Submitted on behalf of a third-party: @samirkut (Samir) +Submitted on behalf of a third-party: @posener (Eyal Posener) +Submitted on behalf of a third-party: @Ashikpaul (Ashik Paul) +Submitted on behalf of a third-party: @kwongtailau (Kwongtai) +Submitted on behalf of a third-party: @ericcornelissen (Eric Cornelissen) +Submitted on behalf of a third-party: @eclipseo (Robert-André Mauchin) +Submitted on behalf of a third-party: @yanzhoupan (Andrew Pan) +Submitted on behalf of a third-party: @STRRL (Zhiqiang ZHOU) +Submitted on behalf of a third-party: @disconnect3d (Disconnect3d) diff --git a/vendor/sigs.k8s.io/randfill/OWNERS b/vendor/sigs.k8s.io/randfill/OWNERS new file mode 100644 index 0000000000..59f6a50f6b --- /dev/null +++ b/vendor/sigs.k8s.io/randfill/OWNERS @@ -0,0 +1,8 @@ +# See the OWNERS docs at https://go.k8s.io/owners +# See the OWNERS_ALIASES file at https://github.com/kubernetes-sigs/randfill/blob/main/OWNERS_ALIASES for a list of members for each alias. + +approvers: + - sig-testing-leads + - thockin + +reviewers: [] diff --git a/vendor/sigs.k8s.io/randfill/OWNERS_ALIASES b/vendor/sigs.k8s.io/randfill/OWNERS_ALIASES new file mode 100644 index 0000000000..927f1209b1 --- /dev/null +++ b/vendor/sigs.k8s.io/randfill/OWNERS_ALIASES @@ -0,0 +1,14 @@ +# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md +# This file should be kept in sync with k/org. + +aliases: + # Reference: https://github.com/kubernetes/org/blob/main/OWNERS_ALIASES + sig-testing-leads: + - BenTheElder + - alvaroaleman + - aojea + - cjwagner + - jbpratt + - michelle192837 + - pohly + - xmcqueen diff --git a/vendor/github.com/google/gofuzz/README.md b/vendor/sigs.k8s.io/randfill/README.md similarity index 53% rename from vendor/github.com/google/gofuzz/README.md rename to vendor/sigs.k8s.io/randfill/README.md index b503aae7d7..d892fc9f5d 100644 --- a/vendor/github.com/google/gofuzz/README.md +++ b/vendor/sigs.k8s.io/randfill/README.md @@ -1,39 +1,46 @@ -gofuzz +randfill ====== -gofuzz is a library for populating go objects with random values. +randfill is a library for populating go objects with random values. -[![GoDoc](https://godoc.org/github.com/google/gofuzz?status.svg)](https://godoc.org/github.com/google/gofuzz) -[![Travis](https://travis-ci.org/google/gofuzz.svg?branch=master)](https://travis-ci.org/google/gofuzz) +This is a fork of github.com/google/gofuzz, which was archived. + +NOTE: This repo is supported only for use within Kubernetes. It is not our +intention to support general use. That said, if it works for you, that's +great! If you have a problem, please feel free to file an issue, but be aware +that it may not be a priority for us to fix it unless it is affecting +Kubernetes. PRs are welcome, within reason. 
+ +[![GoDoc](https://godoc.org/sigs.k8s.io/randfill?status.svg)](https://godoc.org/sigs.k8s.io/randfill) This is useful for testing: * Do your project's objects really serialize/unserialize correctly in all cases? * Is there an incorrectly formatted object that will cause your project to panic? -Import with ```import "github.com/google/gofuzz"``` +Import with ```import "sigs.k8s.io/randfill"``` You can use it on single variables: ```go -f := fuzz.New() +f := randfill.New() var myInt int -f.Fuzz(&myInt) // myInt gets a random value. +f.Fill(&myInt) // myInt gets a random value. ``` You can use it on maps: ```go -f := fuzz.New().NilChance(0).NumElements(1, 1) +f := randfill.New().NilChance(0).NumElements(1, 1) var myMap map[ComplexKeyType]string -f.Fuzz(&myMap) // myMap will have exactly one element. +f.Fill(&myMap) // myMap will have exactly one element. ``` Customize the chance of getting a nil pointer: ```go -f := fuzz.New().NilChance(.5) +f := randfill.New().NilChance(.5) var fancyStruct struct { A, B, C, D *string } -f.Fuzz(&fancyStruct) // About half the pointers should be set. +f.Fill(&fancyStruct) // About half the pointers should be set. ``` You can even customize the randomization completely if needed: @@ -49,25 +56,27 @@ type MyInfo struct { BInfo *string } -f := fuzz.New().NilChance(0).Funcs( - func(e *MyInfo, c fuzz.Continue) { +f := randfill.New().NilChance(0).Funcs( + func(e *MyInfo, c randfill.Continue) { switch c.Intn(2) { case 0: e.Type = A - c.Fuzz(&e.AInfo) + c.Fill(&e.AInfo) case 1: e.Type = B - c.Fuzz(&e.BInfo) + c.Fill(&e.BInfo) } }, ) var myObject MyInfo -f.Fuzz(&myObject) // Type will correspond to whether A or B info is set. +f.Fill(&myObject) // Type will correspond to whether A or B info is set. ``` See more examples in ```example_test.go```. +## dvyukov/go-fuzz integration + You can use this library for easier [go-fuzz](https://github.com/dvyukov/go-fuzz)ing. go-fuzz provides the user a byte-slice, which should be converted to different inputs for the tested function. This library can help convert the byte slice. Consider for @@ -76,11 +85,11 @@ example a fuzz test for a the function `mypackage.MyFunc` that takes an int argu // +build gofuzz package mypackage -import fuzz "github.com/google/gofuzz" +import "sigs.k8s.io/randfill" func Fuzz(data []byte) int { var i int - fuzz.NewFromGoFuzz(data).Fuzz(&i) + randfill.NewFromGoFuzz(data).Fill(&i) MyFunc(i) return 0 } diff --git a/vendor/sigs.k8s.io/randfill/SECURITY_CONTACTS b/vendor/sigs.k8s.io/randfill/SECURITY_CONTACTS new file mode 100644 index 0000000000..91d7853378 --- /dev/null +++ b/vendor/sigs.k8s.io/randfill/SECURITY_CONTACTS @@ -0,0 +1,16 @@ +# Defined below are the security contacts for this repo. +# +# They are the contact point for the Product Security Committee to reach out +# to for triaging and handling of incoming issues. +# +# The below names agree to abide by the +# [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy) +# and will be removed and replaced if they violate that agreement. 
+# +# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE +# INSTRUCTIONS AT https://kubernetes.io/security/ + +thockin +BenTheElder +aojea +pohly diff --git a/vendor/github.com/google/gofuzz/bytesource/bytesource.go b/vendor/sigs.k8s.io/randfill/bytesource/bytesource.go similarity index 100% rename from vendor/github.com/google/gofuzz/bytesource/bytesource.go rename to vendor/sigs.k8s.io/randfill/bytesource/bytesource.go diff --git a/vendor/sigs.k8s.io/randfill/code-of-conduct.md b/vendor/sigs.k8s.io/randfill/code-of-conduct.md new file mode 100644 index 0000000000..0d15c00cf3 --- /dev/null +++ b/vendor/sigs.k8s.io/randfill/code-of-conduct.md @@ -0,0 +1,3 @@ +# Kubernetes Community Code of Conduct + +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/vendor/sigs.k8s.io/randfill/randfill.go b/vendor/sigs.k8s.io/randfill/randfill.go new file mode 100644 index 0000000000..b734824844 --- /dev/null +++ b/vendor/sigs.k8s.io/randfill/randfill.go @@ -0,0 +1,682 @@ +/* +Copyright 2014 Google Inc. All rights reserved. +Copyright 2014 The gofuzz Authors. +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package randfill is a library for populating go objects with random values. +package randfill + +import ( + "fmt" + "math/rand" + "reflect" + "regexp" + "sync" + "time" + "unsafe" + + "strings" + + "sigs.k8s.io/randfill/bytesource" +) + +// funcMap is a map from a type to a function that randfills that type. The +// function is a reflect.Value because the type being filled is different for +// each func. +type funcMap map[reflect.Type]reflect.Value + +// Filler knows how to fill any object with random fields. +type Filler struct { + customFuncs funcMap + defaultFuncs funcMap + r *rand.Rand + nilChance float64 + minElements int + maxElements int + maxDepth int + allowUnexportedFields bool + skipFieldPatterns []*regexp.Regexp + + lock sync.Mutex +} + +// New returns a new Filler. Customize your Filler further by calling Funcs, +// RandSource, NilChance, or NumElements in any order. +func New() *Filler { + return NewWithSeed(time.Now().UnixNano()) +} + +func NewWithSeed(seed int64) *Filler { + f := &Filler{ + defaultFuncs: funcMap{ + reflect.TypeOf(&time.Time{}): reflect.ValueOf(randfillTime), + }, + + customFuncs: funcMap{}, + r: rand.New(rand.NewSource(seed)), + nilChance: .2, + minElements: 1, + maxElements: 10, + maxDepth: 100, + allowUnexportedFields: false, + } + return f +} + +// NewFromGoFuzz is a helper function that enables using randfill (this +// project) with go-fuzz (https://github.com/dvyukov/go-fuzz) for continuous +// fuzzing. Essentially, it enables translating the fuzzing bytes from +// go-fuzz to any Go object using this library. +// +// This implementation promises a constant translation from a given slice of +// bytes to the fuzzed objects. This promise will remain over future +// versions of Go and of this library. 
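A sketch of the determinism promise documented above (the corpus bytes are arbitrary): two fillers built from the same go-fuzz input produce the same value.

```go
package main

import (
	"fmt"

	"sigs.k8s.io/randfill"
)

func main() {
	data := []byte("some go-fuzz corpus input")

	// The translation from bytes to filled object is promised to be
	// stable, so the same input always yields the same value.
	var a, b int
	randfill.NewFromGoFuzz(data).Fill(&a)
	randfill.NewFromGoFuzz(data).Fill(&b)
	fmt.Println(a == b) // true
}
```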
+// +// Note: the returned Filler should not be shared between multiple goroutines, +// as its deterministic output will no longer be available. +// +// Example: use go-fuzz to test the function `MyFunc(int)` in the package +// `mypackage`. Add the file: "mypackage_fuzz.go" with the content: +// +// // +build gofuzz +// package mypackage +// import "sigs.k8s.io/randfill" +// +// func Fuzz(data []byte) int { +// var i int +// randfill.NewFromGoFuzz(data).Fill(&i) +// MyFunc(i) +// return 0 +// } +func NewFromGoFuzz(data []byte) *Filler { + return New().RandSource(bytesource.New(data)) +} + +// Funcs registers custom fill functions for this Filler. +// +// Each entry in customFuncs must be a function taking two parameters. +// The first parameter must be a pointer or map. It is the variable that +// function will fill with random data. The second parameter must be a +// randfill.Continue, which will provide a source of randomness and a way +// to automatically continue filling smaller pieces of the first parameter. +// +// These functions are called sensibly, e.g., if you wanted custom string +// filling, the function `func(s *string, c randfill.Continue)` would get +// called and passed the address of strings. Maps and pointers will always +// be made/new'd for you, ignoring the NilChance option. For slices, it +// doesn't make much sense to pre-create them--Filler doesn't know how +// long you want your slice--so take a pointer to a slice, and make it +// yourself. (If you don't want your map/pointer type pre-made, take a +// pointer to it, and make it yourself.) See the examples for a range of +// custom functions. +// +// If a function is already registered for a type, and a new function is +// provided, the previous function will be replaced with the new one. +func (f *Filler) Funcs(customFuncs ...interface{}) *Filler { + for i := range customFuncs { + v := reflect.ValueOf(customFuncs[i]) + if v.Kind() != reflect.Func { + panic("Filler.Funcs: all arguments must be functions") + } + t := v.Type() + if t.NumIn() != 2 || t.NumOut() != 0 { + panic("Filler.Funcs: all customFuncs must have 2 arguments and 0 returns") + } + argT := t.In(0) + switch argT.Kind() { + case reflect.Ptr, reflect.Map: + default: + panic("Filler.Funcs: customFuncs' first argument must be a pointer or map type") + } + if t.In(1) != reflect.TypeOf(Continue{}) { + panic("Filler.Funcs: customFuncs' second argument must be a randfill.Continue") + } + f.customFuncs[argT] = v + } + return f +} + +// RandSource causes this Filler to get values from the given source of +// randomness. Use this if you want deterministic filling. +func (f *Filler) RandSource(s rand.Source) *Filler { + f.r = rand.New(s) + return f +} + +// NilChance sets the probability of creating a nil pointer, map, or slice to +// 'p'. 'p' should be between 0 (no nils) and 1 (all nils), inclusive. +func (f *Filler) NilChance(p float64) *Filler { + if p < 0 || p > 1 { + panic("Filler.NilChance: p must be between 0 and 1, inclusive") + } + f.nilChance = p + return f +} + +// NumElements sets the minimum and maximum number of elements that will be +// added to a non-nil map or slice. 
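A sketch chaining the knobs documented above (RandSource, NilChance, NumElements); the seed and bounds are arbitrary.

```go
package main

import (
	"fmt"
	"math/rand"

	"sigs.k8s.io/randfill"
)

func main() {
	// A fixed rand.Source makes the filler deterministic; NilChance(0)
	// forbids nil pointers/maps/slices, and NumElements bounds their size.
	f := randfill.New().
		RandSource(rand.NewSource(42)).
		NilChance(0).
		NumElements(1, 3)

	var m map[string][]int
	f.Fill(&m)
	fmt.Println(len(m) >= 1 && len(m) <= 3) // true
}
```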
+func (f *Filler) NumElements(min, max int) *Filler { + if min < 0 { + panic("Filler.NumElements: min must be >= 0") + } + if min > max { + panic("Filler.NumElements: min must be <= max") + } + f.minElements = min + f.maxElements = max + return f +} + +func (f *Filler) genElementCount() int { + if f.minElements == f.maxElements { + return f.minElements + } + return f.minElements + f.r.Intn(f.maxElements-f.minElements+1) +} + +func (f *Filler) genShouldFill() bool { + return f.r.Float64() >= f.nilChance +} + +// MaxDepth sets the maximum number of recursive fill calls that will be made +// before stopping. This includes struct members, pointers, and map and slice +// elements. +func (f *Filler) MaxDepth(d int) *Filler { + f.maxDepth = d + return f +} + +// AllowUnexportedFields defines whether to fill unexported fields. +func (f *Filler) AllowUnexportedFields(flag bool) *Filler { + f.allowUnexportedFields = flag + return f +} + +// SkipFieldsWithPattern tells this Filler to skip any field whose name matches +// the supplied pattern. Call this multiple times if needed. This is useful to +// skip XXX_ fields generated by protobuf. +func (f *Filler) SkipFieldsWithPattern(pattern *regexp.Regexp) *Filler { + f.skipFieldPatterns = append(f.skipFieldPatterns, pattern) + return f +} + +// SimpleSelfFiller represents an object that knows how to randfill itself. +// +// Unlike NativeSelfFiller, this interface does not cause the type in question +// to depend on the randfill package. This is most useful for simple types. For +// more complex types, consider using NativeSelfFiller. +type SimpleSelfFiller interface { + // RandFill fills the current object with random data. + RandFill(r *rand.Rand) +} + +// NativeSelfFiller represents an object that knows how to randfill itself. +// +// Unlike SimpleSelfFiller, this interface allows for recursive filling of +// child objects with the same rules as the parent Filler. +type NativeSelfFiller interface { + // RandFill fills the current object with random data. + RandFill(c Continue) +} + +// Fill recursively fills all of obj's fields with something random. First +// this tries to find a custom fill function (see Funcs). If there is no +// custom function, this tests whether the object implements SimpleSelfFiller +// or NativeSelfFiller and if so, calls RandFill on it to fill itself. If that +// fails, this will see if there is a default fill function provided by this +// package. If all of that fails, this will generate random values for all +// primitive fields and then recurse for all non-primitives. +// +// This is safe for cyclic or tree-like structs, up to a limit. Use the +// MaxDepth method to adjust how deep you need it to recurse. +// +// obj must be a pointer. Exported (public) fields can always be set, and if +// the AllowUnexportedFields() modifier was called it can try to set unexported +// (private) fields, too. +// +// This is intended for tests, so will panic on bad input or unimplemented +// types. This method takes a lock for the whole Filler, so it is not +// reentrant. See Continue. +func (f *Filler) Fill(obj interface{}) { + f.lock.Lock() + defer f.lock.Unlock() + + v := reflect.ValueOf(obj) + if v.Kind() != reflect.Ptr { + panic("Filler.Fill: obj must be a pointer") + } + v = v.Elem() + f.fillWithContext(v, 0) +} + +// FillNoCustom is just like Fill, except that any custom fill function for +// obj's type will not be called and obj will not be tested for +// SimpleSelfFiller or NativeSelfFiller. 
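Since Fill is safe for cyclic types only up to MaxDepth, a recursive type is the natural illustration; the Node type below is invented for this sketch.

```go
package main

import (
	"fmt"

	"sigs.k8s.io/randfill"
)

// Node is cyclic by type; MaxDepth keeps Fill from recursing forever.
type Node struct {
	Value int
	Next  *Node
}

func main() {
	f := randfill.New().NilChance(0).MaxDepth(4)
	var n Node
	f.Fill(&n)

	// Beyond MaxDepth the filler stops, leaving Next at its zero value,
	// so the chain is finite even with NilChance(0).
	length := 0
	for p := &n; p != nil; p = p.Next {
		length++
	}
	fmt.Println("chain length:", length) // bounded by MaxDepth
}
```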
This applies only to obj and not other +// instances of obj's type or to obj's child fields. +// +// obj must be a pointer. Exported (public) fields can always be set, and if +// the AllowUnexportedFields() modifier was called it can try to set unexported +// (private) fields, too. +// +// This is intended for tests, so will panic on bad input or unimplemented +// types. This method takes a lock for the whole Filler, so it is not +// reentrant. See Continue. +func (f *Filler) FillNoCustom(obj interface{}) { + f.lock.Lock() + defer f.lock.Unlock() + + v := reflect.ValueOf(obj) + if v.Kind() != reflect.Ptr { + panic("Filler.FillNoCustom: obj must be a pointer") + } + v = v.Elem() + f.fillWithContext(v, flagNoCustomFill) +} + +const ( + // Do not try to find a custom fill function. Does not apply recursively. + flagNoCustomFill uint64 = 1 << iota +) + +func (f *Filler) fillWithContext(v reflect.Value, flags uint64) { + fc := &fillerContext{filler: f} + fc.doFill(v, flags) +} + +// fillerContext carries context about a single filling run, which lets Filler +// be thread-safe. +type fillerContext struct { + filler *Filler + curDepth int +} + +func (fc *fillerContext) doFill(v reflect.Value, flags uint64) { + if fc.curDepth >= fc.filler.maxDepth { + return + } + fc.curDepth++ + defer func() { fc.curDepth-- }() + + if !v.CanSet() { + if !fc.filler.allowUnexportedFields || !v.CanAddr() { + return + } + v = reflect.NewAt(v.Type(), unsafe.Pointer(v.UnsafeAddr())).Elem() + } + + if flags&flagNoCustomFill == 0 { + // Check for both pointer and non-pointer custom functions. + if v.CanAddr() && fc.tryCustom(v.Addr()) { + return + } + if fc.tryCustom(v) { + return + } + } + + if fn, ok := fillFuncMap[v.Kind()]; ok { + fn(v, fc.filler.r) + return + } + + switch v.Kind() { + case reflect.Map: + if fc.filler.genShouldFill() { + v.Set(reflect.MakeMap(v.Type())) + n := fc.filler.genElementCount() + for i := 0; i < n; i++ { + key := reflect.New(v.Type().Key()).Elem() + fc.doFill(key, 0) + val := reflect.New(v.Type().Elem()).Elem() + fc.doFill(val, 0) + v.SetMapIndex(key, val) + } + return + } + v.Set(reflect.Zero(v.Type())) + case reflect.Ptr: + if fc.filler.genShouldFill() { + v.Set(reflect.New(v.Type().Elem())) + fc.doFill(v.Elem(), 0) + return + } + v.Set(reflect.Zero(v.Type())) + case reflect.Slice: + if fc.filler.genShouldFill() { + n := fc.filler.genElementCount() + v.Set(reflect.MakeSlice(v.Type(), n, n)) + for i := 0; i < n; i++ { + fc.doFill(v.Index(i), 0) + } + return + } + v.Set(reflect.Zero(v.Type())) + case reflect.Array: + if fc.filler.genShouldFill() { + n := v.Len() + for i := 0; i < n; i++ { + fc.doFill(v.Index(i), 0) + } + return + } + v.Set(reflect.Zero(v.Type())) + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + skipField := false + fieldName := v.Type().Field(i).Name + for _, pattern := range fc.filler.skipFieldPatterns { + if pattern.MatchString(fieldName) { + skipField = true + break + } + } + if !skipField { + fc.doFill(v.Field(i), 0) + } + } + case reflect.Chan: + fallthrough + case reflect.Func: + fallthrough + case reflect.Interface: + fallthrough + default: + panic(fmt.Sprintf("can't fill type %v, kind %v", v.Type(), v.Kind())) + } +} + +// tryCustom searches for custom handlers, and returns true iff it finds a match +// and successfully randomizes v. +func (fc *fillerContext) tryCustom(v reflect.Value) bool { + // First: see if we have a fill function for it. + doCustom, ok := fc.filler.customFuncs[v.Type()] + if !ok { + // Second: see if it can fill itself. 
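A sketch of a type that fills itself via NativeSelfFiller, which the lookup above detects before falling back to kind-based filling; Temperature and its value range are invented for illustration.

```go
package main

import (
	"fmt"

	"sigs.k8s.io/randfill"
)

// Temperature fills itself with a plausible value instead of a raw random
// int64; Fill discovers the NativeSelfFiller implementation automatically.
type Temperature struct {
	Celsius int
}

func (t *Temperature) RandFill(c randfill.Continue) {
	t.Celsius = c.Intn(101) - 50 // constrain to [-50, 50]
}

var _ randfill.NativeSelfFiller = &Temperature{}

func main() {
	var temp Temperature
	randfill.New().Fill(&temp)
	fmt.Println(temp.Celsius)
}
```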
+ if v.CanInterface() { + intf := v.Interface() + if fillable, ok := intf.(SimpleSelfFiller); ok { + fillable.RandFill(fc.filler.r) + return true + } + if fillable, ok := intf.(NativeSelfFiller); ok { + fillable.RandFill(Continue{fc: fc, Rand: fc.filler.r}) + return true + } + } + // Finally: see if there is a default fill function. + doCustom, ok = fc.filler.defaultFuncs[v.Type()] + if !ok { + return false + } + } + + switch v.Kind() { + case reflect.Ptr: + if v.IsNil() { + if !v.CanSet() { + return false + } + v.Set(reflect.New(v.Type().Elem())) + } + case reflect.Map: + if v.IsNil() { + if !v.CanSet() { + return false + } + v.Set(reflect.MakeMap(v.Type())) + } + default: + return false + } + + doCustom.Call([]reflect.Value{ + v, + reflect.ValueOf(Continue{ + fc: fc, + Rand: fc.filler.r, + }), + }) + return true +} + +// Continue can be passed to custom fill functions to allow them to use +// the correct source of randomness and to continue filling their members. +type Continue struct { + fc *fillerContext + + // For convenience, Continue implements rand.Rand via embedding. + // Use this for generating any randomness if you want your filling + // to be repeatable for a given seed. + *rand.Rand +} + +// Fill continues filling obj. obj must be a pointer or a reflect.Value of a +// pointer. See Filler.Fill. +func (c Continue) Fill(obj interface{}) { + v, ok := obj.(reflect.Value) + if !ok { + v = reflect.ValueOf(obj) + } + if v.Kind() != reflect.Ptr { + panic("Continue.Fill: obj must be a pointer") + } + v = v.Elem() + c.fc.doFill(v, 0) +} + +// FillNoCustom continues filling obj, except that any custom fill function for +// obj's type will not be called and obj will not be tested for +// SimpleSelfFiller or NativeSelfFiller. See Filler.FillNoCustom. +func (c Continue) FillNoCustom(obj interface{}) { + v, ok := obj.(reflect.Value) + if !ok { + v = reflect.ValueOf(obj) + } + if v.Kind() != reflect.Ptr { + panic("Continue.FillNoCustom: obj must be a pointer") + } + v = v.Elem() + c.fc.doFill(v, flagNoCustomFill) +} + +const defaultStringMaxLen = 20 + +// String makes a random string up to n characters long. If n is 0, the default +// size range is [0-20). The returned string may include a variety of (valid) +// UTF-8 encodings. +func (c Continue) String(n int) string { + return randString(c.Rand, n) +} + +// Uint64 makes random 64 bit numbers. +// Weirdly, rand doesn't have a function that gives you 64 random bits. +func (c Continue) Uint64() uint64 { + return randUint64(c.Rand) +} + +// Bool returns true or false randomly. +func (c Continue) Bool() bool { + return randBool(c.Rand) +} + +func fillInt(v reflect.Value, r *rand.Rand) { + v.SetInt(int64(randUint64(r))) +} + +func fillUint(v reflect.Value, r *rand.Rand) { + v.SetUint(randUint64(r)) +} + +func randfillTime(t *time.Time, c Continue) { + var sec, nsec int64 + // Allow for about 1000 years of random time values, which keeps things + // like JSON parsing reasonably happy. + sec = c.Rand.Int63n(1000 * 365 * 24 * 60 * 60) + // Nanosecond values greater than 1Bn are technically allowed but result in + // time.Time values with invalid timezone offsets. 
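+	// Int63n(999999999) yields values in [0, 999999998], i.e. strictly less
+	// than one second, so time.Unix never has to normalize the result.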
+	nsec = c.Rand.Int63n(999999999)
+	*t = time.Unix(sec, nsec)
+}
+
+var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){
+	reflect.Bool: func(v reflect.Value, r *rand.Rand) {
+		v.SetBool(randBool(r))
+	},
+	reflect.Int:     fillInt,
+	reflect.Int8:    fillInt,
+	reflect.Int16:   fillInt,
+	reflect.Int32:   fillInt,
+	reflect.Int64:   fillInt,
+	reflect.Uint:    fillUint,
+	reflect.Uint8:   fillUint,
+	reflect.Uint16:  fillUint,
+	reflect.Uint32:  fillUint,
+	reflect.Uint64:  fillUint,
+	reflect.Uintptr: fillUint,
+	reflect.Float32: func(v reflect.Value, r *rand.Rand) {
+		v.SetFloat(float64(r.Float32()))
+	},
+	reflect.Float64: func(v reflect.Value, r *rand.Rand) {
+		v.SetFloat(r.Float64())
+	},
+	reflect.Complex64: func(v reflect.Value, r *rand.Rand) {
+		v.SetComplex(complex128(complex(r.Float32(), r.Float32())))
+	},
+	reflect.Complex128: func(v reflect.Value, r *rand.Rand) {
+		v.SetComplex(complex(r.Float64(), r.Float64()))
+	},
+	reflect.String: func(v reflect.Value, r *rand.Rand) {
+		v.SetString(randString(r, 0))
+	},
+	reflect.UnsafePointer: func(v reflect.Value, r *rand.Rand) {
+		panic("filling of UnsafePointers is not implemented")
+	},
+}
+
+// randBool returns true or false randomly.
+func randBool(r *rand.Rand) bool {
+	return r.Int31()&(1<<30) == 0
+}
+
+type int63nPicker interface {
+	Int63n(int64) int64
+}
+
+// UnicodeRange describes a sequential range of unicode characters.
+// Last must be numerically greater than or equal to First.
+type UnicodeRange struct {
+	First, Last rune
+}
+
+// UnicodeRanges describes an arbitrary number of sequential ranges of unicode characters.
+// To be useful, each range must have at least one character (First <= Last) and
+// there must be at least one range.
+type UnicodeRanges []UnicodeRange
+
+// choose returns a random unicode character from the given range, using the
+// given randomness source.
+func (ur UnicodeRange) choose(r int63nPicker) rune {
+	count := int64(ur.Last - ur.First + 1)
+	return ur.First + rune(r.Int63n(count))
+}
+
+// CustomStringFillFunc constructs a FillFunc which produces random strings.
+// Each character is selected from the range ur. If there are no characters
+// in the range (ur.Last < ur.First), this will panic.
+func (ur UnicodeRange) CustomStringFillFunc(n int) func(s *string, c Continue) {
+	ur.check()
+	return func(s *string, c Continue) {
+		*s = ur.randString(c.Rand, n)
+	}
+}
+
+// check panics if ur is not a valid range (ur.Last < ur.First).
+func (ur UnicodeRange) check() {
+	if ur.Last < ur.First {
+		panic("UnicodeRange.check: the last encoding must be greater than the first")
+	}
+}
+
+// randString of UnicodeRange makes a random string up to max characters long
+// (or up to the default of 20 when max is 0). Each character is selected from
+// ur(UnicodeRange).
+func (ur UnicodeRange) randString(r *rand.Rand, max int) string {
+	if max == 0 {
+		max = defaultStringMaxLen
+	}
+	n := r.Intn(max)
+	sb := strings.Builder{}
+	sb.Grow(n)
+	for i := 0; i < n; i++ {
+		sb.WriteRune(ur.choose(r))
+	}
+	return sb.String()
+}
+
+// defaultUnicodeRanges sets a default unicode range when users do not set
+// CustomStringFillFunc() but want to fill strings.
+var defaultUnicodeRanges = UnicodeRanges{
+	{' ', '~'},           // ASCII characters
+	{'\u00a0', '\u02af'}, // Multi-byte encoded characters
+	{'\u4e00', '\u9fff'}, // Common CJK (even longer encodings)
+}
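+
+// Example (illustrative sketch, not part of this file's API): wiring a custom
+// string fill into a Filler. NumElements, SkipFieldsWithPattern and
+// CustomStringFillFunc are defined in this file; New, NilChance and Funcs are
+// assumed from the rest of the package API (Funcs is referenced by Fill's doc
+// comment above).
+//
+//	f := New(). // constructor and NilChance names assumed
+//		NilChance(0).
+//		NumElements(1, 3).
+//		SkipFieldsWithPattern(regexp.MustCompile(`^XXX_`)).
+//		Funcs(UnicodeRanges{{'a', 'z'}}.CustomStringFillFunc(12))
+//
+//	var s struct {
+//		Name   string
+//		Labels map[string]string
+//	}
+//	f.Fill(&s) // Name: up to 11 lowercase letters; Labels: 1 to 3 entries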
+// CustomStringFillFunc constructs a FillFunc which produces random strings.
+// Each character is selected from one of the ranges of ur(UnicodeRanges).
+// Each range has an equal probability of being chosen. If there are no ranges,
+// or a selected range has no characters (.Last < .First), this will panic.
+// Do not modify any of the ranges in ur after calling this function.
+func (ur UnicodeRanges) CustomStringFillFunc(n int) func(s *string, c Continue) {
+	// Check that the unicode ranges slice is not empty.
+	if len(ur) == 0 {
+		panic("UnicodeRanges is empty")
+	}
+	// If not empty, check that each range is valid.
+	for i := range ur {
+		ur[i].check()
+	}
+	return func(s *string, c Continue) {
+		*s = ur.randString(c.Rand, n)
+	}
+}
+
+// randString of UnicodeRanges makes a random string up to max characters long
+// (or up to the default of 20 when max is 0). Each character is selected from
+// one of the ranges of ur(UnicodeRanges), and each range has an equal
+// probability of being chosen.
+func (ur UnicodeRanges) randString(r *rand.Rand, max int) string {
+	if max == 0 {
+		max = defaultStringMaxLen
+	}
+	n := r.Intn(max)
+	sb := strings.Builder{}
+	sb.Grow(n)
+	for i := 0; i < n; i++ {
+		sb.WriteRune(ur[r.Intn(len(ur))].choose(r))
+	}
+	return sb.String()
+}
+
+// randString makes a random string up to max characters long (or up to the
+// default of 20 when max is 0). The returned string may include a variety of
+// (valid) UTF-8 encodings.
+func randString(r *rand.Rand, max int) string {
+	return defaultUnicodeRanges.randString(r, max)
+}
+
+// randUint64 makes random 64 bit numbers.
+// Weirdly, rand doesn't have a function that gives you 64 random bits.
+func randUint64(r *rand.Rand) uint64 {
+	return uint64(r.Uint32())<<32 | uint64(r.Uint32())
+}

From c3004fe5ba339c42382971c2e9625bcb195a6f56 Mon Sep 17 00:00:00 2001
From: Simon Pasquier
Date: Wed, 17 Sep 2025 15:24:49 +0200
Subject: [PATCH 3/6] chore: bump github.com/prometheus/alertmanager

---
 go.mod                                        |  20 +-
 go.sum                                        |  44 +-
 .../alecthomas/units/renovate.json5           |   4 +
 .../aws/aws-sdk-go/aws/endpoints/defaults.go  |  91 +-
 .../aws/aws-sdk-go/aws/request/waiter.go      |  13 +-
 .../github.com/aws/aws-sdk-go/aws/version.go  |   2 +-
 .../analysis/internal/debug/debug.go          |   4 +-
 .../github.com/go-openapi/analysis/mixin.go   |  14 +-
 .../github.com/go-openapi/analysis/schema.go  |   4 +-
 vendor/github.com/go-openapi/loads/README.md  |   2 +-
 vendor/github.com/go-openapi/loads/TODO.md    |   3 -
 vendor/github.com/go-openapi/loads/loaders.go |   2 +-
 vendor/github.com/go-openapi/spec/README.md   |  23 +
 vendor/github.com/go-openapi/spec/expander.go |   2 +-
 .../go-openapi/validate/default_validator.go  |  26 +-
 .../go-openapi/validate/example_validator.go  |  30 +-
 .../github.com/go-openapi/validate/formats.go |   6 +-
 .../github.com/go-openapi/validate/helpers.go |   5 +-
 .../go-openapi/validate/object_validator.go   |  14 +-
 .../github.com/go-openapi/validate/pools.go   | 239 ++--
 .../go-openapi/validate/pools_debug.go        | 1012 +++++++++++++++++
 .../github.com/go-openapi/validate/result.go  |  80 +-
 .../github.com/go-openapi/validate/schema.go  |  15 +-
 .../go-openapi/validate/schema_props.go       | 295 +++--
 .../go-openapi/validate/slice_validator.go    |   6 +-
 vendor/github.com/go-openapi/validate/spec.go |  61 +-
 vendor/github.com/go-openapi/validate/type.go |   4 +-
 .../go-openapi/validate/validator.go          | 101 +-
 .../github.com/prometheus/alertmanager/NOTICE |   5 -
 .../api/v2/models/alert_status.go             |  17 +
 .../prometheus/testutil/testutil.go           |  30 +-
 .../prometheus/common/config/http_config.go   |  58 +-
 .../prometheus/common/expfmt/encode.go        |   4 +-
 .../prometheus/common/expfmt/expfmt.go        |   4 +-
 .../common/expfmt/openmetrics_create.go       |   4 +-
 .../prometheus/common/expfmt/text_parse.go    |   2 +-
 .../prometheus/common/model/alert.go          |   7 +-
.../prometheus/common/model/metric.go | 31 +- .../prometheus/common/model/silence.go | 17 +- .../prometheus/common/model/value_float.go | 3 +- .../common/model/value_histogram.go | 7 +- .../prometheus/common/version/info.go | 8 + .../x/tools/go/ast/inspector/inspector.go | 4 +- vendor/modules.txt | 30 +- 44 files changed, 1815 insertions(+), 538 deletions(-) delete mode 100644 vendor/github.com/go-openapi/loads/TODO.md create mode 100644 vendor/github.com/go-openapi/validate/pools_debug.go diff --git a/go.mod b/go.mod index dc9ec5f883..c3875adeec 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ go 1.24.0 require ( github.com/Jeffail/gabs/v2 v2.6.1 - github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 + github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b github.com/blang/semver/v4 v4.0.0 github.com/ghodss/yaml v1.0.0 github.com/go-kit/log v0.2.1 @@ -21,9 +21,9 @@ require ( github.com/pborman/uuid v1.2.1 github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.81.0 github.com/prometheus-operator/prometheus-operator/pkg/client v0.81.0 - github.com/prometheus/alertmanager v0.27.0 - github.com/prometheus/client_golang v1.20.4 - github.com/prometheus/common v0.60.0 + github.com/prometheus/alertmanager v0.28.1 + github.com/prometheus/client_golang v1.20.5 + github.com/prometheus/common v0.61.0 github.com/prometheus/prometheus v0.54.1 github.com/spf13/cobra v1.8.1 github.com/stretchr/testify v1.10.0 @@ -61,7 +61,7 @@ require ( github.com/NYTimes/gziphandler v1.1.1 // indirect github.com/antlr4-go/antlr/v4 v4.13.0 // indirect github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect - github.com/aws/aws-sdk-go v1.54.19 // indirect + github.com/aws/aws-sdk-go v1.55.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect @@ -84,14 +84,14 @@ require ( github.com/go-logfmt/logfmt v0.6.0 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-openapi/analysis v0.22.2 // indirect + github.com/go-openapi/analysis v0.23.0 // indirect github.com/go-openapi/errors v0.22.0 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.21.0 // indirect - github.com/go-openapi/loads v0.21.5 // indirect - github.com/go-openapi/spec v0.20.14 // indirect + github.com/go-openapi/loads v0.22.0 // indirect + github.com/go-openapi/spec v0.21.0 // indirect github.com/go-openapi/swag v0.23.0 // indirect - github.com/go-openapi/validate v0.23.0 // indirect + github.com/go-openapi/validate v0.24.0 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect @@ -166,7 +166,7 @@ require ( golang.org/x/sys v0.31.0 // indirect golang.org/x/term v0.30.0 // indirect golang.org/x/time v0.9.0 // indirect - golang.org/x/tools v0.26.0 // indirect + golang.org/x/tools v0.28.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect diff --git a/go.sum b/go.sum index 6affc10cb2..ce00bc7bbd 100644 --- a/go.sum +++ b/go.sum @@ -62,8 +62,8 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod 
h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 h1:t3eaIm0rUkzbrIewtiFmMK5RXHej2XnoXNhxVsAYUfg= -github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= +github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b h1:mimo19zliBX/vSQ6PWWSL9lK8qwHozUj03+zLoEB8O0= +github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= @@ -73,8 +73,8 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.54.19 h1:tyWV+07jagrNiCcGRzRhdtVjQs7Vy41NwsuOcl0IbVI= -github.com/aws/aws-sdk-go v1.54.19/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= +github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -175,24 +175,24 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= -github.com/go-openapi/analysis v0.22.2 h1:ZBmNoP2h5omLKr/srIC9bfqrUGzT6g6gNv03HE9Vpj0= -github.com/go-openapi/analysis v0.22.2/go.mod h1:pDF4UbZsQTo/oNuRfAWWd4dAh4yuYf//LYorPTjrpvo= +github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU= +github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo= github.com/go-openapi/errors v0.22.0 h1:c4xY/OLxUBSTiepAg3j/MHuAv5mJhnf53LLMWFB+u/w= github.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE= github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= -github.com/go-openapi/loads v0.21.5 h1:jDzF4dSoHw6ZFADCGltDb2lE4F6De7aWSpe+IcsRzT0= -github.com/go-openapi/loads v0.21.5/go.mod 
h1:PxTsnFBoBe+z89riT+wYt3prmSBP6GDAQh2l9H1Flz8= -github.com/go-openapi/spec v0.20.14 h1:7CBlRnw+mtjFGlPDRZmAMnq35cRzI91xj03HVyUi/Do= -github.com/go-openapi/spec v0.20.14/go.mod h1:8EOhTpBoFiask8rrgwbLC3zmJfz4zsCUueRuPM6GNkw= +github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco= +github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs= +github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY= +github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk= github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c= github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4= github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= -github.com/go-openapi/validate v0.23.0 h1:2l7PJLzCis4YUGEoW6eoQw3WhyM65WSIcjX6SQnlfDw= -github.com/go-openapi/validate v0.23.0/go.mod h1:EeiAZ5bmpSIOJV1WLfyYF9qp/B1ZgSaEpHTJHtN5cbE= +github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58= +github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ= github.com/go-resty/resty/v2 v2.13.1 h1:x+LHXBI2nMB1vqndymf26quycC4aggYJ7DECYbiz03g= github.com/go-resty/resty/v2 v2.13.1/go.mod h1:GznXlLxkq6Nh4sU59rPmUw3VtgpO3aS96ORAI6Q7d+0= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= @@ -479,14 +479,14 @@ github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.81.0 h github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.81.0/go.mod h1:YfnEQzw7tUQa0Sjiz8V6QFc6JUGE+i5wybsjc3EOKn8= github.com/prometheus-operator/prometheus-operator/pkg/client v0.81.0 h1:z8ETgiD2hThJi3+3S8eKAbC9/pwPq1kGt8HkeGlDstw= github.com/prometheus-operator/prometheus-operator/pkg/client v0.81.0/go.mod h1:EKK9z5OIxxwaCZmgaidiIfvy6mF7x8M3AJHmxwx+QB4= -github.com/prometheus/alertmanager v0.27.0 h1:V6nTa2J5V4s8TG4C4HtrBP/WNSebCCTYGGv4qecA/+I= -github.com/prometheus/alertmanager v0.27.0/go.mod h1:8Ia/R3urPmbzJ8OsdvmZvIprDwvwmYCmUbwBL+jlPOE= +github.com/prometheus/alertmanager v0.28.1 h1:BK5pCoAtaKg01BYRUJhEDV1tqJMEtYBGzPw8QdvnnvA= +github.com/prometheus/alertmanager v0.28.1/go.mod h1:0StpPUDDHi1VXeM7p2yYfeZgLVi/PPlt39vo9LQUHxM= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI= -github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -497,8 +497,8 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA= -github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ= +github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -654,8 +654,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= -golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= +golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -807,8 +807,8 @@ golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= -golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= +golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= +golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/github.com/alecthomas/units/renovate.json5 b/vendor/github.com/alecthomas/units/renovate.json5 index 897864b852..6bb4acde94 100644 --- a/vendor/github.com/alecthomas/units/renovate.json5 +++ b/vendor/github.com/alecthomas/units/renovate.json5 @@ -8,4 +8,8 @@ "group:allNonMajor", "schedule:earlyMondays", // Run once a week. 
], + postUpdateOptions: [ + "gomodTidy", + "gomodUpdateImportPaths" + ] } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 84dc7dc08e..c3516e018a 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -9503,6 +9503,12 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-north-1", + }: endpoint{}, + endpointKey{ + Region: "eu-south-2", + }: endpoint{}, endpointKey{ Region: "eu-west-1", }: endpoint{}, @@ -13068,6 +13074,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-central-1", }: endpoint{}, + endpointKey{ + Region: "eu-central-2", + }: endpoint{}, endpointKey{ Region: "eu-north-1", }: endpoint{}, @@ -22522,6 +22531,9 @@ var awsPartition = partition{ }: endpoint{ Hostname: "network-firewall-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -24479,6 +24491,14 @@ var awsPartition = partition{ Region: "ca-central-1", }, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "portal.sso.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, endpointKey{ Region: "eu-central-1", }: endpoint{ @@ -32552,6 +32572,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -33621,6 +33644,20 @@ var awsPartition = partition{ }: endpoint{}, }, }, + "tax": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "aws-global", + }: endpoint{ + Hostname: "tax.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, "textract": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -39973,16 +40010,12 @@ var awsusgovPartition = partition{ Endpoints: serviceEndpoints{ endpointKey{ Region: "us-gov-east-1", - }: endpoint{ - Hostname: "autoscaling-plans.us-gov-east-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, + }: endpoint{}, endpointKey{ Region: "us-gov-east-1", Variant: fipsVariant, }: endpoint{ - Hostname: "autoscaling-plans.us-gov-east-1.amazonaws.com", - Protocols: []string{"http", "https"}, + Hostname: "autoscaling-plans.us-gov-east-1.amazonaws.com", }, endpointKey{ Region: "us-gov-east-1-fips", @@ -39994,16 +40027,12 @@ var awsusgovPartition = partition{ }, endpointKey{ Region: "us-gov-west-1", - }: endpoint{ - Hostname: "autoscaling-plans.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, - }, + }: endpoint{}, endpointKey{ Region: "us-gov-west-1", Variant: fipsVariant, }: endpoint{ - Hostname: "autoscaling-plans.us-gov-west-1.amazonaws.com", - Protocols: []string{"http", "https"}, + Hostname: "autoscaling-plans.us-gov-west-1.amazonaws.com", }, endpointKey{ Region: "us-gov-west-1-fips", @@ -40969,20 +40998,40 @@ var awsusgovPartition = partition{ "directconnect": service{ Endpoints: serviceEndpoints{ endpointKey{ - Region: "us-gov-east-1", + Region: "fips-us-gov-east-1", }: endpoint{ - Hostname: "directconnect.us-gov-east-1.amazonaws.com", + Hostname: "directconnect-fips.us-gov-east-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-east-1", }, + Deprecated: boxedTrue, }, endpointKey{ - Region: 
"us-gov-west-1", + Region: "fips-us-gov-west-1", }: endpoint{ - Hostname: "directconnect.us-gov-west-1.amazonaws.com", + Hostname: "directconnect-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.us-gov-east-1.amazonaws.com", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "directconnect-fips.us-gov-west-1.amazonaws.com", }, }, }, @@ -46372,6 +46421,9 @@ var awsisoPartition = partition{ endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + }: endpoint{}, }, }, "appconfig": service{ @@ -47685,6 +47737,13 @@ var awsisobPartition = partition{ }: endpoint{}, }, }, + "apigateway": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-isob-east-1", + }: endpoint{}, + }, + }, "appconfig": service{ Endpoints: serviceEndpoints{ endpointKey{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go index 4601f883cc..992ed0464b 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go @@ -256,8 +256,17 @@ func (a *WaiterAcceptor) match(name string, l aws.Logger, req *Request, err erro s := a.Expected.(int) result = s == req.HTTPResponse.StatusCode case ErrorWaiterMatch: - if aerr, ok := err.(awserr.Error); ok { - result = aerr.Code() == a.Expected.(string) + switch ex := a.Expected.(type) { + case string: + if aerr, ok := err.(awserr.Error); ok { + result = aerr.Code() == ex + } + case bool: + if ex { + result = err != nil + } else { + result = err == nil + } } default: waiterLogf(l, "WARNING: Waiter %s encountered unexpected matcher: %s", diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index b2040b05e5..d15e3c84c0 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.54.19" +const SDKVersion = "1.55.5" diff --git a/vendor/github.com/go-openapi/analysis/internal/debug/debug.go b/vendor/github.com/go-openapi/analysis/internal/debug/debug.go index ec0fec0229..39f55a97bf 100644 --- a/vendor/github.com/go-openapi/analysis/internal/debug/debug.go +++ b/vendor/github.com/go-openapi/analysis/internal/debug/debug.go @@ -29,7 +29,7 @@ var ( // GetLogger provides a prefix debug logger func GetLogger(prefix string, debug bool) func(string, ...interface{}) { if debug { - logger := log.New(output, fmt.Sprintf("%s:", prefix), log.LstdFlags) + logger := log.New(output, prefix+":", log.LstdFlags) return func(msg string, args ...interface{}) { _, file1, pos1, _ := runtime.Caller(1) @@ -37,5 +37,5 @@ func GetLogger(prefix string, debug bool) func(string, ...interface{}) { } } - return func(msg string, args ...interface{}) {} + return func(_ string, _ ...interface{}) {} } diff --git a/vendor/github.com/go-openapi/analysis/mixin.go b/vendor/github.com/go-openapi/analysis/mixin.go index 57cb7a9dc3..7785a29b27 100644 --- a/vendor/github.com/go-openapi/analysis/mixin.go +++ b/vendor/github.com/go-openapi/analysis/mixin.go @@ -53,7 +53,7 @@ 
import ( // collisions. func Mixin(primary *spec.Swagger, mixins ...*spec.Swagger) []string { skipped := make([]string, 0, len(mixins)) - opIds := getOpIds(primary) + opIDs := getOpIDs(primary) initPrimary(primary) for i, m := range mixins { @@ -74,7 +74,7 @@ func Mixin(primary *spec.Swagger, mixins ...*spec.Swagger) []string { skipped = append(skipped, mergeDefinitions(primary, m)...) // merging paths requires a map of operationIDs to work with - skipped = append(skipped, mergePaths(primary, m, opIds, i)...) + skipped = append(skipped, mergePaths(primary, m, opIDs, i)...) skipped = append(skipped, mergeParameters(primary, m)...) @@ -84,9 +84,9 @@ func Mixin(primary *spec.Swagger, mixins ...*spec.Swagger) []string { return skipped } -// getOpIds extracts all the paths..operationIds from the given +// getOpIDs extracts all the paths..operationIds from the given // spec and returns them as the keys in a map with 'true' values. -func getOpIds(s *spec.Swagger) map[string]bool { +func getOpIDs(s *spec.Swagger) map[string]bool { rv := make(map[string]bool) if s.Paths == nil { return rv @@ -179,7 +179,7 @@ func mergeDefinitions(primary *spec.Swagger, m *spec.Swagger) (skipped []string) return } -func mergePaths(primary *spec.Swagger, m *spec.Swagger, opIds map[string]bool, mixIndex int) (skipped []string) { +func mergePaths(primary *spec.Swagger, m *spec.Swagger, opIDs map[string]bool, mixIndex int) (skipped []string) { if m.Paths != nil { for k, v := range m.Paths.Paths { if _, exists := primary.Paths.Paths[k]; exists { @@ -198,10 +198,10 @@ func mergePaths(primary *spec.Swagger, m *spec.Swagger, opIds map[string]bool, m // all the proivded specs are already unique. piops := pathItemOps(v) for _, piop := range piops { - if opIds[piop.ID] { + if opIDs[piop.ID] { piop.ID = fmt.Sprintf("%v%v%v", piop.ID, "Mixin", mixIndex) } - opIds[piop.ID] = true + opIDs[piop.ID] = true } primary.Paths.Paths[k] = v } diff --git a/vendor/github.com/go-openapi/analysis/schema.go b/vendor/github.com/go-openapi/analysis/schema.go index 95f35dc756..ab190db5b7 100644 --- a/vendor/github.com/go-openapi/analysis/schema.go +++ b/vendor/github.com/go-openapi/analysis/schema.go @@ -1,7 +1,7 @@ package analysis import ( - "fmt" + "errors" "github.com/go-openapi/spec" "github.com/go-openapi/strfmt" @@ -19,7 +19,7 @@ type SchemaOpts struct { // patterns. 
func Schema(opts SchemaOpts) (*AnalyzedSchema, error) { if opts.Schema == nil { - return nil, fmt.Errorf("no schema to analyze") + return nil, errors.New("no schema to analyze") } a := &AnalyzedSchema{ diff --git a/vendor/github.com/go-openapi/loads/README.md b/vendor/github.com/go-openapi/loads/README.md index 6b8307c84d..f8bd440dfc 100644 --- a/vendor/github.com/go-openapi/loads/README.md +++ b/vendor/github.com/go-openapi/loads/README.md @@ -1,4 +1,4 @@ -# Loads OAI specs [![Build Status](https://github.com/go-openapi/loads/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/loads/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/loads/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/lods) +# Loads OAI specs [![Build Status](https://github.com/go-openapi/loads/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/loads/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/loads/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/loads) [![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/loads/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/loads?status.svg)](http://godoc.org/github.com/go-openapi/loads) [![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/loads)](https://goreportcard.com/report/github.com/go-openapi/loads) diff --git a/vendor/github.com/go-openapi/loads/TODO.md b/vendor/github.com/go-openapi/loads/TODO.md deleted file mode 100644 index 103937acdf..0000000000 --- a/vendor/github.com/go-openapi/loads/TODO.md +++ /dev/null @@ -1,3 +0,0 @@ -[x] why not filepath in JSONDoc() -[] integration tests package -[] relint diff --git a/vendor/github.com/go-openapi/loads/loaders.go b/vendor/github.com/go-openapi/loads/loaders.go index 705275cb6e..b2d1e034c5 100644 --- a/vendor/github.com/go-openapi/loads/loaders.go +++ b/vendor/github.com/go-openapi/loads/loaders.go @@ -21,7 +21,7 @@ var ( func init() { jsonLoader := &loader{ DocLoaderWithMatch: DocLoaderWithMatch{ - Match: func(pth string) bool { + Match: func(_ string) bool { return true }, Fn: JSONDoc, diff --git a/vendor/github.com/go-openapi/spec/README.md b/vendor/github.com/go-openapi/spec/README.md index 822172ff79..7fd2810c69 100644 --- a/vendor/github.com/go-openapi/spec/README.md +++ b/vendor/github.com/go-openapi/spec/README.md @@ -29,3 +29,26 @@ The object model for OpenAPI specification documents. > This [discussion thread](https://github.com/go-openapi/spec/issues/21) relates the full story. > > An early attempt to support Swagger 3 may be found at: https://github.com/go-openapi/spec3 + +* Does the unmarshaling support YAML? + +> Not directly. The exposed types know only how to unmarshal from JSON. +> +> In order to load a YAML document as a Swagger spec, you need to use the loaders provided by +> github.com/go-openapi/loads +> +> Take a look at the example there: https://pkg.go.dev/github.com/go-openapi/loads#example-Spec +> +> See also https://github.com/go-openapi/spec/issues/164 + +* How can I validate a spec? + +> Validation is provided by [the validate package](http://github.com/go-openapi/validate) + +* Why do we have an `ID` field for `Schema` which is not part of the swagger spec? + +> We found jsonschema compatibility more important: since `id` in jsonschema influences +> how `$ref` are resolved. +> This `id` does not conflict with any property named `id`. 
+> +> See also https://github.com/go-openapi/spec/issues/23 diff --git a/vendor/github.com/go-openapi/spec/expander.go b/vendor/github.com/go-openapi/spec/expander.go index 74f91c3a8b..b81a5699a0 100644 --- a/vendor/github.com/go-openapi/spec/expander.go +++ b/vendor/github.com/go-openapi/spec/expander.go @@ -57,7 +57,7 @@ func ExpandSpec(spec *Swagger, options *ExpandOptions) error { if !options.SkipSchemas { for key, definition := range spec.Definitions { parentRefs := make([]string, 0, 10) - parentRefs = append(parentRefs, fmt.Sprintf("#/definitions/%s", key)) + parentRefs = append(parentRefs, "#/definitions/"+key) def, err := expandSchema(definition, parentRefs, resolver, specBasePath) if resolver.shouldStopOnError(err) { diff --git a/vendor/github.com/go-openapi/validate/default_validator.go b/vendor/github.com/go-openapi/validate/default_validator.go index e30d1501f5..e0dd93839e 100644 --- a/vendor/github.com/go-openapi/validate/default_validator.go +++ b/vendor/github.com/go-openapi/validate/default_validator.go @@ -83,7 +83,7 @@ func (d *defaultValidator) isVisited(path string) bool { // Validate validates the default values declared in the swagger spec func (d *defaultValidator) Validate() *Result { - errs := poolOfResults.BorrowResult() // will redeem when merged + errs := pools.poolOfResults.BorrowResult() // will redeem when merged if d == nil || d.SpecValidator == nil { return errs @@ -97,7 +97,7 @@ func (d *defaultValidator) validateDefaultValueValidAgainstSchema() *Result { // every default value that is specified must validate against the schema for that property // headers, items, parameters, schema - res := poolOfResults.BorrowResult() // will redeem when merged + res := pools.poolOfResults.BorrowResult() // will redeem when merged s := d.SpecValidator for method, pathItem := range s.expandedAnalyzer().Operations() { @@ -119,6 +119,8 @@ func (d *defaultValidator) validateDefaultValueValidAgainstSchema() *Result { if red.HasErrorsOrWarnings() { res.AddErrors(defaultValueDoesNotValidateMsg(param.Name, param.In)) res.Merge(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) } } @@ -128,6 +130,8 @@ func (d *defaultValidator) validateDefaultValueValidAgainstSchema() *Result { if red.HasErrorsOrWarnings() { res.AddErrors(defaultValueItemsDoesNotValidateMsg(param.Name, param.In)) res.Merge(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) } } @@ -137,6 +141,8 @@ func (d *defaultValidator) validateDefaultValueValidAgainstSchema() *Result { if red.HasErrorsOrWarnings() { res.AddErrors(defaultValueDoesNotValidateMsg(param.Name, param.In)) res.Merge(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) } } } @@ -162,7 +168,7 @@ func (d *defaultValidator) validateDefaultValueValidAgainstSchema() *Result { // reset explored schemas to get depth-first recursive-proof exploration d.resetVisited() for nm, sch := range s.spec.Spec().Definitions { - res.Merge(d.validateDefaultValueSchemaAgainstSchema(fmt.Sprintf("definitions.%s", nm), "body", &sch)) //#nosec + res.Merge(d.validateDefaultValueSchemaAgainstSchema("definitions."+nm, "body", &sch)) //#nosec } } return res @@ -188,6 +194,8 @@ func (d *defaultValidator) validateDefaultInResponse(resp *spec.Response, respon if red.HasErrorsOrWarnings() { res.AddErrors(defaultValueHeaderDoesNotValidateMsg(operationID, nm, responseName)) res.Merge(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) } } @@ -197,6 +205,8 @@ func (d 
*defaultValidator) validateDefaultInResponse(resp *spec.Response, respon if red.HasErrorsOrWarnings() { res.AddErrors(defaultValueHeaderItemsDoesNotValidateMsg(operationID, nm, responseName)) res.Merge(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) } } @@ -216,6 +226,8 @@ func (d *defaultValidator) validateDefaultInResponse(resp *spec.Response, respon // Additional message to make sure the context of the error is not lost res.AddErrors(defaultValueInDoesNotValidateMsg(operationID, responseName)) res.Merge(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) } } return res @@ -227,7 +239,7 @@ func (d *defaultValidator) validateDefaultValueSchemaAgainstSchema(path, in stri return nil } d.beingVisited(path) - res := poolOfResults.BorrowResult() + res := pools.poolOfResults.BorrowResult() s := d.SpecValidator if schema.Default != nil { @@ -251,7 +263,7 @@ func (d *defaultValidator) validateDefaultValueSchemaAgainstSchema(path, in stri } if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil { // NOTE: we keep validating values, even though additionalItems is not supported by Swagger 2.0 (and 3.0 as well) - res.Merge(d.validateDefaultValueSchemaAgainstSchema(fmt.Sprintf("%s.additionalItems", path), in, schema.AdditionalItems.Schema)) + res.Merge(d.validateDefaultValueSchemaAgainstSchema(path+".additionalItems", in, schema.AdditionalItems.Schema)) } for propName, prop := range schema.Properties { res.Merge(d.validateDefaultValueSchemaAgainstSchema(path+"."+propName, in, &prop)) //#nosec @@ -260,7 +272,7 @@ func (d *defaultValidator) validateDefaultValueSchemaAgainstSchema(path, in stri res.Merge(d.validateDefaultValueSchemaAgainstSchema(path+"."+propName, in, &prop)) //#nosec } if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil { - res.Merge(d.validateDefaultValueSchemaAgainstSchema(fmt.Sprintf("%s.additionalProperties", path), in, schema.AdditionalProperties.Schema)) + res.Merge(d.validateDefaultValueSchemaAgainstSchema(path+".additionalProperties", in, schema.AdditionalProperties.Schema)) } if schema.AllOf != nil { for i, aoSch := range schema.AllOf { @@ -273,7 +285,7 @@ func (d *defaultValidator) validateDefaultValueSchemaAgainstSchema(path, in stri // TODO: Temporary duplicated code. 
Need to refactor with examples func (d *defaultValidator) validateDefaultValueItemsAgainstSchema(path, in string, root interface{}, items *spec.Items) *Result { - res := poolOfResults.BorrowResult() + res := pools.poolOfResults.BorrowResult() s := d.SpecValidator if items != nil { if items.Default != nil { diff --git a/vendor/github.com/go-openapi/validate/example_validator.go b/vendor/github.com/go-openapi/validate/example_validator.go index 2e2cce1b89..d08956973c 100644 --- a/vendor/github.com/go-openapi/validate/example_validator.go +++ b/vendor/github.com/go-openapi/validate/example_validator.go @@ -59,7 +59,7 @@ func (ex *exampleValidator) isVisited(path string) bool { // - individual property // - responses func (ex *exampleValidator) Validate() *Result { - errs := poolOfResults.BorrowResult() + errs := pools.poolOfResults.BorrowResult() if ex == nil || ex.SpecValidator == nil { return errs @@ -75,7 +75,7 @@ func (ex *exampleValidator) validateExampleValueValidAgainstSchema() *Result { // in: schemas, properties, object, items // not in: headers, parameters without schema - res := poolOfResults.BorrowResult() + res := pools.poolOfResults.BorrowResult() s := ex.SpecValidator for method, pathItem := range s.expandedAnalyzer().Operations() { @@ -97,6 +97,8 @@ func (ex *exampleValidator) validateExampleValueValidAgainstSchema() *Result { if red.HasErrorsOrWarnings() { res.AddWarnings(exampleValueDoesNotValidateMsg(param.Name, param.In)) res.MergeAsWarnings(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) } } @@ -106,8 +108,8 @@ func (ex *exampleValidator) validateExampleValueValidAgainstSchema() *Result { if red.HasErrorsOrWarnings() { res.AddWarnings(exampleValueItemsDoesNotValidateMsg(param.Name, param.In)) res.Merge(red) - } else { - poolOfResults.RedeemResult(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) } } @@ -117,8 +119,8 @@ func (ex *exampleValidator) validateExampleValueValidAgainstSchema() *Result { if red.HasErrorsOrWarnings() { res.AddWarnings(exampleValueDoesNotValidateMsg(param.Name, param.In)) res.Merge(red) - } else { - poolOfResults.RedeemResult(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) } } } @@ -144,7 +146,7 @@ func (ex *exampleValidator) validateExampleValueValidAgainstSchema() *Result { // reset explored schemas to get depth-first recursive-proof exploration ex.resetVisited() for nm, sch := range s.spec.Spec().Definitions { - res.Merge(ex.validateExampleValueSchemaAgainstSchema(fmt.Sprintf("definitions.%s", nm), "body", &sch)) //#nosec + res.Merge(ex.validateExampleValueSchemaAgainstSchema("definitions."+nm, "body", &sch)) //#nosec } } return res @@ -170,6 +172,8 @@ func (ex *exampleValidator) validateExampleInResponse(resp *spec.Response, respo if red.HasErrorsOrWarnings() { res.AddWarnings(exampleValueHeaderDoesNotValidateMsg(operationID, nm, responseName)) res.MergeAsWarnings(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) } } @@ -179,6 +183,8 @@ func (ex *exampleValidator) validateExampleInResponse(resp *spec.Response, respo if red.HasErrorsOrWarnings() { res.AddWarnings(exampleValueHeaderItemsDoesNotValidateMsg(operationID, nm, responseName)) res.MergeAsWarnings(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) } } @@ -198,6 +204,8 @@ func (ex *exampleValidator) validateExampleInResponse(resp *spec.Response, respo // Additional message to make sure the context of the error is not lost 
res.AddWarnings(exampleValueInDoesNotValidateMsg(operationID, responseName)) res.Merge(red) + } else if red.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(red) } } @@ -225,7 +233,7 @@ func (ex *exampleValidator) validateExampleValueSchemaAgainstSchema(path, in str } ex.beingVisited(path) s := ex.SpecValidator - res := poolOfResults.BorrowResult() + res := pools.poolOfResults.BorrowResult() if schema.Example != nil { res.MergeAsWarnings( @@ -248,7 +256,7 @@ func (ex *exampleValidator) validateExampleValueSchemaAgainstSchema(path, in str } if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil { // NOTE: we keep validating values, even though additionalItems is unsupported in Swagger 2.0 (and 3.0 as well) - res.Merge(ex.validateExampleValueSchemaAgainstSchema(fmt.Sprintf("%s.additionalItems", path), in, schema.AdditionalItems.Schema)) + res.Merge(ex.validateExampleValueSchemaAgainstSchema(path+".additionalItems", in, schema.AdditionalItems.Schema)) } for propName, prop := range schema.Properties { res.Merge(ex.validateExampleValueSchemaAgainstSchema(path+"."+propName, in, &prop)) //#nosec @@ -257,7 +265,7 @@ func (ex *exampleValidator) validateExampleValueSchemaAgainstSchema(path, in str res.Merge(ex.validateExampleValueSchemaAgainstSchema(path+"."+propName, in, &prop)) //#nosec } if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil { - res.Merge(ex.validateExampleValueSchemaAgainstSchema(fmt.Sprintf("%s.additionalProperties", path), in, schema.AdditionalProperties.Schema)) + res.Merge(ex.validateExampleValueSchemaAgainstSchema(path+".additionalProperties", in, schema.AdditionalProperties.Schema)) } if schema.AllOf != nil { for i, aoSch := range schema.AllOf { @@ -271,7 +279,7 @@ func (ex *exampleValidator) validateExampleValueSchemaAgainstSchema(path, in str // func (ex *exampleValidator) validateExampleValueItemsAgainstSchema(path, in string, root interface{}, items *spec.Items) *Result { - res := poolOfResults.BorrowResult() + res := pools.poolOfResults.BorrowResult() s := ex.SpecValidator if items != nil { if items.Example != nil { diff --git a/vendor/github.com/go-openapi/validate/formats.go b/vendor/github.com/go-openapi/validate/formats.go index 9c66cabb2c..f4e3552130 100644 --- a/vendor/github.com/go-openapi/validate/formats.go +++ b/vendor/github.com/go-openapi/validate/formats.go @@ -36,7 +36,7 @@ func newFormatValidator(path, in, format string, formats strfmt.Registry, opts * var f *formatValidator if opts.recycleValidators { - f = poolOfFormatValidators.BorrowValidator() + f = pools.poolOfFormatValidators.BorrowValidator() } else { f = new(formatValidator) } @@ -82,7 +82,7 @@ func (f *formatValidator) Validate(val interface{}) *Result { var result *Result if f.Options.recycleResult { - result = poolOfResults.BorrowResult() + result = pools.poolOfResults.BorrowResult() } else { result = new(Result) } @@ -95,5 +95,5 @@ func (f *formatValidator) Validate(val interface{}) *Result { } func (f *formatValidator) redeem() { - poolOfFormatValidators.RedeemValidator(f) + pools.poolOfFormatValidators.RedeemValidator(f) } diff --git a/vendor/github.com/go-openapi/validate/helpers.go b/vendor/github.com/go-openapi/validate/helpers.go index e855994dfe..757e403d91 100644 --- a/vendor/github.com/go-openapi/validate/helpers.go +++ b/vendor/github.com/go-openapi/validate/helpers.go @@ -105,7 +105,7 @@ func (h *errorHelper) sErr(err errors.Error, recycle bool) *Result { // Builds a Result from standard errors.Error var result *Result if 
recycle { - result = poolOfResults.BorrowResult() + result = pools.poolOfResults.BorrowResult() } else { result = new(Result) } @@ -233,7 +233,7 @@ func (h *paramHelper) safeExpandedParamsFor(path, method, operationID string, re operation.Parameters = resolvedParams for _, ppr := range s.expandedAnalyzer().SafeParamsFor(method, path, - func(p spec.Parameter, err error) bool { + func(_ spec.Parameter, err error) bool { // since params have already been expanded, there are few causes for error res.AddErrors(someParametersBrokenMsg(path, method, operationID)) // original error from analyzer @@ -314,6 +314,7 @@ func (r *responseHelper) expandResponseRef( errorHelp.addPointerError(res, err, response.Ref.String(), path) return nil, res } + return response, res } diff --git a/vendor/github.com/go-openapi/validate/object_validator.go b/vendor/github.com/go-openapi/validate/object_validator.go index c556a9b61d..dff73fa98a 100644 --- a/vendor/github.com/go-openapi/validate/object_validator.go +++ b/vendor/github.com/go-openapi/validate/object_validator.go @@ -49,7 +49,7 @@ func newObjectValidator(path, in string, var v *objectValidator if opts.recycleValidators { - v = poolOfObjectValidators.BorrowValidator() + v = pools.poolOfObjectValidators.BorrowValidator() } else { v = new(objectValidator) } @@ -183,7 +183,7 @@ func (o *objectValidator) Validate(data interface{}) *Result { var res *Result if o.Options.recycleResult { - res = poolOfResults.BorrowResult() + res = pools.poolOfResults.BorrowResult() } else { res = new(Result) } @@ -336,9 +336,9 @@ func (o *objectValidator) validatePropertiesSchema(val map[string]interface{}, r // Property types: // - regular Property - pSchema := poolOfSchemas.BorrowSchema() // recycle a spec.Schema object which lifespan extends only to the validation of properties + pSchema := pools.poolOfSchemas.BorrowSchema() // recycle a spec.Schema object which lifespan extends only to the validation of properties defer func() { - poolOfSchemas.RedeemSchema(pSchema) + pools.poolOfSchemas.RedeemSchema(pSchema) }() for pName := range o.Properties { @@ -398,9 +398,9 @@ func (o *objectValidator) validatePatternProperty(key string, value interface{}, succeededOnce := false patterns := make([]string, 0, len(o.PatternProperties)) - schema := poolOfSchemas.BorrowSchema() + schema := pools.poolOfSchemas.BorrowSchema() defer func() { - poolOfSchemas.RedeemSchema(schema) + pools.poolOfSchemas.RedeemSchema(schema) }() for k := range o.PatternProperties { @@ -427,5 +427,5 @@ func (o *objectValidator) validatePatternProperty(key string, value interface{}, } func (o *objectValidator) redeem() { - poolOfObjectValidators.RedeemValidator(o) + pools.poolOfObjectValidators.RedeemValidator(o) } diff --git a/vendor/github.com/go-openapi/validate/pools.go b/vendor/github.com/go-openapi/validate/pools.go index 728ed0a569..3ddce4dcc2 100644 --- a/vendor/github.com/go-openapi/validate/pools.go +++ b/vendor/github.com/go-openapi/validate/pools.go @@ -1,3 +1,5 @@ +//go:build !validatedebug + package validate import ( @@ -6,26 +8,7 @@ import ( "github.com/go-openapi/spec" ) -var ( - // memory pools for all validator objects. - // - // Each pool can be borrowed from and redeemed to. 
- poolOfSchemaValidators schemaValidatorsPool - poolOfObjectValidators objectValidatorsPool - poolOfSliceValidators sliceValidatorsPool - poolOfItemsValidators itemsValidatorsPool - poolOfBasicCommonValidators basicCommonValidatorsPool - poolOfHeaderValidators headerValidatorsPool - poolOfParamValidators paramValidatorsPool - poolOfBasicSliceValidators basicSliceValidatorsPool - poolOfNumberValidators numberValidatorsPool - poolOfStringValidators stringValidatorsPool - poolOfSchemaPropsValidators schemaPropsValidatorsPool - poolOfFormatValidators formatValidatorsPool - poolOfTypeValidators typeValidatorsPool - poolOfSchemas schemasPool - poolOfResults resultsPool -) +var pools allPools func init() { resetPools() @@ -35,158 +18,168 @@ func resetPools() { // NOTE: for testing purpose, we might want to reset pools after calling Validate twice. // The pool is corrupted in that case: calling Put twice inserts a duplicate in the pool // and further calls to Get are mishandled. - poolOfSchemaValidators = schemaValidatorsPool{ - Pool: &sync.Pool{ - New: func() any { - s := &SchemaValidator{} - return s + pools = allPools{ + poolOfSchemaValidators: schemaValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &SchemaValidator{} + + return s + }, }, }, - } - - poolOfObjectValidators = objectValidatorsPool{ - Pool: &sync.Pool{ - New: func() any { - s := &objectValidator{} + poolOfObjectValidators: objectValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &objectValidator{} - return s + return s + }, }, }, - } - - poolOfSliceValidators = sliceValidatorsPool{ - Pool: &sync.Pool{ - New: func() any { - s := &schemaSliceValidator{} + poolOfSliceValidators: sliceValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &schemaSliceValidator{} - return s + return s + }, }, }, - } - - poolOfItemsValidators = itemsValidatorsPool{ - Pool: &sync.Pool{ - New: func() any { - s := &itemsValidator{} + poolOfItemsValidators: itemsValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &itemsValidator{} - return s + return s + }, }, }, - } - - poolOfBasicCommonValidators = basicCommonValidatorsPool{ - Pool: &sync.Pool{ - New: func() any { - s := &basicCommonValidator{} + poolOfBasicCommonValidators: basicCommonValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &basicCommonValidator{} - return s + return s + }, }, }, - } - - poolOfHeaderValidators = headerValidatorsPool{ - Pool: &sync.Pool{ - New: func() any { - s := &HeaderValidator{} + poolOfHeaderValidators: headerValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &HeaderValidator{} - return s + return s + }, }, }, - } - - poolOfParamValidators = paramValidatorsPool{ - Pool: &sync.Pool{ - New: func() any { - s := &ParamValidator{} + poolOfParamValidators: paramValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &ParamValidator{} - return s + return s + }, }, }, - } - - poolOfBasicSliceValidators = basicSliceValidatorsPool{ - Pool: &sync.Pool{ - New: func() any { - s := &basicSliceValidator{} + poolOfBasicSliceValidators: basicSliceValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &basicSliceValidator{} - return s + return s + }, }, }, - } - - poolOfNumberValidators = numberValidatorsPool{ - Pool: &sync.Pool{ - New: func() any { - s := &numberValidator{} + poolOfNumberValidators: numberValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &numberValidator{} - return s + return s + }, }, }, - } - - poolOfStringValidators = stringValidatorsPool{ - Pool: &sync.Pool{ - New: func() any 
{ - s := &stringValidator{} + poolOfStringValidators: stringValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &stringValidator{} - return s + return s + }, }, }, - } - - poolOfSchemaPropsValidators = schemaPropsValidatorsPool{ - Pool: &sync.Pool{ - New: func() any { - s := &schemaPropsValidator{} + poolOfSchemaPropsValidators: schemaPropsValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &schemaPropsValidator{} - return s + return s + }, }, }, - } - - poolOfFormatValidators = formatValidatorsPool{ - Pool: &sync.Pool{ - New: func() any { - s := &formatValidator{} + poolOfFormatValidators: formatValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &formatValidator{} - return s + return s + }, }, }, - } - - poolOfTypeValidators = typeValidatorsPool{ - Pool: &sync.Pool{ - New: func() any { - s := &typeValidator{} + poolOfTypeValidators: typeValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &typeValidator{} - return s + return s + }, }, }, - } - - poolOfSchemas = schemasPool{ - Pool: &sync.Pool{ - New: func() any { - s := &spec.Schema{} + poolOfSchemas: schemasPool{ + Pool: &sync.Pool{ + New: func() any { + s := &spec.Schema{} - return s + return s + }, }, }, - } - - poolOfResults = resultsPool{ - Pool: &sync.Pool{ - New: func() any { - s := &Result{} + poolOfResults: resultsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &Result{} - return s + return s + }, }, }, } } type ( + allPools struct { + // memory pools for all validator objects. + // + // Each pool can be borrowed from and redeemed to. + poolOfSchemaValidators schemaValidatorsPool + poolOfObjectValidators objectValidatorsPool + poolOfSliceValidators sliceValidatorsPool + poolOfItemsValidators itemsValidatorsPool + poolOfBasicCommonValidators basicCommonValidatorsPool + poolOfHeaderValidators headerValidatorsPool + poolOfParamValidators paramValidatorsPool + poolOfBasicSliceValidators basicSliceValidatorsPool + poolOfNumberValidators numberValidatorsPool + poolOfStringValidators stringValidatorsPool + poolOfSchemaPropsValidators schemaPropsValidatorsPool + poolOfFormatValidators formatValidatorsPool + poolOfTypeValidators typeValidatorsPool + poolOfSchemas schemasPool + poolOfResults resultsPool + } + schemaValidatorsPool struct { *sync.Pool } diff --git a/vendor/github.com/go-openapi/validate/pools_debug.go b/vendor/github.com/go-openapi/validate/pools_debug.go new file mode 100644 index 0000000000..12949f02a7 --- /dev/null +++ b/vendor/github.com/go-openapi/validate/pools_debug.go @@ -0,0 +1,1012 @@ +//go:build validatedebug + +package validate + +import ( + "fmt" + "runtime" + "sync" + "testing" + + "github.com/go-openapi/spec" +) + +// This version of the pools is to be used for debugging and testing, with build tag "validatedebug". +// +// In this mode, the pools are tracked for allocation and redemption of borrowed objects, so we can +// verify a few behaviors of the validators. The debug pools panic when an invalid usage pattern is detected. + +var pools allPools + +func init() { + resetPools() +} + +func resetPools() { + // NOTE: for testing purpose, we might want to reset pools after calling Validate twice. + // The pool is corrupted in that case: calling Put twice inserts a duplicate in the pool + // and further calls to Get are mishandled. 
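The NOTE above is not hypothetical: sync.Pool performs no deduplication, so redeeming one pointer twice plants a duplicate entry that two later borrowers can end up sharing. A minimal stand-alone sketch of that failure mode (not part of the patch; a stand-in type rather than these validators):

package main

import (
	"fmt"
	"sync"
)

type validator struct{ state int }

func main() {
	pool := &sync.Pool{New: func() any { return &validator{} }}

	v := pool.Get().(*validator)
	pool.Put(v)
	pool.Put(v) // the duplicate insertion the NOTE warns about

	a := pool.Get().(*validator)
	b := pool.Get().(*validator)
	fmt.Println(a == b) // may print true: two "fresh" borrows alias one object
}

This is why the debug build tracks fresh/recycled/redeemed states per pointer below and panics on a second redemption instead of letting the pool silently corrupt.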
+ + pools = allPools{ + poolOfSchemaValidators: schemaValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &SchemaValidator{} + + return s + }, + }, + debugMap: make(map[*SchemaValidator]status), + allocMap: make(map[*SchemaValidator]string), + redeemMap: make(map[*SchemaValidator]string), + }, + poolOfObjectValidators: objectValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &objectValidator{} + + return s + }, + }, + debugMap: make(map[*objectValidator]status), + allocMap: make(map[*objectValidator]string), + redeemMap: make(map[*objectValidator]string), + }, + poolOfSliceValidators: sliceValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &schemaSliceValidator{} + + return s + }, + }, + debugMap: make(map[*schemaSliceValidator]status), + allocMap: make(map[*schemaSliceValidator]string), + redeemMap: make(map[*schemaSliceValidator]string), + }, + poolOfItemsValidators: itemsValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &itemsValidator{} + + return s + }, + }, + debugMap: make(map[*itemsValidator]status), + allocMap: make(map[*itemsValidator]string), + redeemMap: make(map[*itemsValidator]string), + }, + poolOfBasicCommonValidators: basicCommonValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &basicCommonValidator{} + + return s + }, + }, + debugMap: make(map[*basicCommonValidator]status), + allocMap: make(map[*basicCommonValidator]string), + redeemMap: make(map[*basicCommonValidator]string), + }, + poolOfHeaderValidators: headerValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &HeaderValidator{} + + return s + }, + }, + debugMap: make(map[*HeaderValidator]status), + allocMap: make(map[*HeaderValidator]string), + redeemMap: make(map[*HeaderValidator]string), + }, + poolOfParamValidators: paramValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &ParamValidator{} + + return s + }, + }, + debugMap: make(map[*ParamValidator]status), + allocMap: make(map[*ParamValidator]string), + redeemMap: make(map[*ParamValidator]string), + }, + poolOfBasicSliceValidators: basicSliceValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &basicSliceValidator{} + + return s + }, + }, + debugMap: make(map[*basicSliceValidator]status), + allocMap: make(map[*basicSliceValidator]string), + redeemMap: make(map[*basicSliceValidator]string), + }, + poolOfNumberValidators: numberValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &numberValidator{} + + return s + }, + }, + debugMap: make(map[*numberValidator]status), + allocMap: make(map[*numberValidator]string), + redeemMap: make(map[*numberValidator]string), + }, + poolOfStringValidators: stringValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &stringValidator{} + + return s + }, + }, + debugMap: make(map[*stringValidator]status), + allocMap: make(map[*stringValidator]string), + redeemMap: make(map[*stringValidator]string), + }, + poolOfSchemaPropsValidators: schemaPropsValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &schemaPropsValidator{} + + return s + }, + }, + debugMap: make(map[*schemaPropsValidator]status), + allocMap: make(map[*schemaPropsValidator]string), + redeemMap: make(map[*schemaPropsValidator]string), + }, + poolOfFormatValidators: formatValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &formatValidator{} + + return s + }, + }, + debugMap: make(map[*formatValidator]status), + allocMap: make(map[*formatValidator]string), + redeemMap: make(map[*formatValidator]string), + }, + poolOfTypeValidators: 
typeValidatorsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &typeValidator{} + + return s + }, + }, + debugMap: make(map[*typeValidator]status), + allocMap: make(map[*typeValidator]string), + redeemMap: make(map[*typeValidator]string), + }, + poolOfSchemas: schemasPool{ + Pool: &sync.Pool{ + New: func() any { + s := &spec.Schema{} + + return s + }, + }, + debugMap: make(map[*spec.Schema]status), + allocMap: make(map[*spec.Schema]string), + redeemMap: make(map[*spec.Schema]string), + }, + poolOfResults: resultsPool{ + Pool: &sync.Pool{ + New: func() any { + s := &Result{} + + return s + }, + }, + debugMap: make(map[*Result]status), + allocMap: make(map[*Result]string), + redeemMap: make(map[*Result]string), + }, + } +} + +const ( + statusFresh status = iota + 1 + statusRecycled + statusRedeemed +) + +func (s status) String() string { + switch s { + case statusFresh: + return "fresh" + case statusRecycled: + return "recycled" + case statusRedeemed: + return "redeemed" + default: + panic(fmt.Errorf("invalid status: %d", s)) + } +} + +type ( + // Debug + status uint8 + + allPools struct { + // memory pools for all validator objects. + // + // Each pool can be borrowed from and redeemed to. + poolOfSchemaValidators schemaValidatorsPool + poolOfObjectValidators objectValidatorsPool + poolOfSliceValidators sliceValidatorsPool + poolOfItemsValidators itemsValidatorsPool + poolOfBasicCommonValidators basicCommonValidatorsPool + poolOfHeaderValidators headerValidatorsPool + poolOfParamValidators paramValidatorsPool + poolOfBasicSliceValidators basicSliceValidatorsPool + poolOfNumberValidators numberValidatorsPool + poolOfStringValidators stringValidatorsPool + poolOfSchemaPropsValidators schemaPropsValidatorsPool + poolOfFormatValidators formatValidatorsPool + poolOfTypeValidators typeValidatorsPool + poolOfSchemas schemasPool + poolOfResults resultsPool + } + + schemaValidatorsPool struct { + *sync.Pool + debugMap map[*SchemaValidator]status + allocMap map[*SchemaValidator]string + redeemMap map[*SchemaValidator]string + mx sync.Mutex + } + + objectValidatorsPool struct { + *sync.Pool + debugMap map[*objectValidator]status + allocMap map[*objectValidator]string + redeemMap map[*objectValidator]string + mx sync.Mutex + } + + sliceValidatorsPool struct { + *sync.Pool + debugMap map[*schemaSliceValidator]status + allocMap map[*schemaSliceValidator]string + redeemMap map[*schemaSliceValidator]string + mx sync.Mutex + } + + itemsValidatorsPool struct { + *sync.Pool + debugMap map[*itemsValidator]status + allocMap map[*itemsValidator]string + redeemMap map[*itemsValidator]string + mx sync.Mutex + } + + basicCommonValidatorsPool struct { + *sync.Pool + debugMap map[*basicCommonValidator]status + allocMap map[*basicCommonValidator]string + redeemMap map[*basicCommonValidator]string + mx sync.Mutex + } + + headerValidatorsPool struct { + *sync.Pool + debugMap map[*HeaderValidator]status + allocMap map[*HeaderValidator]string + redeemMap map[*HeaderValidator]string + mx sync.Mutex + } + + paramValidatorsPool struct { + *sync.Pool + debugMap map[*ParamValidator]status + allocMap map[*ParamValidator]string + redeemMap map[*ParamValidator]string + mx sync.Mutex + } + + basicSliceValidatorsPool struct { + *sync.Pool + debugMap map[*basicSliceValidator]status + allocMap map[*basicSliceValidator]string + redeemMap map[*basicSliceValidator]string + mx sync.Mutex + } + + numberValidatorsPool struct { + *sync.Pool + debugMap map[*numberValidator]status + allocMap map[*numberValidator]string + redeemMap 
map[*numberValidator]string + mx sync.Mutex + } + + stringValidatorsPool struct { + *sync.Pool + debugMap map[*stringValidator]status + allocMap map[*stringValidator]string + redeemMap map[*stringValidator]string + mx sync.Mutex + } + + schemaPropsValidatorsPool struct { + *sync.Pool + debugMap map[*schemaPropsValidator]status + allocMap map[*schemaPropsValidator]string + redeemMap map[*schemaPropsValidator]string + mx sync.Mutex + } + + formatValidatorsPool struct { + *sync.Pool + debugMap map[*formatValidator]status + allocMap map[*formatValidator]string + redeemMap map[*formatValidator]string + mx sync.Mutex + } + + typeValidatorsPool struct { + *sync.Pool + debugMap map[*typeValidator]status + allocMap map[*typeValidator]string + redeemMap map[*typeValidator]string + mx sync.Mutex + } + + schemasPool struct { + *sync.Pool + debugMap map[*spec.Schema]status + allocMap map[*spec.Schema]string + redeemMap map[*spec.Schema]string + mx sync.Mutex + } + + resultsPool struct { + *sync.Pool + debugMap map[*Result]status + allocMap map[*Result]string + redeemMap map[*Result]string + mx sync.Mutex + } +) + +func (p *schemaValidatorsPool) BorrowValidator() *SchemaValidator { + s := p.Get().(*SchemaValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled schema should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *schemaValidatorsPool) RedeemValidator(s *SchemaValidator) { + // NOTE: s might be nil. In that case, Put is a noop. + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed schema should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed schema should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *objectValidatorsPool) BorrowValidator() *objectValidator { + s := p.Get().(*objectValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled object should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *objectValidatorsPool) RedeemValidator(s *objectValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed object should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed object should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *sliceValidatorsPool) BorrowValidator() *schemaSliceValidator { + s := p.Get().(*schemaSliceValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled schemaSliceValidator should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *sliceValidatorsPool) RedeemValidator(s *schemaSliceValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed schemaSliceValidator should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed schemaSliceValidator should have been allocated from a fresh or recycled pointer") + } + 
p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *itemsValidatorsPool) BorrowValidator() *itemsValidator { + s := p.Get().(*itemsValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled itemsValidator should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *itemsValidatorsPool) RedeemValidator(s *itemsValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed itemsValidator should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed itemsValidator should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *basicCommonValidatorsPool) BorrowValidator() *basicCommonValidator { + s := p.Get().(*basicCommonValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled basicCommonValidator should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *basicCommonValidatorsPool) RedeemValidator(s *basicCommonValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed basicCommonValidator should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed basicCommonValidator should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *headerValidatorsPool) BorrowValidator() *HeaderValidator { + s := p.Get().(*HeaderValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled HeaderValidator should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *headerValidatorsPool) RedeemValidator(s *HeaderValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed header should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed header should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *paramValidatorsPool) BorrowValidator() *ParamValidator { + s := p.Get().(*ParamValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled param should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *paramValidatorsPool) RedeemValidator(s *ParamValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed param should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed param should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *basicSliceValidatorsPool) BorrowValidator() *basicSliceValidator { + s := p.Get().(*basicSliceValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + 
p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled basicSliceValidator should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *basicSliceValidatorsPool) RedeemValidator(s *basicSliceValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed basicSliceValidator should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed basicSliceValidator should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *numberValidatorsPool) BorrowValidator() *numberValidator { + s := p.Get().(*numberValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled number should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *numberValidatorsPool) RedeemValidator(s *numberValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed number should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed number should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *stringValidatorsPool) BorrowValidator() *stringValidator { + s := p.Get().(*stringValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled string should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *stringValidatorsPool) RedeemValidator(s *stringValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed string should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed string should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *schemaPropsValidatorsPool) BorrowValidator() *schemaPropsValidator { + s := p.Get().(*schemaPropsValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled param should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *schemaPropsValidatorsPool) RedeemValidator(s *schemaPropsValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed schemaProps should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed schemaProps should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *formatValidatorsPool) BorrowValidator() *formatValidator { + s := p.Get().(*formatValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled format should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *formatValidatorsPool) RedeemValidator(s *formatValidator) { + 
p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed format should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed format should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *typeValidatorsPool) BorrowValidator() *typeValidator { + s := p.Get().(*typeValidator) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled type should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *typeValidatorsPool) RedeemValidator(s *typeValidator) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed type should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic(fmt.Errorf("redeemed type should have been allocated from a fresh or recycled pointer. Got status %s, already redeamed at: %s", x, p.redeemMap[s])) + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *schemasPool) BorrowSchema() *spec.Schema { + s := p.Get().(*spec.Schema) + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled spec.Schema should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *schemasPool) RedeemSchema(s *spec.Schema) { + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed spec.Schema should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed spec.Schema should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *resultsPool) BorrowResult() *Result { + s := p.Get().(*Result).cleared() + + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + p.debugMap[s] = statusFresh + } else { + if x != statusRedeemed { + panic("recycled result should have been redeemed") + } + p.debugMap[s] = statusRecycled + } + p.allocMap[s] = caller() + + return s +} + +func (p *resultsPool) RedeemResult(s *Result) { + if s == emptyResult { + if len(s.Errors) > 0 || len(s.Warnings) > 0 { + panic("empty result should not mutate") + } + return + } + p.mx.Lock() + defer p.mx.Unlock() + x, ok := p.debugMap[s] + if !ok { + panic("redeemed Result should have been allocated") + } + if x != statusRecycled && x != statusFresh { + panic("redeemed Result should have been allocated from a fresh or recycled pointer") + } + p.debugMap[s] = statusRedeemed + p.redeemMap[s] = caller() + p.Put(s) +} + +func (p *allPools) allIsRedeemed(t testing.TB) bool { + outcome := true + for k, v := range p.poolOfSchemaValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("schemaValidator should be redeemed. Allocated by: %s", p.poolOfSchemaValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfObjectValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("objectValidator should be redeemed. Allocated by: %s", p.poolOfObjectValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfSliceValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("sliceValidator should be redeemed. 
Allocated by: %s", p.poolOfSliceValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfItemsValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("itemsValidator should be redeemed. Allocated by: %s", p.poolOfItemsValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfBasicCommonValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("basicCommonValidator should be redeemed. Allocated by: %s", p.poolOfBasicCommonValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfHeaderValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("headerValidator should be redeemed. Allocated by: %s", p.poolOfHeaderValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfParamValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("paramValidator should be redeemed. Allocated by: %s", p.poolOfParamValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfBasicSliceValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("basicSliceValidator should be redeemed. Allocated by: %s", p.poolOfBasicSliceValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfNumberValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("numberValidator should be redeemed. Allocated by: %s", p.poolOfNumberValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfStringValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("stringValidator should be redeemed. Allocated by: %s", p.poolOfStringValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfSchemaPropsValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("schemaPropsValidator should be redeemed. Allocated by: %s", p.poolOfSchemaPropsValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfFormatValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("formatValidator should be redeemed. Allocated by: %s", p.poolOfFormatValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfTypeValidators.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("typeValidator should be redeemed. Allocated by: %s", p.poolOfTypeValidators.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfSchemas.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("schemas should be redeemed. Allocated by: %s", p.poolOfSchemas.allocMap[k]) + outcome = false + } + for k, v := range p.poolOfResults.debugMap { + if v == statusRedeemed { + continue + } + t.Logf("result should be redeemed. 
Allocated by: %s", p.poolOfResults.allocMap[k]) + outcome = false + } + + return outcome +} + +func caller() string { + pc, _, _, _ := runtime.Caller(3) //nolint:dogsled + from, line := runtime.FuncForPC(pc).FileLine(pc) + + return fmt.Sprintf("%s:%d", from, line) +} diff --git a/vendor/github.com/go-openapi/validate/result.go b/vendor/github.com/go-openapi/validate/result.go index 605ec39604..c80804a93d 100644 --- a/vendor/github.com/go-openapi/validate/result.go +++ b/vendor/github.com/go-openapi/validate/result.go @@ -15,7 +15,7 @@ package validate import ( - "fmt" + stderrors "errors" "reflect" "strings" @@ -52,8 +52,8 @@ type Result struct { // Schemata for slice items itemSchemata []itemSchemata - cachedFieldSchemta map[FieldKey][]*spec.Schema - cachedItemSchemata map[ItemKey][]*spec.Schema + cachedFieldSchemata map[FieldKey][]*spec.Schema + cachedItemSchemata map[ItemKey][]*spec.Schema wantsRedeemOnMerge bool } @@ -121,7 +121,7 @@ func (r *Result) Merge(others ...*Result) *Result { r.mergeWithoutRootSchemata(other) r.rootObjectSchemata.Append(other.rootObjectSchemata) if other.wantsRedeemOnMerge { - poolOfResults.RedeemResult(other) + pools.poolOfResults.RedeemResult(other) } } return r @@ -140,8 +140,8 @@ func (r *Result) RootObjectSchemata() []*spec.Schema { // FieldSchemata returns the schemata which apply to fields in objects. func (r *Result) FieldSchemata() map[FieldKey][]*spec.Schema { - if r.cachedFieldSchemta != nil { - return r.cachedFieldSchemta + if r.cachedFieldSchemata != nil { + return r.cachedFieldSchemata } ret := make(map[FieldKey][]*spec.Schema, len(r.fieldSchemata)) @@ -153,7 +153,8 @@ func (r *Result) FieldSchemata() map[FieldKey][]*spec.Schema { ret[key] = append(ret[key], fs.schemata.multiple...) } } - r.cachedFieldSchemta = ret + r.cachedFieldSchemata = ret + return ret } @@ -177,7 +178,7 @@ func (r *Result) ItemSchemata() map[ItemKey][]*spec.Schema { } func (r *Result) resetCaches() { - r.cachedFieldSchemta = nil + r.cachedFieldSchemata = nil r.cachedItemSchemata = nil } @@ -194,14 +195,15 @@ func (r *Result) mergeForField(obj map[string]interface{}, field string, other * if r.fieldSchemata == nil { r.fieldSchemata = make([]fieldSchemata, len(obj)) } + // clone other schemata, as other is about to be redeemed to the pool r.fieldSchemata = append(r.fieldSchemata, fieldSchemata{ obj: obj, field: field, - schemata: other.rootObjectSchemata, + schemata: other.rootObjectSchemata.Clone(), }) } if other.wantsRedeemOnMerge { - poolOfResults.RedeemResult(other) + pools.poolOfResults.RedeemResult(other) } return r @@ -220,14 +222,16 @@ func (r *Result) mergeForSlice(slice reflect.Value, i int, other *Result) *Resul if r.itemSchemata == nil { r.itemSchemata = make([]itemSchemata, slice.Len()) } + // clone other schemata, as other is about to be redeemed to the pool r.itemSchemata = append(r.itemSchemata, itemSchemata{ slice: slice, index: i, - schemata: other.rootObjectSchemata, + schemata: other.rootObjectSchemata.Clone(), }) } + if other.wantsRedeemOnMerge { - poolOfResults.RedeemResult(other) + pools.poolOfResults.RedeemResult(other) } return r @@ -272,17 +276,21 @@ func (r *Result) mergeWithoutRootSchemata(other *Result) { if other.fieldSchemata != nil { if r.fieldSchemata == nil { - r.fieldSchemata = other.fieldSchemata - } else { - r.fieldSchemata = append(r.fieldSchemata, other.fieldSchemata...) 
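This hunk, like the mergeForField and mergeForSlice changes above it, replaces slice adoption with cloning because `other` may be redeemed to the pool right after the merge; whoever adopted its schemata would then alias memory the pool is free to hand out again. A stand-alone sketch of that aliasing bug, with stand-in types rather than the library's Result:

package main

import (
	"fmt"
	"sync"
)

type result struct{ schemata []string }

var pool = &sync.Pool{New: func() any { return &result{} }}

func main() {
	other := pool.Get().(*result)
	other.schemata = append(other.schemata[:0], "A")

	merged := other.schemata // adopted without cloning
	other.schemata = other.schemata[:0]
	pool.Put(other) // redeemed: its backing array is now up for reuse

	next := pool.Get().(*result) // likely the same object on one goroutine
	next.schemata = append(next.schemata, "B")

	fmt.Println(merged) // may print [B]: the merged data was overwritten
}

Cloning before the redemption breaks the aliasing at the cost of a copy, which is what the added loops below do.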
+ r.fieldSchemata = make([]fieldSchemata, 0, len(other.fieldSchemata)) + } + for _, field := range other.fieldSchemata { + field.schemata = field.schemata.Clone() + r.fieldSchemata = append(r.fieldSchemata, field) } } if other.itemSchemata != nil { if r.itemSchemata == nil { - r.itemSchemata = other.itemSchemata - } else { - r.itemSchemata = append(r.itemSchemata, other.itemSchemata...) + r.itemSchemata = make([]itemSchemata, 0, len(other.itemSchemata)) + } + for _, field := range other.itemSchemata { + field.schemata = field.schemata.Clone() + r.itemSchemata = append(r.itemSchemata, field) } } } @@ -298,7 +306,7 @@ func (r *Result) MergeAsErrors(others ...*Result) *Result { r.AddErrors(other.Warnings...) r.MatchCount += other.MatchCount if other.wantsRedeemOnMerge { - poolOfResults.RedeemResult(other) + pools.poolOfResults.RedeemResult(other) } } } @@ -316,7 +324,7 @@ func (r *Result) MergeAsWarnings(others ...*Result) *Result { r.AddWarnings(other.Warnings...) r.MatchCount += other.MatchCount if other.wantsRedeemOnMerge { - poolOfResults.RedeemResult(other) + pools.poolOfResults.RedeemResult(other) } } } @@ -379,18 +387,18 @@ func (r *Result) keepRelevantErrors() *Result { strippedErrors := []error{} for _, e := range r.Errors { if strings.HasPrefix(e.Error(), "IMPORTANT!") { - strippedErrors = append(strippedErrors, fmt.Errorf(strings.TrimPrefix(e.Error(), "IMPORTANT!"))) + strippedErrors = append(strippedErrors, stderrors.New(strings.TrimPrefix(e.Error(), "IMPORTANT!"))) } } strippedWarnings := []error{} for _, e := range r.Warnings { if strings.HasPrefix(e.Error(), "IMPORTANT!") { - strippedWarnings = append(strippedWarnings, fmt.Errorf(strings.TrimPrefix(e.Error(), "IMPORTANT!"))) + strippedWarnings = append(strippedWarnings, stderrors.New(strings.TrimPrefix(e.Error(), "IMPORTANT!"))) } } var strippedResult *Result if r.wantsRedeemOnMerge { - strippedResult = poolOfResults.BorrowResult() + strippedResult = pools.poolOfResults.BorrowResult() } else { strippedResult = new(Result) } @@ -465,8 +473,8 @@ func (r *Result) cleared() *Result { r.rootObjectSchemata.multiple = r.rootObjectSchemata.multiple[:0] r.fieldSchemata = r.fieldSchemata[:0] r.itemSchemata = r.itemSchemata[:0] - for k := range r.cachedFieldSchemta { - delete(r.cachedFieldSchemta, k) + for k := range r.cachedFieldSchemata { + delete(r.cachedFieldSchemata, k) } for k := range r.cachedItemSchemata { delete(r.cachedItemSchemata, k) @@ -502,7 +510,7 @@ func (s *schemata) Slice() []*spec.Schema { return s.multiple } -// appendSchemata appends the schemata in other to s. It mutated s in-place. +// appendSchemata appends the schemata in other to s. It mutates s in-place. 
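A note on the fmt.Errorf to stderrors.New swap in keepRelevantErrors above: the stripped message is data, not a format template, so handing it to Errorf risks mangling any literal % runes in it (and recent go vet flags non-constant format strings). A small illustration with a made-up message, not one the library emits:

package main

import (
	"errors"
	"fmt"
)

func main() {
	msg := "IMPORTANT!value must look like %s{3}" // stripped text containing a stray verb
	fmt.Println(fmt.Errorf(msg))  // "...must look like %!s(MISSING){3}" — mangled
	fmt.Println(errors.New(msg)) // preserved verbatim
}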
func (s *schemata) Append(other schemata) { if other.one == nil && len(other.multiple) == 0 { return @@ -533,3 +541,23 @@ func (s *schemata) Append(other schemata) { } } } + +func (s schemata) Clone() schemata { + var clone schemata + + if s.one != nil { + clone.one = new(spec.Schema) + *clone.one = *s.one + } + + if len(s.multiple) > 0 { + clone.multiple = make([]*spec.Schema, len(s.multiple)) + for idx := 0; idx < len(s.multiple); idx++ { + sp := new(spec.Schema) + *sp = *s.multiple[idx] + clone.multiple[idx] = sp + } + } + + return clone +} diff --git a/vendor/github.com/go-openapi/validate/schema.go b/vendor/github.com/go-openapi/validate/schema.go index a359331121..db65264fd1 100644 --- a/vendor/github.com/go-openapi/validate/schema.go +++ b/vendor/github.com/go-openapi/validate/schema.go @@ -43,7 +43,7 @@ func AgainstSchema(schema *spec.Schema, data interface{}, formats strfmt.Registr append(options, WithRecycleValidators(true), withRecycleResults(true))..., ).Validate(data) defer func() { - poolOfResults.RedeemResult(res) + pools.poolOfResults.RedeemResult(res) }() if res.HasErrors() { @@ -88,7 +88,7 @@ func newSchemaValidator(schema *spec.Schema, rootSchema interface{}, root string var s *SchemaValidator if opts.recycleValidators { - s = poolOfSchemaValidators.BorrowValidator() + s = pools.poolOfSchemaValidators.BorrowValidator() } else { s = new(SchemaValidator) } @@ -133,13 +133,14 @@ func (s *SchemaValidator) Validate(data interface{}) *Result { if s.Options.recycleValidators { defer func() { + s.redeemChildren() s.redeem() // one-time use validator }() } var result *Result if s.Options.recycleResult { - result = poolOfResults.BorrowResult() + result = pools.poolOfResults.BorrowResult() result.data = data } else { result = &Result{data: data} @@ -157,7 +158,6 @@ func (s *SchemaValidator) Validate(data interface{}) *Result { if s.Options.recycleValidators { s.validators[0] = nil s.validators[6] = nil - s.redeemChildren() } return result @@ -188,6 +188,7 @@ func (s *SchemaValidator) Validate(data interface{}) *Result { if erri != nil { result.AddErrors(invalidTypeConversionMsg(s.Path, erri)) result.Inc() + return result } d = in @@ -196,6 +197,7 @@ func (s *SchemaValidator) Validate(data interface{}) *Result { if errf != nil { result.AddErrors(invalidTypeConversionMsg(s.Path, errf)) result.Inc() + return result } d = nf @@ -222,6 +224,9 @@ func (s *SchemaValidator) Validate(data interface{}) *Result { } result.Merge(v.Validate(d)) + if s.Options.recycleValidators { + s.validators[idx] = nil // prevents further (unsafe) usage + } result.Inc() } result.Inc() @@ -330,7 +335,7 @@ func (s *SchemaValidator) objectValidator() valueValidator { } func (s *SchemaValidator) redeem() { - poolOfSchemaValidators.RedeemValidator(s) + pools.poolOfSchemaValidators.RedeemValidator(s) } func (s *SchemaValidator) redeemChildren() { diff --git a/vendor/github.com/go-openapi/validate/schema_props.go b/vendor/github.com/go-openapi/validate/schema_props.go index bbea4a37db..1ca379244d 100644 --- a/vendor/github.com/go-openapi/validate/schema_props.go +++ b/vendor/github.com/go-openapi/validate/schema_props.go @@ -70,7 +70,7 @@ func newSchemaPropsValidator( var s *schemaPropsValidator if opts.recycleValidators { - s = poolOfSchemaPropsValidators.BorrowValidator() + s = pools.poolOfSchemaPropsValidators.BorrowValidator() } else { s = new(schemaPropsValidator) } @@ -100,8 +100,8 @@ func (s *schemaPropsValidator) Applies(source interface{}, _ reflect.Kind) bool func (s *schemaPropsValidator) Validate(data 
interface{}) *Result { var mainResult *Result - if s.Options.recycleValidators { - mainResult = poolOfResults.BorrowResult() + if s.Options.recycleResult { + mainResult = pools.poolOfResults.BorrowResult() } else { mainResult = new(Result) } @@ -109,179 +109,240 @@ func (s *schemaPropsValidator) Validate(data interface{}) *Result { // Intermediary error results // IMPORTANT! messages from underlying validators - keepResultAnyOf := poolOfResults.BorrowResult() - keepResultOneOf := poolOfResults.BorrowResult() - keepResultAllOf := poolOfResults.BorrowResult() + var keepResultAnyOf, keepResultOneOf, keepResultAllOf *Result if s.Options.recycleValidators { defer func() { + s.redeemChildren() s.redeem() // results are redeemed when merged }() } - // Validates at least one in anyOf schemas - var firstSuccess *Result if len(s.anyOfValidators) > 0 { - var bestFailures *Result - succeededOnce := false - for _, anyOfSchema := range s.anyOfValidators { - result := anyOfSchema.Validate(data) - // We keep inner IMPORTANT! errors no matter what MatchCount tells us - keepResultAnyOf.Merge(result.keepRelevantErrors()) - if result.IsValid() { - bestFailures = nil - succeededOnce = true - firstSuccess = result - _ = keepResultAnyOf.cleared() + keepResultAnyOf = pools.poolOfResults.BorrowResult() + s.validateAnyOf(data, mainResult, keepResultAnyOf) + } - break - } - // MatchCount is used to select errors from the schema with most positive checks - if bestFailures == nil || result.MatchCount > bestFailures.MatchCount { - bestFailures = result - } + if len(s.oneOfValidators) > 0 { + keepResultOneOf = pools.poolOfResults.BorrowResult() + s.validateOneOf(data, mainResult, keepResultOneOf) + } + + if len(s.allOfValidators) > 0 { + keepResultAllOf = pools.poolOfResults.BorrowResult() + s.validateAllOf(data, mainResult, keepResultAllOf) + } + + if s.notValidator != nil { + s.validateNot(data, mainResult) + } + + if s.Dependencies != nil && len(s.Dependencies) > 0 && reflect.TypeOf(data).Kind() == reflect.Map { + s.validateDependencies(data, mainResult) + } + + mainResult.Inc() + + // In the end we retain best failures for schema validation + // plus, if any, composite errors which may explain special cases (tagged as IMPORTANT!). + return mainResult.Merge(keepResultAllOf, keepResultOneOf, keepResultAnyOf) +} + +func (s *schemaPropsValidator) validateAnyOf(data interface{}, mainResult, keepResultAnyOf *Result) { + // Validates at least one in anyOf schemas + var bestFailures *Result + + for i, anyOfSchema := range s.anyOfValidators { + result := anyOfSchema.Validate(data) + if s.Options.recycleValidators { + s.anyOfValidators[i] = nil } + // We keep inner IMPORTANT! 
errors no matter what MatchCount tells us + keepResultAnyOf.Merge(result.keepRelevantErrors()) // merges (and redeems) a new instance of Result + + if result.IsValid() { + if bestFailures != nil && bestFailures.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(bestFailures) + } - if !succeededOnce { - mainResult.AddErrors(mustValidateAtLeastOneSchemaMsg(s.Path)) + _ = keepResultAnyOf.cleared() + mainResult.Merge(result) + + return } - if bestFailures != nil { - mainResult.Merge(bestFailures) - if firstSuccess != nil && firstSuccess.wantsRedeemOnMerge { - poolOfResults.RedeemResult(firstSuccess) + + // MatchCount is used to select errors from the schema with most positive checks + if bestFailures == nil || result.MatchCount > bestFailures.MatchCount { + if bestFailures != nil && bestFailures.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(bestFailures) } - } else if firstSuccess != nil { - mainResult.Merge(firstSuccess) + bestFailures = result + + continue + } + + if result.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(result) // this result is ditched } } + mainResult.AddErrors(mustValidateAtLeastOneSchemaMsg(s.Path)) + mainResult.Merge(bestFailures) +} + +func (s *schemaPropsValidator) validateOneOf(data interface{}, mainResult, keepResultOneOf *Result) { // Validates exactly one in oneOf schemas - if len(s.oneOfValidators) > 0 { - var bestFailures *Result - var firstSuccess *Result - validated := 0 - - for _, oneOfSchema := range s.oneOfValidators { - result := oneOfSchema.Validate(data) - // We keep inner IMPORTANT! errors no matter what MatchCount tells us - keepResultOneOf.Merge(result.keepRelevantErrors()) - if result.IsValid() { - validated++ - bestFailures = nil - if firstSuccess == nil { - firstSuccess = result - } - _ = keepResultOneOf.cleared() - continue - } - // MatchCount is used to select errors from the schema with most positive checks - if validated == 0 && (bestFailures == nil || result.MatchCount > bestFailures.MatchCount) { - bestFailures = result - } + var ( + firstSuccess, bestFailures *Result + validated int + ) + + for i, oneOfSchema := range s.oneOfValidators { + result := oneOfSchema.Validate(data) + if s.Options.recycleValidators { + s.oneOfValidators[i] = nil } - if validated != 1 { - var additionalMsg string - if validated == 0 { - additionalMsg = "Found none valid" - } else { - additionalMsg = fmt.Sprintf("Found %d valid alternatives", validated) - } + // We keep inner IMPORTANT! 
errors no matter what MatchCount tells us + keepResultOneOf.Merge(result.keepRelevantErrors()) // merges (and redeems) a new instance of Result - mainResult.AddErrors(mustValidateOnlyOneSchemaMsg(s.Path, additionalMsg)) - if bestFailures != nil { - mainResult.Merge(bestFailures) - } - if firstSuccess != nil && firstSuccess.wantsRedeemOnMerge { - poolOfResults.RedeemResult(firstSuccess) + if result.IsValid() { + validated++ + _ = keepResultOneOf.cleared() + + if firstSuccess == nil { + firstSuccess = result + } else if result.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(result) // this result is ditched } - } else if firstSuccess != nil { - mainResult.Merge(firstSuccess) + + continue + } + + // MatchCount is used to select errors from the schema with most positive checks + if validated == 0 && (bestFailures == nil || result.MatchCount > bestFailures.MatchCount) { if bestFailures != nil && bestFailures.wantsRedeemOnMerge { - poolOfResults.RedeemResult(bestFailures) + pools.poolOfResults.RedeemResult(bestFailures) } + bestFailures = result + } else if result.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(result) // this result is ditched } } - // Validates all of allOf schemas - if len(s.allOfValidators) > 0 { - validated := 0 - - for _, allOfSchema := range s.allOfValidators { - result := allOfSchema.Validate(data) - // We keep inner IMPORTANT! errors no matter what MatchCount tells us - keepResultAllOf.Merge(result.keepRelevantErrors()) - if result.IsValid() { - validated++ - } - mainResult.Merge(result) + switch validated { + case 0: + mainResult.AddErrors(mustValidateOnlyOneSchemaMsg(s.Path, "Found none valid")) + mainResult.Merge(bestFailures) + // firstSucess necessarily nil + case 1: + mainResult.Merge(firstSuccess) + if bestFailures != nil && bestFailures.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(bestFailures) } - - if validated != len(s.allOfValidators) { - additionalMsg := "" - if validated == 0 { - additionalMsg = ". None validated" - } - - mainResult.AddErrors(mustValidateAllSchemasMsg(s.Path, additionalMsg)) + default: + mainResult.AddErrors(mustValidateOnlyOneSchemaMsg(s.Path, fmt.Sprintf("Found %d valid alternatives", validated))) + mainResult.Merge(bestFailures) + if firstSuccess != nil && firstSuccess.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(firstSuccess) } } +} - if s.notValidator != nil { - result := s.notValidator.Validate(data) +func (s *schemaPropsValidator) validateAllOf(data interface{}, mainResult, keepResultAllOf *Result) { + // Validates all of allOf schemas + var validated int + + for i, allOfSchema := range s.allOfValidators { + result := allOfSchema.Validate(data) + if s.Options.recycleValidators { + s.allOfValidators[i] = nil + } // We keep inner IMPORTANT! errors no matter what MatchCount tells us + keepResultAllOf.Merge(result.keepRelevantErrors()) if result.IsValid() { - mainResult.AddErrors(mustNotValidatechemaMsg(s.Path)) + validated++ } + mainResult.Merge(result) } - if s.Dependencies != nil && len(s.Dependencies) > 0 && reflect.TypeOf(data).Kind() == reflect.Map { - val := data.(map[string]interface{}) - for key := range val { - if dep, ok := s.Dependencies[key]; ok { - - if dep.Schema != nil { - mainResult.Merge( - newSchemaValidator(dep.Schema, s.Root, s.Path+"."+key, s.KnownFormats, s.Options).Validate(data), - ) - continue - } + switch validated { + case 0: + mainResult.AddErrors(mustValidateAllSchemasMsg(s.Path, ". 
None validated")) + case len(s.allOfValidators): + default: + mainResult.AddErrors(mustValidateAllSchemasMsg(s.Path, "")) + } +} + +func (s *schemaPropsValidator) validateNot(data interface{}, mainResult *Result) { + result := s.notValidator.Validate(data) + if s.Options.recycleValidators { + s.notValidator = nil + } + // We keep inner IMPORTANT! errors no matter what MatchCount tells us + if result.IsValid() { + mainResult.AddErrors(mustNotValidatechemaMsg(s.Path)) + } + if result.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(result) // this result is ditched + } +} + +func (s *schemaPropsValidator) validateDependencies(data interface{}, mainResult *Result) { + val := data.(map[string]interface{}) + for key := range val { + dep, ok := s.Dependencies[key] + if !ok { + continue + } + + if dep.Schema != nil { + mainResult.Merge( + newSchemaValidator(dep.Schema, s.Root, s.Path+"."+key, s.KnownFormats, s.Options).Validate(data), + ) + continue + } - if len(dep.Property) > 0 { - for _, depKey := range dep.Property { - if _, ok := val[depKey]; !ok { - mainResult.AddErrors(hasADependencyMsg(s.Path, depKey)) - } - } + if len(dep.Property) > 0 { + for _, depKey := range dep.Property { + if _, ok := val[depKey]; !ok { + mainResult.AddErrors(hasADependencyMsg(s.Path, depKey)) } } } } - - mainResult.Inc() - // In the end we retain best failures for schema validation - // plus, if any, composite errors which may explain special cases (tagged as IMPORTANT!). - return mainResult.Merge(keepResultAllOf, keepResultOneOf, keepResultAnyOf) } func (s *schemaPropsValidator) redeem() { - poolOfSchemaPropsValidators.RedeemValidator(s) + pools.poolOfSchemaPropsValidators.RedeemValidator(s) } func (s *schemaPropsValidator) redeemChildren() { for _, v := range s.anyOfValidators { + if v == nil { + continue + } v.redeemChildren() v.redeem() } s.anyOfValidators = nil + for _, v := range s.allOfValidators { + if v == nil { + continue + } v.redeemChildren() v.redeem() } s.allOfValidators = nil + for _, v := range s.oneOfValidators { + if v == nil { + continue + } v.redeemChildren() v.redeem() } diff --git a/vendor/github.com/go-openapi/validate/slice_validator.go b/vendor/github.com/go-openapi/validate/slice_validator.go index e974d3e052..13bb02087d 100644 --- a/vendor/github.com/go-openapi/validate/slice_validator.go +++ b/vendor/github.com/go-openapi/validate/slice_validator.go @@ -45,7 +45,7 @@ func newSliceValidator(path, in string, var v *schemaSliceValidator if opts.recycleValidators { - v = poolOfSliceValidators.BorrowValidator() + v = pools.poolOfSliceValidators.BorrowValidator() } else { v = new(schemaSliceValidator) } @@ -83,7 +83,7 @@ func (s *schemaSliceValidator) Validate(data interface{}) *Result { var result *Result if s.Options.recycleResult { - result = poolOfResults.BorrowResult() + result = pools.poolOfResults.BorrowResult() } else { result = new(Result) } @@ -146,5 +146,5 @@ func (s *schemaSliceValidator) Validate(data interface{}) *Result { } func (s *schemaSliceValidator) redeem() { - poolOfSliceValidators.RedeemValidator(s) + pools.poolOfSliceValidators.RedeemValidator(s) } diff --git a/vendor/github.com/go-openapi/validate/spec.go b/vendor/github.com/go-openapi/validate/spec.go index ee322a2df1..965452566e 100644 --- a/vendor/github.com/go-openapi/validate/spec.go +++ b/vendor/github.com/go-openapi/validate/spec.go @@ -15,6 +15,8 @@ package validate import ( + "bytes" + "encoding/gob" "encoding/json" "fmt" "sort" @@ -69,7 +71,7 @@ func NewSpecValidator(schema *spec.Schema, formats 
strfmt.Registry) *SpecValidat for _, o := range []Option{ SwaggerSchema(true), WithRecycleValidators(true), - withRecycleResults(true), + // withRecycleResults(true), } { o(schemaOptions) } @@ -160,7 +162,7 @@ func (s *SpecValidator) Validate(data interface{}) (*Result, *Result) { } func (s *SpecValidator) validateNonEmptyPathParamNames() *Result { - res := poolOfResults.BorrowResult() + res := pools.poolOfResults.BorrowResult() if s.spec.Spec().Paths == nil { // There is no Paths object: error res.AddErrors(noValidPathMsg()) @@ -194,7 +196,7 @@ func (s *SpecValidator) validateDuplicateOperationIDs() *Result { // fallback on possible incomplete picture because of previous errors analyzer = s.analyzer } - res := poolOfResults.BorrowResult() + res := pools.poolOfResults.BorrowResult() known := make(map[string]int) for _, v := range analyzer.OperationIDs() { if v != "" { @@ -216,7 +218,7 @@ type dupProp struct { func (s *SpecValidator) validateDuplicatePropertyNames() *Result { // definition can't declare a property that's already defined by one of its ancestors - res := poolOfResults.BorrowResult() + res := pools.poolOfResults.BorrowResult() for k, sch := range s.spec.Spec().Definitions { if len(sch.AllOf) == 0 { continue @@ -265,7 +267,7 @@ func (s *SpecValidator) validateSchemaPropertyNames(nm string, sch spec.Schema, schn := nm schc := &sch - res := poolOfResults.BorrowResult() + res := pools.poolOfResults.BorrowResult() for schc.Ref.String() != "" { // gather property names @@ -302,7 +304,7 @@ func (s *SpecValidator) validateSchemaPropertyNames(nm string, sch spec.Schema, } func (s *SpecValidator) validateCircularAncestry(nm string, sch spec.Schema, knowns map[string]struct{}) ([]string, *Result) { - res := poolOfResults.BorrowResult() + res := pools.poolOfResults.BorrowResult() if sch.Ref.String() == "" && len(sch.AllOf) == 0 { // Safeguard. We should not be able to actually get there return nil, res @@ -352,7 +354,7 @@ func (s *SpecValidator) validateCircularAncestry(nm string, sch spec.Schema, kno func (s *SpecValidator) validateItems() *Result { // validate parameter, items, schema and response objects for presence of item if type is array - res := poolOfResults.BorrowResult() + res := pools.poolOfResults.BorrowResult() for method, pi := range s.analyzer.Operations() { for path, op := range pi { @@ -411,7 +413,7 @@ func (s *SpecValidator) validateItems() *Result { // Verifies constraints on array type func (s *SpecValidator) validateSchemaItems(schema spec.Schema, prefix, opID string) *Result { - res := poolOfResults.BorrowResult() + res := pools.poolOfResults.BorrowResult() if !schema.Type.Contains(arrayType) { return res } @@ -435,7 +437,7 @@ func (s *SpecValidator) validateSchemaItems(schema spec.Schema, prefix, opID str func (s *SpecValidator) validatePathParamPresence(path string, fromPath, fromOperation []string) *Result { // Each defined operation path parameters must correspond to a named element in the API's path pattern. // (For example, you cannot have a path parameter named id for the following path /pets/{petId} but you must have a path parameter named petId.) 
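That rule — every templated segment in the path must be backed by an identically named path parameter — is easy to picture outside the validator. A hypothetical helper, not the library's implementation:

package main

import (
	"fmt"
	"regexp"
)

var templated = regexp.MustCompile(`\{([^}]+)\}`)

// undeclared lists template variables in pathTemplate that have no
// matching declared path parameter.
func undeclared(pathTemplate string, declared []string) []string {
	known := make(map[string]bool, len(declared))
	for _, d := range declared {
		known[d] = true
	}
	var missing []string
	for _, m := range templated.FindAllStringSubmatch(pathTemplate, -1) {
		if !known[m[1]] {
			missing = append(missing, m[1])
		}
	}
	return missing
}

func main() {
	fmt.Println(undeclared("/pets/{petId}", []string{"id"})) // [petId]
}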
- res := poolOfResults.BorrowResult() + res := pools.poolOfResults.BorrowResult() for _, l := range fromPath { var matched bool for _, r := range fromOperation { @@ -491,7 +493,7 @@ func (s *SpecValidator) validateReferencedParameters() *Result { if len(expected) == 0 { return nil } - result := poolOfResults.BorrowResult() + result := pools.poolOfResults.BorrowResult() for k := range expected { result.AddWarnings(unusedParamMsg(k)) } @@ -516,7 +518,7 @@ func (s *SpecValidator) validateReferencedResponses() *Result { if len(expected) == 0 { return nil } - result := poolOfResults.BorrowResult() + result := pools.poolOfResults.BorrowResult() for k := range expected { result.AddWarnings(unusedResponseMsg(k)) } @@ -551,7 +553,7 @@ func (s *SpecValidator) validateReferencedDefinitions() *Result { func (s *SpecValidator) validateRequiredDefinitions() *Result { // Each property listed in the required array must be defined in the properties of the model - res := poolOfResults.BorrowResult() + res := pools.poolOfResults.BorrowResult() DEFINITIONS: for d, schema := range s.spec.Spec().Definitions { @@ -570,7 +572,7 @@ DEFINITIONS: func (s *SpecValidator) validateRequiredProperties(path, in string, v *spec.Schema) *Result { // Takes care of recursive property definitions, which may be nested in additionalProperties schemas - res := poolOfResults.BorrowResult() + res := pools.poolOfResults.BorrowResult() propertyMatch := false patternMatch := false additionalPropertiesMatch := false @@ -636,7 +638,7 @@ func (s *SpecValidator) validateParameters() *Result { // - parameters with pattern property must specify valid patterns // - $ref in parameters must resolve // - path param must be required - res := poolOfResults.BorrowResult() + res := pools.poolOfResults.BorrowResult() rexGarbledPathSegment := mustCompileRegexp(`.*[{}\s]+.*`) for method, pi := range s.expandedAnalyzer().Operations() { methodPaths := make(map[string]map[string]string) @@ -675,17 +677,22 @@ func (s *SpecValidator) validateParameters() *Result { // TODO: should be done after param expansion res.Merge(s.checkUniqueParams(path, method, op)) - paramSchema, ok := s.schema.Definitions["parameter"] + // pick the root schema from the swagger specification which describes a parameter + origSchema, ok := s.schema.Definitions["parameter"] if !ok { panic("unexpected swagger schema: missing #/definitions/parameter") } + // clone it once to avoid expanding a global schema (e.g. 
swagger spec) + paramSchema, err := deepCloneSchema(origSchema) + if err != nil { + panic(fmt.Errorf("can't clone schema: %v", err)) + } for _, pr := range paramHelp.safeExpandedParamsFor(path, method, op.ID, res, s) { - // An expanded parameter must validate its schema (an unexpanded $ref always passes high-level schema validation) + // An expanded parameter must validate the Parameter schema (an unexpanded $ref always passes high-level schema validation) schv := newSchemaValidator(¶mSchema, s.schema, fmt.Sprintf("%s.%s.parameters.%s", path, method, pr.Name), s.KnownFormats, s.schemaOptions) obj := swag.ToDynamicJSON(pr) - paramValidationResult := schv.Validate(obj) - res.Merge(paramValidationResult) + res.Merge(schv.Validate(obj)) // Validate pattern regexp for parameters with a Pattern property if _, err := compileRegexp(pr.Pattern); err != nil { @@ -768,7 +775,7 @@ func (s *SpecValidator) validateParameters() *Result { func (s *SpecValidator) validateReferencesValid() *Result { // each reference must point to a valid object - res := poolOfResults.BorrowResult() + res := pools.poolOfResults.BorrowResult() for _, r := range s.analyzer.AllRefs() { if !r.IsValidURI(s.spec.SpecFilePath()) { // Safeguard - spec should always yield a valid URI res.AddErrors(invalidRefMsg(r.String())) @@ -794,7 +801,7 @@ func (s *SpecValidator) checkUniqueParams(path, method string, op *spec.Operatio // However, there are some issues with such a factorization: // - analysis does not seem to fully expand params // - param keys may be altered by x-go-name - res := poolOfResults.BorrowResult() + res := pools.poolOfResults.BorrowResult() pnames := make(map[string]struct{}) if op.Parameters != nil { // Safeguard @@ -829,3 +836,17 @@ func (s *SpecValidator) expandedAnalyzer() *analysis.Spec { } return s.analyzer } + +func deepCloneSchema(src spec.Schema) (spec.Schema, error) { + var b bytes.Buffer + if err := gob.NewEncoder(&b).Encode(src); err != nil { + return spec.Schema{}, err + } + + var dst spec.Schema + if err := gob.NewDecoder(&b).Decode(&dst); err != nil { + return spec.Schema{}, err + } + + return dst, nil +} diff --git a/vendor/github.com/go-openapi/validate/type.go b/vendor/github.com/go-openapi/validate/type.go index 5647a4429e..f87abb3d56 100644 --- a/vendor/github.com/go-openapi/validate/type.go +++ b/vendor/github.com/go-openapi/validate/type.go @@ -40,7 +40,7 @@ func newTypeValidator(path, in string, typ spec.StringOrArray, nullable bool, fo var t *typeValidator if opts.recycleValidators { - t = poolOfTypeValidators.BorrowValidator() + t = pools.poolOfTypeValidators.BorrowValidator() } else { t = new(typeValidator) } @@ -209,5 +209,5 @@ func (t *typeValidator) Validate(data interface{}) *Result { } func (t *typeValidator) redeem() { - poolOfTypeValidators.RedeemValidator(t) + pools.poolOfTypeValidators.RedeemValidator(t) } diff --git a/vendor/github.com/go-openapi/validate/validator.go b/vendor/github.com/go-openapi/validate/validator.go index aa322643c3..c083aecc9d 100644 --- a/vendor/github.com/go-openapi/validate/validator.go +++ b/vendor/github.com/go-openapi/validate/validator.go @@ -51,7 +51,7 @@ func newItemsValidator(path, in string, items *spec.Items, root interface{}, for var iv *itemsValidator if opts.recycleValidators { - iv = poolOfItemsValidators.BorrowValidator() + iv = pools.poolOfItemsValidators.BorrowValidator() } else { iv = new(itemsValidator) } @@ -83,11 +83,11 @@ func (i *itemsValidator) Validate(index int, data interface{}) *Result { tpe := reflect.TypeOf(data) kind := 
tpe.Kind() - var mainResult *Result + var result *Result if i.Options.recycleResult { - mainResult = poolOfResults.BorrowResult() + result = pools.poolOfResults.BorrowResult() } else { - mainResult = new(Result) + result = new(Result) } path := fmt.Sprintf("%s.%d", i.path, index) @@ -109,15 +109,23 @@ func (i *itemsValidator) Validate(index int, data interface{}) *Result { } validator.SetPath(path) - result := validator.Validate(data) - mainResult.Merge(result) - mainResult.Inc() - if result != nil && result.HasErrors() { - break + err := validator.Validate(data) + if i.Options.recycleValidators { + i.validators[idx] = nil // prevents further (unsafe) usage + } + if err != nil { + result.Inc() + if err.HasErrors() { + result.Merge(err) + + break + } + + result.Merge(err) } } - return mainResult + return result } func (i *itemsValidator) typeValidator() valueValidator { @@ -197,7 +205,7 @@ func (i *itemsValidator) formatValidator() valueValidator { } func (i *itemsValidator) redeem() { - poolOfItemsValidators.RedeemValidator(i) + pools.poolOfItemsValidators.RedeemValidator(i) } func (i *itemsValidator) redeemChildren() { @@ -230,7 +238,7 @@ func newBasicCommonValidator(path, in string, def interface{}, enum []interface{ var b *basicCommonValidator if opts.recycleValidators { - b = poolOfBasicCommonValidators.BorrowValidator() + b = pools.poolOfBasicCommonValidators.BorrowValidator() } else { b = new(basicCommonValidator) } @@ -286,7 +294,7 @@ func (b *basicCommonValidator) Validate(data interface{}) (res *Result) { } func (b *basicCommonValidator) redeem() { - poolOfBasicCommonValidators.RedeemValidator(b) + pools.poolOfBasicCommonValidators.RedeemValidator(b) } // A HeaderValidator has very limited subset of validations to apply @@ -315,7 +323,7 @@ func newHeaderValidator(name string, header *spec.Header, formats strfmt.Registr var p *HeaderValidator if opts.recycleValidators { - p = poolOfHeaderValidators.BorrowValidator() + p = pools.poolOfHeaderValidators.BorrowValidator() } else { p = new(HeaderValidator) } @@ -358,7 +366,7 @@ func (p *HeaderValidator) Validate(data interface{}) *Result { var result *Result if p.Options.recycleResult { - result = poolOfResults.BorrowResult() + result = pools.poolOfResults.BorrowResult() } else { result = new(Result) } @@ -382,13 +390,19 @@ func (p *HeaderValidator) Validate(data interface{}) *Result { continue } - if err := validator.Validate(data); err != nil { - result.Merge(err) + err := validator.Validate(data) + if p.Options.recycleValidators { + p.validators[idx] = nil // prevents further (unsafe) usage + } + if err != nil { if err.HasErrors() { + result.Merge(err) break } + result.Merge(err) } } + return result } @@ -458,7 +472,7 @@ func (p *HeaderValidator) formatValidator() valueValidator { } func (p *HeaderValidator) redeem() { - poolOfHeaderValidators.RedeemValidator(p) + pools.poolOfHeaderValidators.RedeemValidator(p) } func (p *HeaderValidator) redeemChildren() { @@ -501,7 +515,7 @@ func newParamValidator(param *spec.Parameter, formats strfmt.Registry, opts *Sch var p *ParamValidator if opts.recycleValidators { - p = poolOfParamValidators.BorrowValidator() + p = pools.poolOfParamValidators.BorrowValidator() } else { p = new(ParamValidator) } @@ -536,7 +550,7 @@ func (p *ParamValidator) Validate(data interface{}) *Result { var result *Result if p.Options.recycleResult { - result = poolOfResults.BorrowResult() + result = pools.poolOfResults.BorrowResult() } else { result = new(Result) } @@ -568,11 +582,16 @@ func (p *ParamValidator) 
Validate(data interface{}) *Result { continue } - if err := validator.Validate(data); err != nil { - result.Merge(err) + err := validator.Validate(data) + if p.Options.recycleValidators { + p.validators[idx] = nil // prevents further (unsafe) usage + } + if err != nil { if err.HasErrors() { + result.Merge(err) break } + result.Merge(err) } } @@ -645,7 +664,7 @@ func (p *ParamValidator) formatValidator() valueValidator { } func (p *ParamValidator) redeem() { - poolOfParamValidators.RedeemValidator(p) + pools.poolOfParamValidators.RedeemValidator(p) } func (p *ParamValidator) redeemChildren() { @@ -687,7 +706,7 @@ func newBasicSliceValidator( var s *basicSliceValidator if opts.recycleValidators { - s = poolOfBasicSliceValidators.BorrowValidator() + s = pools.poolOfBasicSliceValidators.BorrowValidator() } else { s = new(basicSliceValidator) } @@ -753,8 +772,13 @@ func (s *basicSliceValidator) Validate(data interface{}) *Result { for i := 0; i < int(size); i++ { itemsValidator := newItemsValidator(s.Path, s.In, s.Items, s.Source, s.KnownFormats, s.Options) ele := val.Index(i) - if err := itemsValidator.Validate(i, ele.Interface()); err != nil && err.HasErrors() { - return err + if err := itemsValidator.Validate(i, ele.Interface()); err != nil { + if err.HasErrors() { + return err + } + if err.wantsRedeemOnMerge { + pools.poolOfResults.RedeemResult(err) + } } } @@ -762,7 +786,7 @@ func (s *basicSliceValidator) Validate(data interface{}) *Result { } func (s *basicSliceValidator) redeem() { - poolOfBasicSliceValidators.RedeemValidator(s) + pools.poolOfBasicSliceValidators.RedeemValidator(s) } type numberValidator struct { @@ -791,7 +815,7 @@ func newNumberValidator( var n *numberValidator if opts.recycleValidators { - n = poolOfNumberValidators.BorrowValidator() + n = pools.poolOfNumberValidators.BorrowValidator() } else { n = new(numberValidator) } @@ -854,17 +878,13 @@ func (n *numberValidator) Validate(val interface{}) *Result { }() } - var res *Result + var res, resMultiple, resMinimum, resMaximum *Result if n.Options.recycleResult { - res = poolOfResults.BorrowResult() + res = pools.poolOfResults.BorrowResult() } else { res = new(Result) } - resMultiple := poolOfResults.BorrowResult() - resMinimum := poolOfResults.BorrowResult() - resMaximum := poolOfResults.BorrowResult() - // Used only to attempt to validate constraint on value, // even though value or constraint specified do not match type and format data := valueHelp.asFloat64(val) @@ -873,6 +893,8 @@ func (n *numberValidator) Validate(val interface{}) *Result { res.AddErrors(IsValueValidAgainstRange(val, n.Type, n.Format, "Checked", n.Path)) if n.MultipleOf != nil { + resMultiple = pools.poolOfResults.BorrowResult() + // Is the constraint specifier within the range of the specific numeric type and format? resMultiple.AddErrors(IsValueValidAgainstRange(*n.MultipleOf, n.Type, n.Format, "MultipleOf", n.Path)) if resMultiple.IsValid() { @@ -889,6 +911,8 @@ func (n *numberValidator) Validate(val interface{}) *Result { } if n.Maximum != nil { + resMaximum = pools.poolOfResults.BorrowResult() + // Is the constraint specifier within the range of the specific numeric type and format? 
resMaximum.AddErrors(IsValueValidAgainstRange(*n.Maximum, n.Type, n.Format, "Maximum boundary", n.Path)) if resMaximum.IsValid() { @@ -905,6 +929,8 @@ func (n *numberValidator) Validate(val interface{}) *Result { } if n.Minimum != nil { + resMinimum = pools.poolOfResults.BorrowResult() + // Is the constraint specifier within the range of the specific numeric type and format? resMinimum.AddErrors(IsValueValidAgainstRange(*n.Minimum, n.Type, n.Format, "Minimum boundary", n.Path)) if resMinimum.IsValid() { @@ -921,11 +947,12 @@ func (n *numberValidator) Validate(val interface{}) *Result { } res.Merge(resMultiple, resMinimum, resMaximum) res.Inc() + return res } func (n *numberValidator) redeem() { - poolOfNumberValidators.RedeemValidator(n) + pools.poolOfNumberValidators.RedeemValidator(n) } type stringValidator struct { @@ -950,7 +977,7 @@ func newStringValidator( var s *stringValidator if opts.recycleValidators { - s = poolOfStringValidators.BorrowValidator() + s = pools.poolOfStringValidators.BorrowValidator() } else { s = new(stringValidator) } @@ -1020,5 +1047,5 @@ func (s *stringValidator) Validate(val interface{}) *Result { } func (s *stringValidator) redeem() { - poolOfStringValidators.RedeemValidator(s) + pools.poolOfStringValidators.RedeemValidator(s) } diff --git a/vendor/github.com/prometheus/alertmanager/NOTICE b/vendor/github.com/prometheus/alertmanager/NOTICE index f5d0bbb421..e0dd624377 100644 --- a/vendor/github.com/prometheus/alertmanager/NOTICE +++ b/vendor/github.com/prometheus/alertmanager/NOTICE @@ -11,8 +11,3 @@ Bootstrap http://getbootstrap.com Copyright 2011-2014 Twitter, Inc. Licensed under the MIT License - -bootstrap-datetimepicker.js -http://www.eyecon.ro/bootstrap-datepicker -Copyright 2012 Stefan Petre -Licensed under the Apache License, Version 2.0 diff --git a/vendor/github.com/prometheus/alertmanager/api/v2/models/alert_status.go b/vendor/github.com/prometheus/alertmanager/api/v2/models/alert_status.go index ae1f248c99..2fa4f2314b 100644 --- a/vendor/github.com/prometheus/alertmanager/api/v2/models/alert_status.go +++ b/vendor/github.com/prometheus/alertmanager/api/v2/models/alert_status.go @@ -38,6 +38,10 @@ type AlertStatus struct { // Required: true InhibitedBy []string `json:"inhibitedBy"` + // muted by + // Required: true + MutedBy []string `json:"mutedBy"` + // silenced by // Required: true SilencedBy []string `json:"silencedBy"` @@ -56,6 +60,10 @@ func (m *AlertStatus) Validate(formats strfmt.Registry) error { res = append(res, err) } + if err := m.validateMutedBy(formats); err != nil { + res = append(res, err) + } + if err := m.validateSilencedBy(formats); err != nil { res = append(res, err) } @@ -79,6 +87,15 @@ func (m *AlertStatus) validateInhibitedBy(formats strfmt.Registry) error { return nil } +func (m *AlertStatus) validateMutedBy(formats strfmt.Registry) error { + + if err := validate.Required("mutedBy", "body", m.MutedBy); err != nil { + return err + } + + return nil +} + func (m *AlertStatus) validateSilencedBy(formats strfmt.Registry) error { if err := validate.Required("silencedBy", "body", m.SilencedBy); err != nil { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go index e0ac346665..6f1200180a 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go @@ -158,6 +158,9 @@ func GatherAndCount(g prometheus.Gatherer, 
metricNames ...string) (int, error) { // ScrapeAndCompare calls a remote exporter's endpoint which is expected to return some metrics in // plain text format. Then it compares it with the results that the `expected` would return. // If the `metricNames` is not empty it would filter the comparison only to the given metric names. +// +// NOTE: Be mindful of accidental discrepancies between expected and metricNames; metricNames filter +// both expected and scraped metrics. See https://github.com/prometheus/client_golang/issues/1351. func ScrapeAndCompare(url string, expected io.Reader, metricNames ...string) error { resp, err := http.Get(url) if err != nil { @@ -185,6 +188,9 @@ func ScrapeAndCompare(url string, expected io.Reader, metricNames ...string) err // CollectAndCompare collects the metrics identified by `metricNames` and compares them in the Prometheus text // exposition format to the data read from expected. +// +// NOTE: Be mindful of accidental discrepancies between expected and metricNames; metricNames filter +// both expected and collected metrics. See https://github.com/prometheus/client_golang/issues/1351. func CollectAndCompare(c prometheus.Collector, expected io.Reader, metricNames ...string) error { reg := prometheus.NewPedanticRegistry() if err := reg.Register(c); err != nil { @@ -197,6 +203,9 @@ func CollectAndCompare(c prometheus.Collector, expected io.Reader, metricNames . // it to an expected output read from the provided Reader in the Prometheus text // exposition format. If any metricNames are provided, only metrics with those // names are compared. +// +// NOTE: Be mindful of accidental discrepancies between expected and metricNames; metricNames filter +// both expected and gathered metrics. See https://github.com/prometheus/client_golang/issues/1351. func GatherAndCompare(g prometheus.Gatherer, expected io.Reader, metricNames ...string) error { return TransactionalGatherAndCompare(prometheus.ToTransactionalGatherer(g), expected, metricNames...) } @@ -205,6 +214,9 @@ func GatherAndCompare(g prometheus.Gatherer, expected io.Reader, metricNames ... // it to an expected output read from the provided Reader in the Prometheus text // exposition format. If any metricNames are provided, only metrics with those // names are compared. +// +// NOTE: Be mindful of accidental discrepancies between expected and metricNames; metricNames filter +// both expected and gathered metrics. See https://github.com/prometheus/client_golang/issues/1351. 
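// [Editor's note: illustrative sketch, not part of the upstream patch; assumes
// imports of prometheus, testutil and strings.] The NOTE above matters because
// metricNames filters *both* sides of the comparison: a misspelled name empties
// the expected input as well as the gathered set, so the helper reports
// success. Hypothetical example with a counter registered as "demo_total":
//
//	c := prometheus.NewCounter(prometheus.CounterOpts{Name: "demo_total", Help: "A demo counter."})
//	c.Inc()
//	expected := strings.NewReader("# HELP demo_total A demo counter.\n# TYPE demo_total counter\ndemo_total 1\n")
//	err := testutil.CollectAndCompare(c, expected, "demo_total") // nil: families match
//	// With a typo such as "demo_totall", both sides filter to empty and err is
//	// still nil, since the missing-name guard is removed in the hunk below.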
func TransactionalGatherAndCompare(g prometheus.TransactionalGatherer, expected io.Reader, metricNames ...string) error { got, done, err := g.Gather() defer done() @@ -277,15 +289,6 @@ func compareMetricFamilies(got, expected []*dto.MetricFamily, metricNames ...str if metricNames != nil { got = filterMetrics(got, metricNames) expected = filterMetrics(expected, metricNames) - if len(metricNames) > len(got) { - var missingMetricNames []string - for _, name := range metricNames { - if ok := hasMetricByName(got, name); !ok { - missingMetricNames = append(missingMetricNames, name) - } - } - return fmt.Errorf("expected metric name(s) not found: %v", missingMetricNames) - } } return compare(got, expected) @@ -327,12 +330,3 @@ func filterMetrics(metrics []*dto.MetricFamily, names []string) []*dto.MetricFam } return filtered } - -func hasMetricByName(metrics []*dto.MetricFamily, name string) bool { - for _, mf := range metrics { - if mf.GetName() == name { - return true - } - } - return false -} diff --git a/vendor/github.com/prometheus/common/config/http_config.go b/vendor/github.com/prometheus/common/config/http_config.go index b640b89953..57ec252adf 100644 --- a/vendor/github.com/prometheus/common/config/http_config.go +++ b/vendor/github.com/prometheus/common/config/http_config.go @@ -357,33 +357,33 @@ func nonZeroCount[T comparable](values ...T) int { func (c *HTTPClientConfig) Validate() error { // Backwards compatibility with the bearer_token field. if len(c.BearerToken) > 0 && len(c.BearerTokenFile) > 0 { - return fmt.Errorf("at most one of bearer_token & bearer_token_file must be configured") + return errors.New("at most one of bearer_token & bearer_token_file must be configured") } if (c.BasicAuth != nil || c.OAuth2 != nil) && (len(c.BearerToken) > 0 || len(c.BearerTokenFile) > 0) { - return fmt.Errorf("at most one of basic_auth, oauth2, bearer_token & bearer_token_file must be configured") + return errors.New("at most one of basic_auth, oauth2, bearer_token & bearer_token_file must be configured") } if c.BasicAuth != nil && nonZeroCount(string(c.BasicAuth.Username) != "", c.BasicAuth.UsernameFile != "", c.BasicAuth.UsernameRef != "") > 1 { - return fmt.Errorf("at most one of basic_auth username, username_file & username_ref must be configured") + return errors.New("at most one of basic_auth username, username_file & username_ref must be configured") } if c.BasicAuth != nil && nonZeroCount(string(c.BasicAuth.Password) != "", c.BasicAuth.PasswordFile != "", c.BasicAuth.PasswordRef != "") > 1 { - return fmt.Errorf("at most one of basic_auth password, password_file & password_ref must be configured") + return errors.New("at most one of basic_auth password, password_file & password_ref must be configured") } if c.Authorization != nil { if len(c.BearerToken) > 0 || len(c.BearerTokenFile) > 0 { - return fmt.Errorf("authorization is not compatible with bearer_token & bearer_token_file") + return errors.New("authorization is not compatible with bearer_token & bearer_token_file") } if nonZeroCount(string(c.Authorization.Credentials) != "", c.Authorization.CredentialsFile != "", c.Authorization.CredentialsRef != "") > 1 { - return fmt.Errorf("at most one of authorization credentials & credentials_file must be configured") + return errors.New("at most one of authorization credentials & credentials_file must be configured") } c.Authorization.Type = strings.TrimSpace(c.Authorization.Type) if len(c.Authorization.Type) == 0 { c.Authorization.Type = "Bearer" } if strings.ToLower(c.Authorization.Type) == 
"basic" { - return fmt.Errorf(`authorization type cannot be set to "basic", use "basic_auth" instead`) + return errors.New(`authorization type cannot be set to "basic", use "basic_auth" instead`) } if c.BasicAuth != nil || c.OAuth2 != nil { - return fmt.Errorf("at most one of basic_auth, oauth2 & authorization must be configured") + return errors.New("at most one of basic_auth, oauth2 & authorization must be configured") } } else { if len(c.BearerToken) > 0 { @@ -399,16 +399,16 @@ func (c *HTTPClientConfig) Validate() error { } if c.OAuth2 != nil { if c.BasicAuth != nil { - return fmt.Errorf("at most one of basic_auth, oauth2 & authorization must be configured") + return errors.New("at most one of basic_auth, oauth2 & authorization must be configured") } if len(c.OAuth2.ClientID) == 0 { - return fmt.Errorf("oauth2 client_id must be configured") + return errors.New("oauth2 client_id must be configured") } if len(c.OAuth2.TokenURL) == 0 { - return fmt.Errorf("oauth2 token_url must be configured") + return errors.New("oauth2 token_url must be configured") } if nonZeroCount(len(c.OAuth2.ClientSecret) > 0, len(c.OAuth2.ClientSecretFile) > 0, len(c.OAuth2.ClientSecretRef) > 0) > 1 { - return fmt.Errorf("at most one of oauth2 client_secret, client_secret_file & client_secret_ref must be configured") + return errors.New("at most one of oauth2 client_secret, client_secret_file & client_secret_ref must be configured") } } if err := c.ProxyConfig.Validate(); err != nil { @@ -679,8 +679,8 @@ func NewRoundTripperFromConfigWithContext(ctx context.Context, cfg HTTPClientCon if err != nil { return nil, err } - if tlsSettings.CA == nil || tlsSettings.CA.Immutable() { - // No need for a RoundTripper that reloads the CA file automatically. + if tlsSettings.immutable() { + // No need for a RoundTripper that reloads the files automatically. return newRT(tlsConfig) } return NewTLSRoundTripperWithContext(ctx, tlsConfig, tlsSettings, newRT) @@ -735,7 +735,7 @@ func (s *FileSecret) Fetch(ctx context.Context) (string, error) { } func (s *FileSecret) Description() string { - return fmt.Sprintf("file %s", s.file) + return "file " + s.file } func (s *FileSecret) Immutable() bool { @@ -753,7 +753,7 @@ func (s *refSecret) Fetch(ctx context.Context) (string, error) { } func (s *refSecret) Description() string { - return fmt.Sprintf("ref %s", s.ref) + return "ref " + s.ref } func (s *refSecret) Immutable() bool { @@ -914,7 +914,7 @@ func (rt *oauth2RoundTripper) newOauth2TokenSource(req *http.Request, secret str if err != nil { return nil, nil, err } - if tlsSettings.CA == nil || tlsSettings.CA.Immutable() { + if tlsSettings.immutable() { t, _ = tlsTransport(tlsConfig) } else { t, err = NewTLSRoundTripperWithContext(req.Context(), tlsConfig, tlsSettings, tlsTransport) @@ -1045,7 +1045,7 @@ func NewTLSConfigWithContext(ctx context.Context, cfg *TLSConfig, optFuncs ...TL if cfg.MaxVersion != 0 && cfg.MinVersion != 0 { if cfg.MaxVersion < cfg.MinVersion { - return nil, fmt.Errorf("tls_config.max_version must be greater than or equal to tls_config.min_version if both are specified") + return nil, errors.New("tls_config.max_version must be greater than or equal to tls_config.min_version if both are specified") } } @@ -1144,19 +1144,19 @@ func (c *TLSConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { // used. 
func (c *TLSConfig) Validate() error { if nonZeroCount(len(c.CA) > 0, len(c.CAFile) > 0, len(c.CARef) > 0) > 1 { - return fmt.Errorf("at most one of ca, ca_file & ca_ref must be configured") + return errors.New("at most one of ca, ca_file & ca_ref must be configured") } if nonZeroCount(len(c.Cert) > 0, len(c.CertFile) > 0, len(c.CertRef) > 0) > 1 { - return fmt.Errorf("at most one of cert, cert_file & cert_ref must be configured") + return errors.New("at most one of cert, cert_file & cert_ref must be configured") } if nonZeroCount(len(c.Key) > 0, len(c.KeyFile) > 0, len(c.KeyRef) > 0) > 1 { - return fmt.Errorf("at most one of key and key_file must be configured") + return errors.New("at most one of key and key_file must be configured") } if c.usingClientCert() && !c.usingClientKey() { - return fmt.Errorf("exactly one of key or key_file must be configured when a client certificate is configured") + return errors.New("exactly one of key or key_file must be configured when a client certificate is configured") } else if c.usingClientKey() && !c.usingClientCert() { - return fmt.Errorf("exactly one of cert or cert_file must be configured when a client key is configured") + return errors.New("exactly one of cert or cert_file must be configured when a client key is configured") } return nil @@ -1259,6 +1259,10 @@ type TLSRoundTripperSettings struct { Key SecretReader } +func (t *TLSRoundTripperSettings) immutable() bool { + return (t.CA == nil || t.CA.Immutable()) && (t.Cert == nil || t.Cert.Immutable()) && (t.Key == nil || t.Key.Immutable()) +} + func NewTLSRoundTripper( cfg *tls.Config, settings TLSRoundTripperSettings, @@ -1456,16 +1460,16 @@ type ProxyConfig struct { // UnmarshalYAML implements the yaml.Unmarshaler interface. func (c *ProxyConfig) Validate() error { if len(c.ProxyConnectHeader) > 0 && (!c.ProxyFromEnvironment && (c.ProxyURL.URL == nil || c.ProxyURL.String() == "")) { - return fmt.Errorf("if proxy_connect_header is configured, proxy_url or proxy_from_environment must also be configured") + return errors.New("if proxy_connect_header is configured, proxy_url or proxy_from_environment must also be configured") } if c.ProxyFromEnvironment && c.ProxyURL.URL != nil && c.ProxyURL.String() != "" { - return fmt.Errorf("if proxy_from_environment is configured, proxy_url must not be configured") + return errors.New("if proxy_from_environment is configured, proxy_url must not be configured") } if c.ProxyFromEnvironment && c.NoProxy != "" { - return fmt.Errorf("if proxy_from_environment is configured, no_proxy must not be configured") + return errors.New("if proxy_from_environment is configured, no_proxy must not be configured") } if c.ProxyURL.URL == nil && c.NoProxy != "" { - return fmt.Errorf("if no_proxy is configured, proxy_url must also be configured") + return errors.New("if no_proxy is configured, proxy_url must also be configured") } return nil } diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go index cf0c150c2e..d7f3d76f55 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -68,7 +68,7 @@ func Negotiate(h http.Header) Format { if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" { switch Format(escapeParam) { case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues: - escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam)) + escapingScheme = Format("; escaping=" + escapeParam) default: 
// If the escaping parameter is unknown, ignore it. } @@ -101,7 +101,7 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format { if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" { switch Format(escapeParam) { case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues: - escapingScheme = Format(fmt.Sprintf("; escaping=%s", escapeParam)) + escapingScheme = Format("; escaping=" + escapeParam) default: // If the escaping parameter is unknown, ignore it. } diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go index d942af8edd..b26886560d 100644 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -15,7 +15,7 @@ package expfmt import ( - "fmt" + "errors" "strings" "github.com/prometheus/common/model" @@ -109,7 +109,7 @@ func NewOpenMetricsFormat(version string) (Format, error) { if version == OpenMetricsVersion_1_0_0 { return FmtOpenMetrics_1_0_0, nil } - return FmtUnknown, fmt.Errorf("unknown open metrics version string") + return FmtUnknown, errors.New("unknown open metrics version string") } // WithEscapingScheme returns a copy of Format with the specified escaping diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index 11c8ff4b9d..f1c495dd60 100644 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -152,8 +152,8 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...E if metricType == dto.MetricType_COUNTER && strings.HasSuffix(compliantName, "_total") { compliantName = name[:len(name)-6] } - if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, fmt.Sprintf("_%s", *in.Unit)) { - compliantName = compliantName + fmt.Sprintf("_%s", *in.Unit) + if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, "_"+*in.Unit) { + compliantName = compliantName + "_" + *in.Unit } // Comments, first HELP, then TYPE. diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index f085a923f6..b4607fe4d2 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -895,7 +895,7 @@ func histogramMetricName(name string) string { func parseFloat(s string) (float64, error) { if strings.ContainsAny(s, "pP_") { - return 0, fmt.Errorf("unsupported character in float") + return 0, errors.New("unsupported character in float") } return strconv.ParseFloat(s, 64) } diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go index 80d1fe944e..bd3a39e3e1 100644 --- a/vendor/github.com/prometheus/common/model/alert.go +++ b/vendor/github.com/prometheus/common/model/alert.go @@ -14,6 +14,7 @@ package model import ( + "errors" "fmt" "time" ) @@ -89,16 +90,16 @@ func (a *Alert) StatusAt(ts time.Time) AlertStatus { // Validate checks whether the alert data is inconsistent. 
func (a *Alert) Validate() error { if a.StartsAt.IsZero() { - return fmt.Errorf("start time missing") + return errors.New("start time missing") } if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) { - return fmt.Errorf("start time must be before end time") + return errors.New("start time must be before end time") } if err := a.Labels.Validate(); err != nil { return fmt.Errorf("invalid label set: %w", err) } if len(a.Labels) == 0 { - return fmt.Errorf("at least one label pair required") + return errors.New("at least one label pair required") } if err := a.Annotations.Validate(); err != nil { return fmt.Errorf("invalid annotations: %w", err) diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go index f50966bc49..0daca836af 100644 --- a/vendor/github.com/prometheus/common/model/metric.go +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -14,9 +14,11 @@ package model import ( + "errors" "fmt" "regexp" "sort" + "strconv" "strings" "unicode/utf8" @@ -269,10 +271,6 @@ func metricNeedsEscaping(m *dto.Metric) bool { return false } -const ( - lowerhex = "0123456789abcdef" -) - // EscapeName escapes the incoming name according to the provided escaping // scheme. Depending on the rules of escaping, this may cause no change in the // string that is returned. (Especially NoEscaping, which by definition is a @@ -307,7 +305,7 @@ func EscapeName(name string, scheme EscapingScheme) string { } else if isValidLegacyRune(b, i) { escaped.WriteRune(b) } else { - escaped.WriteRune('_') + escaped.WriteString("__") } } return escaped.String() @@ -317,21 +315,15 @@ func EscapeName(name string, scheme EscapingScheme) string { } escaped.WriteString("U__") for i, b := range name { - if isValidLegacyRune(b, i) { + if b == '_' { + escaped.WriteString("__") + } else if isValidLegacyRune(b, i) { escaped.WriteRune(b) } else if !utf8.ValidRune(b) { escaped.WriteString("_FFFD_") - } else if b < 0x100 { - escaped.WriteRune('_') - for s := 4; s >= 0; s -= 4 { - escaped.WriteByte(lowerhex[b>>uint(s)&0xF]) - } - escaped.WriteRune('_') - } else if b < 0x10000 { + } else { escaped.WriteRune('_') - for s := 12; s >= 0; s -= 4 { - escaped.WriteByte(lowerhex[b>>uint(s)&0xF]) - } + escaped.WriteString(strconv.FormatInt(int64(b), 16)) escaped.WriteRune('_') } } @@ -389,8 +381,9 @@ func UnescapeName(name string, scheme EscapingScheme) string { // We think we are in a UTF-8 code, process it. var utf8Val uint for j := 0; i < len(escapedName); j++ { - // This is too many characters for a utf8 value. - if j > 4 { + // This is too many characters for a utf8 value based on the MaxRune + // value of '\U0010FFFF'. + if j >= 6 { return name } // Found a closing underscore, convert to a rune, check validity, and append. 
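// [Editor's note: illustrative sketch of the escaping round trip after this
// change; not part of the upstream patch.] With ValueEncodingEscaping, '_'
// now doubles to "__", invalid UTF-8 becomes _FFFD_, and any other
// invalid-legacy rune becomes _<hex>_ via strconv.FormatInt, with up to six
// hex digits accepted on unescape (utf8.MaxRune is '\U0010FFFF'):
//
//	esc := model.EscapeName("http.requests.total", model.ValueEncodingEscaping)
//	// esc == "U__http_2e_requests_2e_total" ('.' is U+002E)
//	back := model.UnescapeName(esc, model.ValueEncodingEscaping)
//	// back == "http.requests.total"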
@@ -443,7 +436,7 @@ func (e EscapingScheme) String() string { func ToEscapingScheme(s string) (EscapingScheme, error) { if s == "" { - return NoEscaping, fmt.Errorf("got empty string instead of escaping scheme") + return NoEscaping, errors.New("got empty string instead of escaping scheme") } switch s { case AllowUTF8: diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go index 910b0b71fc..8f91a9702e 100644 --- a/vendor/github.com/prometheus/common/model/silence.go +++ b/vendor/github.com/prometheus/common/model/silence.go @@ -15,6 +15,7 @@ package model import ( "encoding/json" + "errors" "fmt" "regexp" "time" @@ -34,7 +35,7 @@ func (m *Matcher) UnmarshalJSON(b []byte) error { } if len(m.Name) == 0 { - return fmt.Errorf("label name in matcher must not be empty") + return errors.New("label name in matcher must not be empty") } if m.IsRegex { if _, err := regexp.Compile(m.Value); err != nil { @@ -77,7 +78,7 @@ type Silence struct { // Validate returns true iff all fields of the silence have valid values. func (s *Silence) Validate() error { if len(s.Matchers) == 0 { - return fmt.Errorf("at least one matcher required") + return errors.New("at least one matcher required") } for _, m := range s.Matchers { if err := m.Validate(); err != nil { @@ -85,22 +86,22 @@ func (s *Silence) Validate() error { } } if s.StartsAt.IsZero() { - return fmt.Errorf("start time missing") + return errors.New("start time missing") } if s.EndsAt.IsZero() { - return fmt.Errorf("end time missing") + return errors.New("end time missing") } if s.EndsAt.Before(s.StartsAt) { - return fmt.Errorf("start time must be before end time") + return errors.New("start time must be before end time") } if s.CreatedBy == "" { - return fmt.Errorf("creator information missing") + return errors.New("creator information missing") } if s.Comment == "" { - return fmt.Errorf("comment missing") + return errors.New("comment missing") } if s.CreatedAt.IsZero() { - return fmt.Errorf("creation timestamp missing") + return errors.New("creation timestamp missing") } return nil } diff --git a/vendor/github.com/prometheus/common/model/value_float.go b/vendor/github.com/prometheus/common/model/value_float.go index ae35cc2ab4..6bfc757d18 100644 --- a/vendor/github.com/prometheus/common/model/value_float.go +++ b/vendor/github.com/prometheus/common/model/value_float.go @@ -15,6 +15,7 @@ package model import ( "encoding/json" + "errors" "fmt" "math" "strconv" @@ -39,7 +40,7 @@ func (v SampleValue) MarshalJSON() ([]byte, error) { // UnmarshalJSON implements json.Unmarshaler. 
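// [Editor's note: illustrative sketch, not part of the upstream patch.] A
// SampleValue travels through the Prometheus API as a quoted JSON string,
// which is what the length/quote guard below enforces:
//
//	var v model.SampleValue
//	_ = json.Unmarshal([]byte(`"1.5"`), &v) // v == 1.5
//	b, _ := json.Marshal(v)                 // b == []byte(`"1.5"`)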
func (v *SampleValue) UnmarshalJSON(b []byte) error { if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { - return fmt.Errorf("sample value must be a quoted string") + return errors.New("sample value must be a quoted string") } f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) if err != nil { diff --git a/vendor/github.com/prometheus/common/model/value_histogram.go b/vendor/github.com/prometheus/common/model/value_histogram.go index 54bb038cff..895e6a3e83 100644 --- a/vendor/github.com/prometheus/common/model/value_histogram.go +++ b/vendor/github.com/prometheus/common/model/value_histogram.go @@ -15,6 +15,7 @@ package model import ( "encoding/json" + "errors" "fmt" "strconv" "strings" @@ -32,7 +33,7 @@ func (v FloatString) MarshalJSON() ([]byte, error) { func (v *FloatString) UnmarshalJSON(b []byte) error { if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { - return fmt.Errorf("float value must be a quoted string") + return errors.New("float value must be a quoted string") } f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) if err != nil { @@ -141,7 +142,7 @@ type SampleHistogramPair struct { func (s SampleHistogramPair) MarshalJSON() ([]byte, error) { if s.Histogram == nil { - return nil, fmt.Errorf("histogram is nil") + return nil, errors.New("histogram is nil") } t, err := json.Marshal(s.Timestamp) if err != nil { @@ -164,7 +165,7 @@ func (s *SampleHistogramPair) UnmarshalJSON(buf []byte) error { return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen) } if s.Histogram == nil { - return fmt.Errorf("histogram is null") + return errors.New("histogram is null") } return nil } diff --git a/vendor/github.com/prometheus/common/version/info.go b/vendor/github.com/prometheus/common/version/info.go index 197d95e5c8..61ed1ba314 100644 --- a/vendor/github.com/prometheus/common/version/info.go +++ b/vendor/github.com/prometheus/common/version/info.go @@ -90,6 +90,14 @@ func GetTags() string { return computedTags } +func PrometheusUserAgent() string { + return ComponentUserAgent("Prometheus") +} + +func ComponentUserAgent(component string) string { + return component + "/" + Version +} + func init() { computedRevision, computedTags = computeRevision() } diff --git a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go index 0e0ba4c035..958cf38deb 100644 --- a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go +++ b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go @@ -180,7 +180,9 @@ func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, s // traverse builds the table of events representing a traversal. func traverse(files []*ast.File) []event { // Preallocate approximate number of events - // based on source file extent. + // based on source file extent of the declarations. + // (We use End-Pos not FileStart-FileEnd to neglect + // the effect of long doc comments.) // This makes traverse faster by 4x (!). 
var extent int for _, f := range files { diff --git a/vendor/modules.txt b/vendor/modules.txt index 2d0f976a14..96df8574ff 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -74,7 +74,7 @@ github.com/Microsoft/go-winio/pkg/guid # github.com/NYTimes/gziphandler v1.1.1 ## explicit; go 1.11 github.com/NYTimes/gziphandler -# github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 +# github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b ## explicit; go 1.15 github.com/alecthomas/units # github.com/antlr4-go/antlr/v4 v4.13.0 @@ -83,7 +83,7 @@ github.com/antlr4-go/antlr/v4 # github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 ## explicit; go 1.13 github.com/asaskevich/govalidator -# github.com/aws/aws-sdk-go v1.54.19 +# github.com/aws/aws-sdk-go v1.55.5 ## explicit; go 1.19 github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/auth/bearer @@ -211,8 +211,8 @@ github.com/go-logr/logr/funcr # github.com/go-logr/stdr v1.2.2 ## explicit; go 1.16 github.com/go-logr/stdr -# github.com/go-openapi/analysis v0.22.2 -## explicit; go 1.19 +# github.com/go-openapi/analysis v0.23.0 +## explicit; go 1.20 github.com/go-openapi/analysis github.com/go-openapi/analysis/internal/debug github.com/go-openapi/analysis/internal/flatten/normalize @@ -230,11 +230,11 @@ github.com/go-openapi/jsonpointer ## explicit; go 1.20 github.com/go-openapi/jsonreference github.com/go-openapi/jsonreference/internal -# github.com/go-openapi/loads v0.21.5 -## explicit; go 1.19 +# github.com/go-openapi/loads v0.22.0 +## explicit; go 1.20 github.com/go-openapi/loads -# github.com/go-openapi/spec v0.20.14 -## explicit; go 1.19 +# github.com/go-openapi/spec v0.21.0 +## explicit; go 1.20 github.com/go-openapi/spec # github.com/go-openapi/strfmt v0.23.0 ## explicit; go 1.20 @@ -242,8 +242,8 @@ github.com/go-openapi/strfmt # github.com/go-openapi/swag v0.23.0 ## explicit; go 1.20 github.com/go-openapi/swag -# github.com/go-openapi/validate v0.23.0 -## explicit; go 1.19 +# github.com/go-openapi/validate v0.24.0 +## explicit; go 1.20 github.com/go-openapi/validate # github.com/go-task/slim-sprig/v3 v3.0.0 ## explicit; go 1.20 @@ -739,10 +739,10 @@ github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/mo github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1alpha1/fake github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1beta1 github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1beta1/fake -# github.com/prometheus/alertmanager v0.27.0 -## explicit; go 1.21 +# github.com/prometheus/alertmanager v0.28.1 +## explicit; go 1.22.0 github.com/prometheus/alertmanager/api/v2/models -# github.com/prometheus/client_golang v1.20.4 +# github.com/prometheus/client_golang v1.20.5 ## explicit; go 1.20 github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header @@ -756,7 +756,7 @@ github.com/prometheus/client_golang/prometheus/testutil/promlint/validations # github.com/prometheus/client_model v0.6.1 ## explicit; go 1.19 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.60.0 +# github.com/prometheus/common v0.61.0 ## explicit; go 1.21 github.com/prometheus/common/config github.com/prometheus/common/expfmt @@ -1026,7 +1026,7 @@ golang.org/x/text/unicode/norm # golang.org/x/time v0.9.0 ## explicit; go 1.18 golang.org/x/time/rate -# golang.org/x/tools v0.26.0 +# 
golang.org/x/tools v0.28.0 ## explicit; go 1.22.0 golang.org/x/tools/cover golang.org/x/tools/go/ast/inspector From 6ca1af7dff26a74d990498ff16a90ad90038e8e6 Mon Sep 17 00:00:00 2001 From: Simon Pasquier Date: Wed, 17 Sep 2025 15:32:47 +0200 Subject: [PATCH 4/6] chore: bump prometheus-operator client Signed-off-by: Simon Pasquier --- Documentation/api.md | 84 +- go.mod | 46 +- go.sum | 92 +- .../emicklei/go-restful/v3/CHANGES.md | 5 +- .../emicklei/go-restful/v3/README.md | 2 +- .../emicklei/go-restful/v3/jsr311.go | 19 +- .../emicklei/go-restful/v3/route.go | 2 + vendor/github.com/fxamacker/cbor/v2/README.md | 408 +++-- .../fxamacker/cbor/v2/bytestring.go | 27 + vendor/github.com/fxamacker/cbor/v2/cache.go | 22 +- vendor/github.com/fxamacker/cbor/v2/decode.go | 102 +- vendor/github.com/fxamacker/cbor/v2/doc.go | 51 +- vendor/github.com/fxamacker/cbor/v2/encode.go | 191 ++- .../fxamacker/cbor/v2/encode_map.go | 10 +- .../fxamacker/cbor/v2/encode_map_go117.go | 60 - .../fxamacker/cbor/v2/omitzero_go124.go | 8 + .../fxamacker/cbor/v2/omitzero_pre_go124.go | 8 + .../fxamacker/cbor/v2/simplevalue.go | 29 + vendor/github.com/fxamacker/cbor/v2/stream.go | 4 +- .../fxamacker/cbor/v2/structfields.go | 15 +- vendor/github.com/fxamacker/cbor/v2/tag.go | 35 +- .../go-openapi/jsonpointer/.golangci.yml | 31 +- .../go-openapi/jsonpointer/errors.go | 18 + .../go-openapi/jsonpointer/pointer.go | 49 +- .../github.com/go-openapi/swag/.golangci.yml | 34 +- vendor/github.com/go-openapi/swag/errors.go | 15 + vendor/github.com/go-openapi/swag/json.go | 3 +- vendor/github.com/go-openapi/swag/loading.go | 2 +- vendor/github.com/go-openapi/swag/yaml.go | 32 +- .../apis/monitoring/v1/alertmanager_types.go | 17 + .../apis/monitoring/v1/prometheus_types.go | 55 +- .../pkg/apis/monitoring/v1/thanos_types.go | 33 +- .../pkg/apis/monitoring/v1/types.go | 24 +- .../monitoring/v1/zz_generated.deepcopy.go | 59 +- .../v1alpha1/alertmanager_config_types.go | 46 +- .../v1alpha1/zz_generated.deepcopy.go | 67 + .../v1beta1/alertmanager_config_types.go | 46 +- .../monitoring/v1beta1/conversion_from.go | 21 + .../apis/monitoring/v1beta1/conversion_to.go | 21 + .../pkg/apis/monitoring/v1beta1/validation.go | 1 - .../v1beta1/zz_generated.deepcopy.go | 68 + .../monitoring/v1/alertmanagerspec.go | 23 + .../monitoring/v1/commonprometheusfields.go | 9 + .../monitoring/v1/metadataconfig.go | 13 +- .../monitoring/v1/nativehistogramconfig.go | 9 + .../monitoring/v1/podmonitorspec.go | 8 + .../monitoring/v1/probespec.go | 8 + .../monitoring/v1/prometheusspec.go | 8 + .../monitoring/v1/retainconfig.go | 41 + .../monitoring/v1/servicemonitorspec.go | 8 + .../monitoring/v1/shardretentionpolicy.go | 9 + .../monitoring/v1/thanosrulerspec.go | 23 + .../monitoring/v1alpha1/discordconfig.go | 28 + .../monitoring/v1alpha1/msteamsv2config.go | 77 + .../v1alpha1/prometheusagentspec.go | 8 + .../monitoring/v1alpha1/receiver.go | 14 + .../monitoring/v1alpha1/scrapeconfigspec.go | 8 + .../monitoring/v1alpha1/webhookconfig.go | 10 + .../monitoring/v1beta1/discordconfig.go | 28 + .../monitoring/v1beta1/msteamsv2config.go | 77 + .../monitoring/v1beta1/receiver.go | 14 + .../monitoring/v1beta1/webhookconfig.go | 13 + .../pkg/client/applyconfiguration/utils.go | 6 + vendor/golang.org/x/crypto/cryptobyte/asn1.go | 2 +- .../x/crypto/internal/poly1305/mac_noasm.go | 2 +- .../poly1305/{sum_amd64.go => sum_asm.go} | 2 +- .../x/crypto/internal/poly1305/sum_loong64.s | 123 ++ .../x/crypto/internal/poly1305/sum_ppc64x.go | 47 - 
vendor/golang.org/x/crypto/ssh/handshake.go | 3 +- vendor/golang.org/x/oauth2/oauth2.go | 8 +- vendor/golang.org/x/oauth2/pkce.go | 4 +- vendor/golang.org/x/oauth2/token.go | 2 +- vendor/golang.org/x/sync/errgroup/errgroup.go | 3 +- vendor/golang.org/x/sys/cpu/cpu.go | 12 + .../golang.org/x/sys/cpu/cpu_linux_loong64.go | 22 + .../golang.org/x/sys/cpu/cpu_linux_noinit.go | 2 +- vendor/golang.org/x/sys/cpu/cpu_loong64.go | 38 + vendor/golang.org/x/sys/cpu/cpu_loong64.s | 13 + vendor/golang.org/x/sys/cpu/parse.go | 4 +- .../golang.org/x/sys/unix/syscall_darwin.go | 149 +- vendor/golang.org/x/sys/unix/syscall_linux.go | 42 +- .../x/sys/unix/zsyscall_darwin_amd64.go | 84 + .../x/sys/unix/zsyscall_darwin_amd64.s | 20 + .../x/sys/unix/zsyscall_darwin_arm64.go | 84 + .../x/sys/unix/zsyscall_darwin_arm64.s | 20 + .../golang.org/x/sys/windows/registry/key.go | 13 +- .../x/sys/windows/registry/value.go | 6 +- .../golang.org/x/sys/windows/types_windows.go | 27 + vendor/golang.org/x/term/terminal.go | 9 + vendor/golang.org/x/time/rate/rate.go | 28 +- .../editiondefaults/editions_defaults.binpb | Bin 138 -> 146 bytes .../protobuf/internal/filedesc/editions.go | 3 + .../protobuf/internal/genid/descriptor_gen.go | 16 + ...ings_unsafe_go121.go => strings_unsafe.go} | 2 - .../internal/strs/strings_unsafe_go120.go | 94 -- .../protobuf/internal/version/version.go | 2 +- .../google.golang.org/protobuf/proto/merge.go | 6 + .../reflect/protoreflect/source_gen.go | 2 + ...{value_unsafe_go121.go => value_unsafe.go} | 2 - .../protoreflect/value_unsafe_go120.go | 98 -- .../types/descriptorpb/descriptor.pb.go | 1439 +++++++---------- .../types/gofeaturespb/go_features.pb.go | 80 +- .../protobuf/types/known/anypb/any.pb.go | 24 +- .../types/known/durationpb/duration.pb.go | 25 +- .../protobuf/types/known/emptypb/empty.pb.go | 20 +- .../types/known/fieldmaskpb/field_mask.pb.go | 23 +- .../types/known/structpb/struct.pb.go | 74 +- .../types/known/timestamppb/timestamp.pb.go | 25 +- .../types/known/wrapperspb/wrappers.pb.go | 103 +- vendor/k8s.io/utils/buffer/ring_growing.go | 10 + vendor/modules.txt | 58 +- .../v4/typed/validate.go | 4 +- .../v4/value/jsontagutil.go | 63 +- .../v4/value/reflectcache.go | 14 +- 114 files changed, 3222 insertions(+), 2038 deletions(-) delete mode 100644 vendor/github.com/fxamacker/cbor/v2/encode_map_go117.go create mode 100644 vendor/github.com/fxamacker/cbor/v2/omitzero_go124.go create mode 100644 vendor/github.com/fxamacker/cbor/v2/omitzero_pre_go124.go create mode 100644 vendor/github.com/go-openapi/jsonpointer/errors.go create mode 100644 vendor/github.com/go-openapi/swag/errors.go create mode 100644 vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1/retainconfig.go create mode 100644 vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1alpha1/msteamsv2config.go create mode 100644 vendor/github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1beta1/msteamsv2config.go rename vendor/golang.org/x/crypto/internal/poly1305/{sum_amd64.go => sum_asm.go} (94%) create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_loong64.s delete mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_loong64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_loong64.s rename vendor/google.golang.org/protobuf/internal/strs/{strings_unsafe_go121.go => strings_unsafe.go} (99%) delete mode 
100644 vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go rename vendor/google.golang.org/protobuf/reflect/protoreflect/{value_unsafe_go121.go => value_unsafe.go} (99%) delete mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go diff --git a/Documentation/api.md b/Documentation/api.md index 7af2f31c0d..4f8a1d2ab1 100644 --- a/Documentation/api.md +++ b/Documentation/api.md @@ -64,7 +64,7 @@ The `AdditionalAlertmanagerConfig` resource defines settings for how a component | Property | Type | Description | | -------- | ---- | ----------- | | apiVersion | string | Defines the API version of Alertmanager. `v1` is no longer supported, `v2` is set as the default value. | -| bearerToken | *[v1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#secretkeyselector-v1-core) | Defines the secret key reference containing the bearer token to use when authenticating to Alertmanager. | +| bearerToken | *[v1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#secretkeyselector-v1-core) | Defines the secret key reference containing the bearer token to use when authenticating to Alertmanager. | | pathPrefix | string | Defines the path prefix to add in front of the push endpoint path. | | scheme | string | Defines the URL scheme to use when communicating with Alertmanager instances. Possible values are `http` or `https`. The default value is `http`. | | staticConfigs | []string | A list of statically configured Alertmanager endpoints in the form of `:`. | @@ -88,11 +88,11 @@ The `AlertmanagerMainConfig` resource defines settings for the Alertmanager comp | enableUserAlertmanagerConfig | bool | A Boolean flag that enables or disables user-defined namespaces to be selected for `AlertmanagerConfig` lookups. This setting only applies if the user workload monitoring instance of Alertmanager is not enabled. The default value is `false`. | | logLevel | string | Defines the log level setting for Alertmanager. The possible values are: `error`, `warn`, `info`, `debug`. The default value is `info`. | | nodeSelector | map[string]string | Defines the nodes on which the Pods are scheduled. | -| resources | *[v1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core) | Defines resource requests and limits for the Alertmanager container. | +| resources | *[v1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#resourcerequirements-v1-core) | Defines resource requests and limits for the Alertmanager container. | | secrets | []string | Defines a list of secrets that need to be mounted into the Alertmanager. The secrets must reside within the same namespace as the Alertmanager object. They will be added as volumes named secret- and mounted at /etc/alertmanager/secrets/ within the 'alertmanager' container of the Alertmanager Pods. | -| tolerations | [][v1.Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#toleration-v1-core) | Defines tolerations for the pods. | +| tolerations | [][v1.Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#toleration-v1-core) | Defines tolerations for the pods. | | topologySpreadConstraints | []v1.TopologySpreadConstraint | Defines a pod's topology spread constraints. 
| -| volumeClaimTemplate | *[monv1.EmbeddedPersistentVolumeClaim](https://github.com/prometheus-operator/prometheus-operator/blob/v0.81.0/Documentation/api.md#embeddedpersistentvolumeclaim) | Defines persistent storage for Alertmanager. Use this setting to configure the persistent volume claim, including storage class, volume size, and name. | +| volumeClaimTemplate | *[monv1.EmbeddedPersistentVolumeClaim](https://github.com/prometheus-operator/prometheus-operator/blob/v0.82.0/Documentation/api.md#embeddedpersistentvolumeclaim) | Defines persistent storage for Alertmanager. Use this setting to configure the persistent volume claim, including storage class, volume size, and name. | [Back to TOC](#table-of-contents) @@ -110,12 +110,12 @@ The `AlertmanagerUserWorkloadConfig` resource defines the settings for the Alert | enabled | bool | A Boolean flag that enables or disables a dedicated instance of Alertmanager for user-defined alerts in the `openshift-user-workload-monitoring` namespace. The default value is `false`. | | enableAlertmanagerConfig | bool | A Boolean flag to enable or disable user-defined namespaces to be selected for `AlertmanagerConfig` lookup. The default value is `false`. | | logLevel | string | Defines the log level setting for Alertmanager for user workload monitoring. The possible values are `error`, `warn`, `info`, and `debug`. The default value is `info`. | -| resources | *[v1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core) | Defines resource requests and limits for the Alertmanager container. | +| resources | *[v1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#resourcerequirements-v1-core) | Defines resource requests and limits for the Alertmanager container. | | secrets | []string | Defines a list of secrets that need to be mounted into the Alertmanager. The secrets must reside within the same namespace as the Alertmanager object. They will be added as volumes named secret- and mounted at /etc/alertmanager/secrets/ within the 'alertmanager' container of the Alertmanager Pods. | | nodeSelector | map[string]string | Defines the nodes on which the pods are scheduled. | -| tolerations | [][v1.Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#toleration-v1-core) | Defines tolerations for the pods. | +| tolerations | [][v1.Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#toleration-v1-core) | Defines tolerations for the pods. | | topologySpreadConstraints | []v1.TopologySpreadConstraint | Defines a pod's topology spread constraints. | -| volumeClaimTemplate | *[monv1.EmbeddedPersistentVolumeClaim](https://github.com/prometheus-operator/prometheus-operator/blob/v0.81.0/Documentation/api.md#embeddedpersistentvolumeclaim) | Defines persistent storage for Alertmanager. Use this setting to configure the persistent volume claim, including storage class, volume size and name. | +| volumeClaimTemplate | *[monv1.EmbeddedPersistentVolumeClaim](https://github.com/prometheus-operator/prometheus-operator/blob/v0.82.0/Documentation/api.md#embeddedpersistentvolumeclaim) | Defines persistent storage for Alertmanager. Use this setting to configure the persistent volume claim, including storage class, volume size and name. 
| [Back to TOC](#table-of-contents) @@ -153,8 +153,8 @@ The `K8sPrometheusAdapter` resource defines settings for the Prometheus Adapter | -------- | ---- | ----------- | | audit | *Audit | Defines the audit configuration used by the Prometheus Adapter instance. Possible profile values are: `metadata`, `request`, `requestresponse`, and `none`. The default value is `metadata`. | | nodeSelector | map[string]string | Defines the nodes on which the pods are scheduled. | -| resources | *[v1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core) | Defines resource requests and limits for the PrometheusAdapter container. | -| tolerations | [][v1.Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#toleration-v1-core) | Defines tolerations for the pods. | +| resources | *[v1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#resourcerequirements-v1-core) | Defines resource requests and limits for the PrometheusAdapter container. | +| tolerations | [][v1.Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#toleration-v1-core) | Defines tolerations for the pods. | | topologySpreadConstraints | []v1.TopologySpreadConstraint | Defines a pod's topology spread constraints. | [Back to TOC](#table-of-contents) @@ -171,8 +171,8 @@ The `KubeStateMetricsConfig` resource defines settings for the `kube-state-metri | Property | Type | Description | | -------- | ---- | ----------- | | nodeSelector | map[string]string | Defines the nodes on which the pods are scheduled. | -| resources | *[v1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core) | Defines resource requests and limits for the KubeStateMetrics container. | -| tolerations | [][v1.Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#toleration-v1-core) | Defines tolerations for the pods. | +| resources | *[v1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#resourcerequirements-v1-core) | Defines resource requests and limits for the KubeStateMetrics container. | +| tolerations | [][v1.Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#toleration-v1-core) | Defines tolerations for the pods. | | topologySpreadConstraints | []v1.TopologySpreadConstraint | Defines a pod's topology spread constraints. | [Back to TOC](#table-of-contents) @@ -190,9 +190,9 @@ The `MetricsServerConfig` resource defines settings for the Metrics Server compo | -------- | ---- | ----------- | | audit | *Audit | Defines the audit configuration used by the Metrics Server instance. Possible profile values are: `metadata`, `request`, `requestresponse`, and `none`. The default value is `metadata`. | | nodeSelector | map[string]string | Defines the nodes on which the pods are scheduled. | -| tolerations | [][v1.Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#toleration-v1-core) | Defines tolerations for the pods. | +| tolerations | [][v1.Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#toleration-v1-core) | Defines tolerations for the pods. | | verbosity | uint8 | Defines the verbosity of log messages for Metrics Server. Valid values are positive integers, values over 10 are usually unnecessary. The default value is `0`. 
| -| resources | *[v1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core) | Defines resource requests and limits for the Metrics Server container. | +| resources | *[v1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#resourcerequirements-v1-core) | Defines resource requests and limits for the Metrics Server container. | | topologySpreadConstraints | []v1.TopologySpreadConstraint | Defines a pod's topology spread constraints. | [Back to TOC](#table-of-contents) @@ -209,8 +209,8 @@ The `MonitoringPluginConfig` resource defines settings for the Console Plugin co | Property | Type | Description | | -------- | ---- | ----------- | | nodeSelector | map[string]string | Defines the nodes on which the Pods are scheduled. | -| resources | *[v1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core) | Defines resource requests and limits for the console-plugin container. | -| tolerations | [][v1.Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#toleration-v1-core) | Defines tolerations for the pods. | +| resources | *[v1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#resourcerequirements-v1-core) | Defines resource requests and limits for the console-plugin container. | +| tolerations | [][v1.Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#toleration-v1-core) | Defines tolerations for the pods. | | topologySpreadConstraints | []v1.TopologySpreadConstraint | Defines a pod's topology spread constraints. | [Back to TOC](#table-of-contents) @@ -389,7 +389,7 @@ The `NodeExporterConfig` resource defines settings for the `node-exporter` agent | collectors | [NodeExporterCollectorConfig](#nodeexportercollectorconfig) | Defines which collectors are enabled and their additional configuration parameters. | | maxProcs | uint32 | The target number of CPUs on which the Node Exporter's process will run. Use this setting to override the default value, which is set either to `4` or to the number of CPUs on the host, whichever is smaller. The default value is computed at runtime and set via the `GOMAXPROCS` environment variable before Node Exporter is launched. If a kernel deadlock occurs or if performance degrades when reading from `sysfs` concurrently, you can change this value to `1`, which limits Node Exporter to running on one CPU. For nodes with a high CPU count, setting the limit to a low number saves resources by preventing Go routines from being scheduled to run on all CPUs. However, I/O performance degrades if the `maxProcs` value is set too low, and there are many metrics to collect. | | ignoredNetworkDevices | *[]string | A list of network devices, as regular expressions, to be excluded from the relevant collector configuration such as `netdev` and `netclass`. When not set, the Cluster Monitoring Operator uses a predefined list of devices to be excluded to minimize the impact on memory usage. When set as an empty list, no devices are excluded. If you modify this setting, monitor the `prometheus-k8s` deployment closely for excessive memory usage. | -| resources | *[v1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core) | Defines resource requests and limits for the NodeExporter container. 
@@ -405,8 +405,8 @@ The `OpenShiftStateMetricsConfig` resource defines settings for the `openshift-s
 | Property | Type | Description |
 | -------- | ---- | ----------- |
 | nodeSelector | map[string]string | Defines the nodes on which the pods are scheduled. |
-| resources | *[v1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core) | Defines resource requests and limits for the OpenShiftStateMetrics container. |
-| tolerations | [][v1.Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#toleration-v1-core) | Defines tolerations for the pods. |
+| resources | *[v1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#resourcerequirements-v1-core) | Defines resource requests and limits for the OpenShiftStateMetrics container. |
+| tolerations | [][v1.Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#toleration-v1-core) | Defines tolerations for the pods. |
 | topologySpreadConstraints | []v1.TopologySpreadConstraint | Defines a pod's topology spread constraints. |
 
 [Back to TOC](#table-of-contents)
@@ -429,13 +429,13 @@ The `PrometheusK8sConfig` resource defines settings for the Prometheus component
 | nodeSelector | map[string]string | Defines the nodes on which the pods are scheduled. |
 | queryLogFile | string | Specifies the file to which PromQL queries are logged. This setting can be either a filename, in which case the queries are saved to an `emptyDir` volume at `/var/log/prometheus`, or a full path to a location where an `emptyDir` volume will be mounted and the queries saved. Writing to `/dev/stderr`, `/dev/stdout` or `/dev/null` is supported, but writing to any other `/dev/` path is not supported. Relative paths are also not supported. By default, PromQL queries are not logged. |
 | remoteWrite | [][RemoteWriteSpec](#remotewritespec) | Defines the remote write configuration, including URL, authentication, and relabeling settings. |
-| resources | *[v1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core) | Defines resource requests and limits for the Prometheus container. |
+| resources | *[v1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#resourcerequirements-v1-core) | Defines resource requests and limits for the Prometheus container. |
 | retention | string | Defines the duration for which Prometheus retains data. This definition must be specified using the following regular expression pattern: `[0-9]+(ms\|s\|m\|h\|d\|w\|y)` (ms = milliseconds, s = seconds, m = minutes, h = hours, d = days, w = weeks, y = years). The default value is `15d`. |
 | retentionSize | string | Defines the maximum amount of disk space used by data blocks plus the write-ahead log (WAL). Supported values are `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`, `EB`, and `EiB`. By default, no limit is defined. |
-| tolerations | [][v1.Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#toleration-v1-core) | Defines tolerations for the pods. |
+| tolerations | [][v1.Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#toleration-v1-core) | Defines tolerations for the pods. |
 | topologySpreadConstraints | []v1.TopologySpreadConstraint | Defines the pod's topology spread constraints. |
 | collectionProfile | CollectionProfile | Defines the metrics collection profile that Prometheus uses to collect metrics from the platform components. Supported values are `full` or `minimal`. In the `full` profile (default), Prometheus collects all metrics that are exposed by the platform components. In the `minimal` profile, Prometheus only collects metrics necessary for the default platform alerts, recording rules, telemetry and console dashboards. |
-| volumeClaimTemplate | *[monv1.EmbeddedPersistentVolumeClaim](https://github.com/prometheus-operator/prometheus-operator/blob/v0.81.0/Documentation/api.md#embeddedpersistentvolumeclaim) | Defines persistent storage for Prometheus. Use this setting to configure the persistent volume claim, including storage class, volume size and name. |
+| volumeClaimTemplate | *[monv1.EmbeddedPersistentVolumeClaim](https://github.com/prometheus-operator/prometheus-operator/blob/v0.82.0/Documentation/api.md#embeddedpersistentvolumeclaim) | Defines persistent storage for Prometheus. Use this setting to configure the persistent volume claim, including storage class, volume size and name. |
 
 [Back to TOC](#table-of-contents)
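Several `PrometheusK8sConfig` fields above combine naturally. A hedged sketch with field names taken from the table (the storage class and sizes are hypothetical):

```yaml
    prometheusK8s:
      retention: 15d             # pattern [0-9]+(ms|s|m|h|d|w|y)
      retentionSize: 50GiB       # caps data blocks plus WAL; unlimited by default
      collectionProfile: minimal # full (default) | minimal
      queryLogFile: /dev/stderr  # a filename or a supported /dev/ path
      volumeClaimTemplate:
        spec:
          storageClassName: example-storage-class  # hypothetical
          resources:
            requests:
              storage: 100Gi
```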
@@ -450,7 +450,7 @@ The `PrometheusOperatorAdmissionWebhookConfig` resource defines settings for the
 | Property | Type | Description |
 | -------- | ---- | ----------- |
-| resources | *[v1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core) | Defines resource requests and limits for the prometheus-operator-admission-webhook container. |
+| resources | *[v1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#resourcerequirements-v1-core) | Defines resource requests and limits for the prometheus-operator-admission-webhook container. |
 | topologySpreadConstraints | []v1.TopologySpreadConstraint | Defines a pod's topology spread constraints. |
 
 [Back to TOC](#table-of-contents)
@@ -468,8 +468,8 @@ The `PrometheusOperatorConfig` resource defines settings for the Prometheus Oper
 | -------- | ---- | ----------- |
 | logLevel | string | Defines the log level settings for Prometheus Operator. The possible values are `error`, `warn`, `info`, and `debug`. The default value is `info`. |
 | nodeSelector | map[string]string | Defines the nodes on which the pods are scheduled. |
-| resources | *[v1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core) | Defines resource requests and limits for the PrometheusOperator container. |
-| tolerations | [][v1.Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#toleration-v1-core) | Defines tolerations for the pods. |
+| resources | *[v1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#resourcerequirements-v1-core) | Defines resource requests and limits for the PrometheusOperator container. |
+| tolerations | [][v1.Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#toleration-v1-core) | Defines tolerations for the pods. |
 | topologySpreadConstraints | []v1.TopologySpreadConstraint | Defines a pod's topology spread constraints. |
 
 [Back to TOC](#table-of-contents)
@@ -498,12 +498,12 @@ The `PrometheusRestrictedConfig` resource defines the settings for the Prometheu
 | nodeSelector | map[string]string | Defines the nodes on which the pods are scheduled. |
 | queryLogFile | string | Specifies the file to which PromQL queries are logged. This setting can be either a filename, in which case the queries are saved to an `emptyDir` volume at `/var/log/prometheus`, or a full path to a location where an `emptyDir` volume will be mounted and the queries saved. Writing to `/dev/stderr`, `/dev/stdout` or `/dev/null` is supported, but writing to any other `/dev/` path is not supported. Relative paths are also not supported. By default, PromQL queries are not logged. |
 | remoteWrite | [][RemoteWriteSpec](#remotewritespec) | Defines the remote write configuration, including URL, authentication, and relabeling settings. |
-| resources | *[v1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core) | Defines resource requests and limits for the Prometheus container. |
+| resources | *[v1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#resourcerequirements-v1-core) | Defines resource requests and limits for the Prometheus container. |
 | retention | string | Defines the duration for which Prometheus retains data. This definition must be specified using the following regular expression pattern: `[0-9]+(ms\|s\|m\|h\|d\|w\|y)` (ms = milliseconds, s = seconds, m = minutes, h = hours, d = days, w = weeks, y = years). The default value is `24h`. |
 | retentionSize | string | Defines the maximum amount of disk space used by data blocks plus the write-ahead log (WAL). Supported values are `B`, `KB`, `KiB`, `MB`, `MiB`, `GB`, `GiB`, `TB`, `TiB`, `PB`, `PiB`, `EB`, and `EiB`. The default value is `nil`. |
-| tolerations | [][v1.Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#toleration-v1-core) | Defines tolerations for the pods. |
+| tolerations | [][v1.Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#toleration-v1-core) | Defines tolerations for the pods. |
 | topologySpreadConstraints | []v1.TopologySpreadConstraint | Defines a pod's topology spread constraints. |
-| volumeClaimTemplate | *[monv1.EmbeddedPersistentVolumeClaim](https://github.com/prometheus-operator/prometheus-operator/blob/v0.81.0/Documentation/api.md#embeddedpersistentvolumeclaim) | Defines persistent storage for Prometheus. Use this setting to configure the storage class and size of a volume. |
+| volumeClaimTemplate | *[monv1.EmbeddedPersistentVolumeClaim](https://github.com/prometheus-operator/prometheus-operator/blob/v0.82.0/Documentation/api.md#embeddedpersistentvolumeclaim) | Defines persistent storage for Prometheus. Use this setting to configure the storage class and size of a volume. |
 
 [Back to TOC](#table-of-contents)
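`PrometheusRestrictedConfig` exposes the analogous knobs for the user-workload Prometheus. A sketch, assuming it maps onto the `prometheus` key of the `user-workload-monitoring-config` ConfigMap (that mapping is an assumption, not shown in this patch; values illustrative):

```yaml
# Hypothetical sketch for the user-workload monitoring configuration.
apiVersion: v1
kind: ConfigMap
metadata:
  name: user-workload-monitoring-config
  namespace: openshift-user-workload-monitoring
data:
  config.yaml: |
    prometheus:
      retention: 24h        # default per the table above
      retentionSize: 10GiB
      volumeClaimTemplate:
        spec:
          resources:
            requests:
              storage: 20Gi
```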
@@ -521,20 +521,20 @@ The `RemoteWriteSpec` resource defines the settings for remote write storage.
 | Property | Type | Description |
 | -------- | ---- | ----------- |
 | authorization | *monv1.SafeAuthorization | Defines the authorization settings for remote write storage. |
-| basicAuth | *[monv1.BasicAuth](https://github.com/prometheus-operator/prometheus-operator/blob/v0.81.0/Documentation/api.md#basicauth) | Defines basic authentication settings for the remote write endpoint URL. |
+| basicAuth | *[monv1.BasicAuth](https://github.com/prometheus-operator/prometheus-operator/blob/v0.82.0/Documentation/api.md#basicauth) | Defines basic authentication settings for the remote write endpoint URL. |
 | bearerTokenFile | string | Defines the file that contains the bearer token for the remote write endpoint. However, because you cannot mount secrets in a pod, in practice you can only reference the token of the service account. |
 | headers | map[string]string | Specifies the custom HTTP headers to be sent along with each remote write request. Headers set by Prometheus cannot be overwritten. |
-| metadataConfig | *[monv1.MetadataConfig](https://github.com/prometheus-operator/prometheus-operator/blob/v0.81.0/Documentation/api.md#metadataconfig) | Defines settings for sending series metadata to remote write storage. |
+| metadataConfig | *[monv1.MetadataConfig](https://github.com/prometheus-operator/prometheus-operator/blob/v0.82.0/Documentation/api.md#metadataconfig) | Defines settings for sending series metadata to remote write storage. |
 | name | string | Defines the name of the remote write queue. This name is used in metrics and logging to differentiate queues. If specified, this name must be unique. |
 | oauth2 | *monv1.OAuth2 | Defines OAuth2 authentication settings for the remote write endpoint. |
 | proxyUrl | string | Defines an optional proxy URL. If the cluster-wide proxy is enabled, it replaces the proxyUrl setting. The cluster-wide proxy supports both HTTP and HTTPS proxies, with HTTPS taking precedence. |
-| queueConfig | *[monv1.QueueConfig](https://github.com/prometheus-operator/prometheus-operator/blob/v0.81.0/Documentation/api.md#queueconfig) | Allows tuning configuration for remote write queue parameters. |
+| queueConfig | *[monv1.QueueConfig](https://github.com/prometheus-operator/prometheus-operator/blob/v0.82.0/Documentation/api.md#queueconfig) | Allows tuning configuration for remote write queue parameters. |
 | remoteTimeout | string | Defines the timeout value for requests to the remote write endpoint. |
 | sendExemplars | *bool | Enables sending exemplars via remote write. When enabled, Prometheus is configured to store a maximum of 100,000 exemplars in memory. Note that this setting only applies to user-defined monitoring. It is not applicable to default in-cluster monitoring. |
 | sigv4 | *monv1.Sigv4 | Defines AWS Signature Version 4 authentication settings. |
-| tlsConfig | *[monv1.SafeTLSConfig](https://github.com/prometheus-operator/prometheus-operator/blob/v0.81.0/Documentation/api.md#safetlsconfig) | Defines TLS authentication settings for the remote write endpoint. |
+| tlsConfig | *[monv1.SafeTLSConfig](https://github.com/prometheus-operator/prometheus-operator/blob/v0.82.0/Documentation/api.md#safetlsconfig) | Defines TLS authentication settings for the remote write endpoint. |
 | url | string | Defines the URL of the remote write endpoint to which samples will be sent. |
-| writeRelabelConfigs | [][monv1.RelabelConfig](https://github.com/prometheus-operator/prometheus-operator/blob/v0.81.0/Documentation/api.md#relabelconfig) | Defines the list of remote write relabel configurations. |
+| writeRelabelConfigs | [][monv1.RelabelConfig](https://github.com/prometheus-operator/prometheus-operator/blob/v0.82.0/Documentation/api.md#relabelconfig) | Defines the list of remote write relabel configurations. |
 
 [Back to TOC](#table-of-contents)
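Read as one block, the `RemoteWriteSpec` rows describe a single queue entry. A hedged sketch (the endpoint URL, Secret names, and thresholds are hypothetical; the nested shapes follow the monv1 types linked above):

```yaml
    prometheusK8s:
      remoteWrite:
      - url: "https://remote-write.example.com/api/v1/write"  # hypothetical endpoint
        name: example-queue        # must be unique when set
        remoteTimeout: 30s
        basicAuth:
          username:
            name: remote-write-credentials  # hypothetical Secret
            key: username
          password:
            name: remote-write-credentials
            key: password
        queueConfig:
          capacity: 10000
          maxShards: 50
        writeRelabelConfigs:
        - sourceLabels: [__name__]
          regex: my_app_.+              # hypothetical metric filter
          action: keep
```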
@@ -551,9 +551,9 @@ The `TLSConfig` resource configures the settings for TLS connections.
 | Property | Type | Description |
 | -------- | ---- | ----------- |
-| ca | *[v1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#secretkeyselector-v1-core) | Defines the secret key reference containing the Certificate Authority (CA) to use for the remote host. |
-| cert | *[v1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#secretkeyselector-v1-core) | Defines the secret key reference containing the public certificate to use for the remote host. |
-| key | *[v1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#secretkeyselector-v1-core) | Defines the secret key reference containing the private key to use for the remote host. |
+| ca | *[v1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#secretkeyselector-v1-core) | Defines the secret key reference containing the Certificate Authority (CA) to use for the remote host. |
+| cert | *[v1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#secretkeyselector-v1-core) | Defines the secret key reference containing the public certificate to use for the remote host. |
+| key | *[v1.SecretKeySelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#secretkeyselector-v1-core) | Defines the secret key reference containing the private key to use for the remote host. |
 | serverName | string | Used to verify the hostname on the returned certificate. |
 | insecureSkipVerify | bool | When set to `true`, disables the verification of the remote host's certificate and name. |
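Every row of the `TLSConfig` table above except the last two points at a key of a Secret. The fragment below shows the shape only; where it attaches depends on the parent resource, and the Secret name and keys are hypothetical:

```yaml
      tlsConfig:
        ca:
          name: example-tls   # Secret holding the CA bundle (hypothetical)
          key: ca.crt
        cert:
          name: example-tls
          key: tls.crt
        key:
          name: example-tls
          key: tls.key
        serverName: remote.example.com  # hostname verified on the returned certificate
        insecureSkipVerify: false
```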
@@ -574,8 +574,8 @@ The `TLSConfig` resource configures the settings for TLS connections.
 | Property | Type | Description |
 | -------- | ---- | ----------- |
 | nodeSelector | map[string]string | Defines the nodes on which the pods are scheduled. |
-| resources | *[v1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core) | Defines resource requests and limits for the TelemeterClient container. |
-| tolerations | [][v1.Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#toleration-v1-core) | Defines tolerations for the pods. |
+| resources | *[v1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#resourcerequirements-v1-core) | Defines resource requests and limits for the TelemeterClient container. |
+| tolerations | [][v1.Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#toleration-v1-core) | Defines tolerations for the pods. |
 | topologySpreadConstraints | []v1.TopologySpreadConstraint | Defines a pod's topology spread constraints. |
 
 [Back to TOC](#table-of-contents)
@@ -595,8 +595,8 @@ The `ThanosQuerierConfig` resource defines settings for the Thanos Querier compo
 | logLevel | string | Defines the log level setting for Thanos Querier. The possible values are `error`, `warn`, `info`, and `debug`. The default value is `info`. |
 | enableCORS | bool | A Boolean flag that enables setting CORS headers. The headers allow access from any origin. The default value is `false`. |
 | nodeSelector | map[string]string | Defines the nodes on which the pods are scheduled. |
-| resources | *[v1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core) | Defines resource requests and limits for the Thanos Querier container. |
-| tolerations | [][v1.Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#toleration-v1-core) | Defines tolerations for the pods. |
+| resources | *[v1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#resourcerequirements-v1-core) | Defines resource requests and limits for the Thanos Querier container. |
+| tolerations | [][v1.Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#toleration-v1-core) | Defines tolerations for the pods. |
 | topologySpreadConstraints | []v1.TopologySpreadConstraint | Defines a pod's topology spread constraints. |
 
 [Back to TOC](#table-of-contents)
@@ -616,11 +616,11 @@ The `ThanosRulerConfig` resource defines configuration for the Thanos Ruler inst
 | evaluationInterval | string | Configures the default interval between Prometheus rule evaluations in case the `PrometheusRule` resource does not specify any value. The interval must be set between 5 seconds and 5 minutes. The value can be expressed in seconds (for example, `30s`), minutes (for example, `1m`), or a mix of minutes and seconds (for example, `1m30s`). It applies to `PrometheusRule` resources without the `openshift.io/prometheus-rule-evaluation-scope=\"leaf-prometheus\"` label. The default value is `15s`. |
 | logLevel | string | Defines the log level setting for Thanos Ruler. The possible values are `error`, `warn`, `info`, and `debug`. The default value is `info`. |
 | nodeSelector | map[string]string | Defines the nodes on which the Pods are scheduled. |
-| resources | *[v1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core) | Defines resource requests and limits for the Thanos Ruler container. |
+| resources | *[v1.ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#resourcerequirements-v1-core) | Defines resource requests and limits for the Thanos Ruler container. |
 | retention | string | Defines the duration for which Prometheus retains data. This definition must be specified using the following regular expression pattern: `[0-9]+(ms\|s\|m\|h\|d\|w\|y)` (ms = milliseconds, s = seconds, m = minutes, h = hours, d = days, w = weeks, y = years). The default value is `24h`. |
-| tolerations | [][v1.Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#toleration-v1-core) | Defines tolerations for the pods. |
+| tolerations | [][v1.Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#toleration-v1-core) | Defines tolerations for the pods. |
 | topologySpreadConstraints | []v1.TopologySpreadConstraint | Defines topology spread constraints for the pods. |
-| volumeClaimTemplate | *[monv1.EmbeddedPersistentVolumeClaim](https://github.com/prometheus-operator/prometheus-operator/blob/v0.81.0/Documentation/api.md#embeddedpersistentvolumeclaim) | Defines persistent storage for Thanos Ruler. Use this setting to configure the storage class and size of a volume. |
+| volumeClaimTemplate | *[monv1.EmbeddedPersistentVolumeClaim](https://github.com/prometheus-operator/prometheus-operator/blob/v0.82.0/Documentation/api.md#embeddedpersistentvolumeclaim) | Defines persistent storage for Thanos Ruler. Use this setting to configure the storage class and size of a volume. |
 
 [Back to TOC](#table-of-contents)
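`ThanosRulerConfig` follows the same pattern as the Prometheus resources. A sketch, assuming it maps onto the `thanosRuler` key of the user-workload ConfigMap shown earlier (an assumption; values illustrative):

```yaml
    thanosRuler:
      evaluationInterval: 30s  # between 5s and 5m; default 15s
      retention: 24h
      logLevel: info
      volumeClaimTemplate:
        spec:
          resources:
            requests:
              storage: 10Gi
```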
diff --git a/go.mod b/go.mod
index c3875adeec..fbaa169c39 100644
--- a/go.mod
+++ b/go.mod
@@ -19,8 +19,8 @@ require (
 	github.com/openshift/client-go v0.0.0-20250125113824-8e1f0b8fa9a7
 	github.com/openshift/library-go v0.0.0-20250402180609-ce2ba53fb2a4
 	github.com/pborman/uuid v1.2.1
-	github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.81.0
-	github.com/prometheus-operator/prometheus-operator/pkg/client v0.81.0
+	github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.82.0
+	github.com/prometheus-operator/prometheus-operator/pkg/client v0.82.0
 	github.com/prometheus/alertmanager v0.28.1
 	github.com/prometheus/client_golang v1.20.5
 	github.com/prometheus/common v0.61.0
@@ -28,24 +28,24 @@
 	github.com/spf13/cobra v1.8.1
 	github.com/stretchr/testify v1.10.0
 	github.com/tidwall/gjson v1.18.0
-	golang.org/x/net v0.38.0
-	golang.org/x/sync v0.12.0
-	golang.org/x/text v0.23.0
+	golang.org/x/net v0.39.0
+	golang.org/x/sync v0.13.0
+	golang.org/x/text v0.24.0
 	gopkg.in/yaml.v2 v2.4.0
 	gopkg.in/yaml.v3 v3.0.1
 	k8s.io/api v0.33.2
-	k8s.io/apiextensions-apiserver v0.32.2
+	k8s.io/apiextensions-apiserver v0.32.3
 	k8s.io/apimachinery v0.33.2
-	k8s.io/apiserver v0.32.2
+	k8s.io/apiserver v0.32.3
 	k8s.io/autoscaler/vertical-pod-autoscaler v1.0.0
-	k8s.io/client-go v0.32.2
-	k8s.io/component-base v0.32.2
+	k8s.io/client-go v0.32.3
+	k8s.io/component-base v0.32.3
 	k8s.io/klog/v2 v2.130.1
 	k8s.io/kube-aggregator v0.32.1
 	k8s.io/kubernetes v1.32.1
 	k8s.io/metrics v0.31.1
-	k8s.io/utils v0.0.0-20241210054802-24370beab758
-	sigs.k8s.io/controller-runtime v0.20.3
+	k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e
+	sigs.k8s.io/controller-runtime v0.20.4
 	sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8
 	sigs.k8s.io/yaml v1.4.0
 )
@@ -76,21 +76,21 @@ require (
 	github.com/dennwc/varint v1.0.0 // indirect
 	github.com/distribution/reference v0.6.0 // indirect
 	github.com/docker/go-units v0.5.0 // indirect
-	github.com/emicklei/go-restful/v3 v3.12.1 // indirect
+	github.com/emicklei/go-restful/v3 v3.12.2 // indirect
 	github.com/euank/go-kmsg-parser v2.0.0+incompatible // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
 	github.com/fsnotify/fsnotify v1.7.0 // indirect
-	github.com/fxamacker/cbor/v2 v2.7.0 // indirect
+	github.com/fxamacker/cbor/v2 v2.8.0 // indirect
 	github.com/go-logfmt/logfmt v0.6.0 // indirect
 	github.com/go-logr/logr v1.4.2 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-openapi/analysis v0.23.0 // indirect
 	github.com/go-openapi/errors v0.22.0 // indirect
-	github.com/go-openapi/jsonpointer v0.21.0 // indirect
+	github.com/go-openapi/jsonpointer v0.21.1 // indirect
 	github.com/go-openapi/jsonreference v0.21.0 // indirect
 	github.com/go-openapi/loads v0.22.0 // indirect
 	github.com/go-openapi/spec v0.21.0 // indirect
-	github.com/go-openapi/swag v0.23.0 // indirect
+	github.com/go-openapi/swag v0.23.1 // indirect
 	github.com/go-openapi/validate v0.24.0 // indirect
 	github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
 	github.com/godbus/dbus/v5 v5.1.0 // indirect
@@ -160,19 +160,19 @@ require (
 	go.uber.org/atomic v1.11.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	go.uber.org/zap v1.27.0 // indirect
-	golang.org/x/crypto v0.36.0 // indirect
+	golang.org/x/crypto v0.37.0 // indirect
 	golang.org/x/exp v0.0.0-20241004190924-225e2abe05e6 // indirect
-	golang.org/x/oauth2 v0.25.0 // indirect
-	golang.org/x/sys v0.31.0 // indirect
-	golang.org/x/term v0.30.0 // indirect
-	golang.org/x/time v0.9.0 // indirect
+	golang.org/x/oauth2 v0.29.0 // indirect
+	golang.org/x/sys v0.32.0 // indirect
+	golang.org/x/term v0.31.0 // indirect
+	golang.org/x/time v0.11.0 // indirect
 	golang.org/x/tools v0.28.0 // indirect
 	gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
 	google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect
 	google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect
 	google.golang.org/genproto/googleapis/rpc v0.0.0-20240924160255-9d4c2d233b61 // indirect
 	google.golang.org/grpc v1.65.0 // indirect
-	google.golang.org/protobuf v1.36.5 // indirect
+	google.golang.org/protobuf v1.36.6 // indirect
 	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
@@ -183,7 +183,7 @@ require (
 	k8s.io/cri-client v0.0.0 // indirect
 	k8s.io/csi-translation-lib v0.0.0 // indirect
 	k8s.io/dynamic-resource-allocation v0.0.0 // indirect
-	k8s.io/kms v0.32.2 // indirect
+	k8s.io/kms v0.32.3 // indirect
 	k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
 	k8s.io/kube-scheduler v0.0.0 // indirect
 	k8s.io/kubectl v0.0.0 // indirect
@@ -193,7 +193,7 @@ require (
 	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect
 	sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96 // indirect
 	sigs.k8s.io/randfill v1.0.0 // indirect
-	sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
+	sigs.k8s.io/structured-merge-diff/v4 v4.7.0 // indirect
 )
 
 // Some utils are only available in the fork https://github.com/openshift-eng/openshift-tests-extension/blob/8ef37c67e666954f9b95e52320c9c20d6b53d241/go.mod#L37
diff --git a/go.sum b/go.sum
index ce00bc7bbd..e75e7375fa 100644
--- a/go.sum
+++ b/go.sum
@@ -131,8 +131,8 @@ github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4
 github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
 github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
 github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
-github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU=
-github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
+github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
 github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
@@ -151,8 +151,8 @@ github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2
 github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
 github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
-github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
-github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
+github.com/fxamacker/cbor/v2 v2.8.0 h1:fFtUGXUzXPHTIUdne5+zzMPTfffl3RD5qYnkY40vtxU=
+github.com/fxamacker/cbor/v2 v2.8.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
 github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
@@ -179,8 +179,8 @@ github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC0
 github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo=
 github.com/go-openapi/errors v0.22.0 h1:c4xY/OLxUBSTiepAg3j/MHuAv5mJhnf53LLMWFB+u/w=
 github.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE=
-github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
-github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
+github.com/go-openapi/jsonpointer v0.21.1 h1:whnzv/pNXtK2FbX/W9yJfRmE2gsmkfahjMKB0fZvcic=
+github.com/go-openapi/jsonpointer v0.21.1/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk=
 github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
 github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
 github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco=
@@ -189,8 +189,8 @@ github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9Z
 github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk=
 github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c=
 github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4=
-github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
-github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
+github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU=
+github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0=
 github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58=
 github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ=
 github.com/go-resty/resty/v2 v2.13.1 h1:x+LHXBI2nMB1vqndymf26quycC4aggYJ7DECYbiz03g=
@@ -475,10 +475,10 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
 github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.81.0 h1:mSii7z+TihzdeULnGjLnNikgtDbeViY/wW8s3430rhE=
-github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.81.0/go.mod h1:YfnEQzw7tUQa0Sjiz8V6QFc6JUGE+i5wybsjc3EOKn8=
-github.com/prometheus-operator/prometheus-operator/pkg/client v0.81.0 h1:z8ETgiD2hThJi3+3S8eKAbC9/pwPq1kGt8HkeGlDstw=
-github.com/prometheus-operator/prometheus-operator/pkg/client v0.81.0/go.mod h1:EKK9z5OIxxwaCZmgaidiIfvy6mF7x8M3AJHmxwx+QB4=
+github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.82.0 h1:Ee6zu4IR/WKYEcYHL4+gbC1A3GAzlHWxSjjMyRVBHYw=
+github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.82.0/go.mod h1:hY5yoQsoIalncoxYqXXCDL5y7f+GGYYlW9Bi2IdU5KY=
+github.com/prometheus-operator/prometheus-operator/pkg/client v0.82.0 h1:g/wIdMrRyQ1Fac8plyQMXzky8R9kOi243tQi3a2YkyU=
+github.com/prometheus-operator/prometheus-operator/pkg/client v0.82.0/go.mod h1:yEp9v3FEYT+iR2ujaXFVS8ugTIQk0Mx+wwXGXoF5az8=
 github.com/prometheus/alertmanager v0.28.1 h1:BK5pCoAtaKg01BYRUJhEDV1tqJMEtYBGzPw8QdvnnvA=
 github.com/prometheus/alertmanager v0.28.1/go.mod h1:0StpPUDDHi1VXeM7p2yYfeZgLVi/PPlt39vo9LQUHxM=
 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
@@ -620,8 +620,8 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U
 golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
-golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
+golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
+golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -687,16 +687,16 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
-golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
+golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
+golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70=
-golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
+golang.org/x/oauth2 v0.29.0 h1:WdYw2tdTK1S8olAzWHdgeqfy+Mtm9XNhv/xJsY65d98=
+golang.org/x/oauth2 v0.29.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -707,8 +707,8 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
-golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
+golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -747,24 +747,24 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
-golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
+golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
-golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
+golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o=
+golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
-golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
+golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
+golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
-golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
+golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -897,8 +897,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
 google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
 google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
-google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
-google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
+google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -932,39 +932,39 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
 honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
 honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
-k8s.io/apiextensions-apiserver v0.32.2 h1:2YMk285jWMk2188V2AERy5yDwBYrjgWYggscghPCvV4=
-k8s.io/apiextensions-apiserver v0.32.2/go.mod h1:GPwf8sph7YlJT3H6aKUWtd0E+oyShk/YHWQHf/OOgCA=
+k8s.io/apiextensions-apiserver v0.32.3 h1:4D8vy+9GWerlErCwVIbcQjsWunF9SUGNu7O7hiQTyPY=
+k8s.io/apiextensions-apiserver v0.32.3/go.mod h1:8YwcvVRMVzw0r1Stc7XfGAzB/SIVLunqApySV5V7Dss=
 k8s.io/apimachinery v0.33.2 h1:IHFVhqg59mb8PJWTLi8m1mAoepkUNYmptHsV+Z1m5jY=
 k8s.io/apimachinery v0.33.2/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
-k8s.io/apiserver v0.32.2 h1:WzyxAu4mvLkQxwD9hGa4ZfExo3yZZaYzoYvvVDlM6vw=
-k8s.io/apiserver v0.32.2/go.mod h1:PEwREHiHNU2oFdte7BjzA1ZyjWjuckORLIK/wLV5goM=
+k8s.io/apiserver v0.32.3 h1:kOw2KBuHOA+wetX1MkmrxgBr648ksz653j26ESuWNY8=
+k8s.io/apiserver v0.32.3/go.mod h1:q1x9B8E/WzShF49wh3ADOh6muSfpmFL0I2t+TG0Zdgc=
 k8s.io/autoscaler/vertical-pod-autoscaler v1.0.0 h1:y0TgWoHaeYEv3L1MfLC+D2WVxyN1fGr6axURHXq+wHE=
 k8s.io/autoscaler/vertical-pod-autoscaler v1.0.0/go.mod h1:w6/LjLR3DPQd57vlgvgbpzpuJKsCiily0+OzQI+nyfI=
-k8s.io/client-go v0.32.2 h1:4dYCD4Nz+9RApM2b/3BtVvBHw54QjMFUl1OLcJG5yOA=
-k8s.io/client-go v0.32.2/go.mod h1:fpZ4oJXclZ3r2nDOv+Ux3XcJutfrwjKTCHz2H3sww94=
-k8s.io/component-base v0.32.2 h1:1aUL5Vdmu7qNo4ZsE+569PV5zFatM9hl+lb3dEea2zU=
-k8s.io/component-base v0.32.2/go.mod h1:PXJ61Vx9Lg+P5mS8TLd7bCIr+eMJRQTyXe8KvkrvJq0=
+k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU=
+k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY=
+k8s.io/component-base v0.32.3 h1:98WJvvMs3QZ2LYHBzvltFSeJjEx7t5+8s71P7M74u8k=
+k8s.io/component-base v0.32.3/go.mod h1:LWi9cR+yPAv7cu2X9rZanTiFKB2kHA+JjmhkKjCZRpI=
 k8s.io/component-helpers v0.32.1 h1:TwdsSM1vW9GjnfX18lkrZbwE5G9psCIS2/rhenTDXd8=
 k8s.io/component-helpers v0.32.1/go.mod h1:1JT1Ei3FD29yFQ18F3laj1WyvxYdHIhyxx6adKMFQXI=
 k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
 k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/kms v0.32.2 h1:7Ff23ht7W40gTcDwUC8G5WjX5W/nxD8WxbNhIYYNZCI=
-k8s.io/kms v0.32.2/go.mod h1:Bk2evz/Yvk0oVrvm4MvZbgq8BD34Ksxs2SRHn4/UiOM=
+k8s.io/kms v0.32.3 h1:HhHw5+pRCzEJp3oFFJ1q5W2N6gAI7YkUg4ay4Z0dgwM=
+k8s.io/kms v0.32.3/go.mod h1:Bk2evz/Yvk0oVrvm4MvZbgq8BD34Ksxs2SRHn4/UiOM=
 k8s.io/kube-aggregator v0.32.1 h1:cztPyIHbo6tgrhYHDqmdmvxUufJKuxgAC/vog7yeWek=
 k8s.io/kube-aggregator v0.32.1/go.mod h1:sXjL5T8FO/rlBzTbBhahw9V5Nnr1UtzZHKTj9WxQCOU=
 k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
 k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
 k8s.io/metrics v0.31.1 h1:h4I4dakgh/zKflWYAOQhwf0EXaqy8LxAIyE/GBvxqRc=
 k8s.io/metrics v0.31.1/go.mod h1:JuH1S9tJiH9q1VCY0yzSCawi7kzNLsDzlWDJN4xR+iA=
-k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0=
-k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e h1:KqK5c/ghOm8xkHYhlodbp6i6+r+ChV2vuAuVRdFbLro=
+k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
 rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
 rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcpeN4baWEV2ko2Z/AsiZgEdwgcfwLgMo=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
-sigs.k8s.io/controller-runtime v0.20.3 h1:I6Ln8JfQjHH7JbtCD2HCYHoIzajoRxPNuvhvcDbZgkI=
-sigs.k8s.io/controller-runtime v0.20.3/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY=
+sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU=
+sigs.k8s.io/controller-runtime v0.20.4/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY=
 sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
 sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
 sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96 h1:PFWFSkpArPNJxFX4ZKWAk9NSeRoZaXschn+ULa4xVek=
@@ -972,7 +972,7 @@ sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96/g
 sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
 sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
 sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
-sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc=
-sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
+sigs.k8s.io/structured-merge-diff/v4 v4.7.0 h1:qPeWmscJcXP0snki5IYF79Z8xrl8ETFxgMd7wez1XkI=
+sigs.k8s.io/structured-merge-diff/v4 v4.7.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
 sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
 sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
diff --git a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md
index 92b78048e2..6f24dfff56 100644
--- a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md
+++ b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md
@@ -1,5 +1,8 @@
 # Change history of go-restful
 
+## [v3.12.2] - 2025-02-21
+
+- allow empty payloads in post,put,patch, issue #580 ( thanks @liggitt, Jordan Liggitt)
 
 ## [v3.12.1] - 2024-05-28
 
@@ -18,7 +21,7 @@
 
 - fix by restoring custom JSON handler functions (Mike Beaumont #540)
 
-## [v3.12.0] - 2023-08-19
+## [v3.11.0] - 2023-08-19
 
 - restored behavior as <= v3.9.0 with option to change path strategy using TrimRightSlashEnabled.
diff --git a/vendor/github.com/emicklei/go-restful/v3/README.md b/vendor/github.com/emicklei/go-restful/v3/README.md
index 7234604e47..3fb40d1980 100644
--- a/vendor/github.com/emicklei/go-restful/v3/README.md
+++ b/vendor/github.com/emicklei/go-restful/v3/README.md
@@ -3,7 +3,7 @@ go-restful
 package for building REST-style Web Services using Google Go
 
 [![Go Report Card](https://goreportcard.com/badge/github.com/emicklei/go-restful)](https://goreportcard.com/report/github.com/emicklei/go-restful)
-[![GoDoc](https://godoc.org/github.com/emicklei/go-restful?status.svg)](https://pkg.go.dev/github.com/emicklei/go-restful)
+[![Go Reference](https://pkg.go.dev/badge/github.com/emicklei/go-restful.svg)](https://pkg.go.dev/github.com/emicklei/go-restful/v3)
 [![codecov](https://codecov.io/gh/emicklei/go-restful/branch/master/graph/badge.svg)](https://codecov.io/gh/emicklei/go-restful)
 
 - [Code examples use v3](https://github.com/emicklei/go-restful/tree/v3/examples)
diff --git a/vendor/github.com/emicklei/go-restful/v3/jsr311.go b/vendor/github.com/emicklei/go-restful/v3/jsr311.go
index a9b3faaa81..7f04bd9053 100644
--- a/vendor/github.com/emicklei/go-restful/v3/jsr311.go
+++ b/vendor/github.com/emicklei/go-restful/v3/jsr311.go
@@ -65,7 +65,7 @@ func (RouterJSR311) extractParams(pathExpr *pathExpression, matches []string) ma
 	return params
 }
 
-// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2
+// https://download.oracle.com/otndocs/jcp/jaxrs-1.1-mrel-eval-oth-JSpec/
 func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*Route, error) {
 	candidates := make([]*Route, 0, 8)
 	for i, each := range routes {
@@ -126,9 +126,7 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R
 		if trace {
 			traceLogger.Printf("no Route found (from %d) that matches HTTP Content-Type: %s\n", len(previous), contentType)
 		}
-		if httpRequest.ContentLength > 0 {
-			return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type")
-		}
+		return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type")
 	}
 
 	// accept
@@ -151,20 +149,9 @@ func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*R
 		for _, candidate := range previous {
 			available = append(available, candidate.Produces...)
 		}
-		// if POST,PUT,PATCH without body
-		method, length := httpRequest.Method, httpRequest.Header.Get("Content-Length")
-		if (method == http.MethodPost ||
-			method == http.MethodPut ||
-			method == http.MethodPatch) && (length == "" || length == "0") {
-			return nil, NewError(
-				http.StatusUnsupportedMediaType,
-				fmt.Sprintf("415: Unsupported Media Type\n\nAvailable representations: %s", strings.Join(available, ", ")),
-			)
-		}
 		return nil, NewError(
 			http.StatusNotAcceptable,
-			fmt.Sprintf("406: Not Acceptable\n\nAvailable representations: %s", strings.Join(available, ", ")),
-		)
+			fmt.Sprintf("406: Not Acceptable\n\nAvailable representations: %s", strings.Join(available, ", ")))
 	}
 	// return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil
 	return candidates[0], nil
diff --git a/vendor/github.com/emicklei/go-restful/v3/route.go b/vendor/github.com/emicklei/go-restful/v3/route.go
index 306c44be77..a2056e2acb 100644
--- a/vendor/github.com/emicklei/go-restful/v3/route.go
+++ b/vendor/github.com/emicklei/go-restful/v3/route.go
@@ -111,6 +111,8 @@ func (r Route) matchesAccept(mimeTypesWithQuality string) bool {
 }
 
 // Return whether this Route can consume content with a type specified by mimeTypes (can be empty).
+// If the route does not specify Consumes then return true (*/*).
+// If no content type is set then return true for GET,HEAD,OPTIONS,DELETE and TRACE.
 func (r Route) matchesContentType(mimeTypes string) bool {
 
 	if len(r.Consumes) == 0 {
diff --git a/vendor/github.com/fxamacker/cbor/v2/README.md b/vendor/github.com/fxamacker/cbor/v2/README.md
index af0a79507e..da9f9e6f09 100644
--- a/vendor/github.com/fxamacker/cbor/v2/README.md
+++ b/vendor/github.com/fxamacker/cbor/v2/README.md
@@ -1,6 +1,4 @@
-# CBOR Codec in Go
-
-
+[image: CBOR Codec Go logo]
 
 [fxamacker/cbor](https://github.com/fxamacker/cbor) is a library for encoding and decoding [CBOR](https://www.rfc-editor.org/info/std94) and [CBOR Sequences](https://www.rfc-editor.org/rfc/rfc8742.html).
 
@@ -8,23 +6,26 @@ CBOR is a [trusted alternative](https://www.rfc-editor.org/rfc/rfc8949.html#name
 
 `fxamacker/cbor` is used in projects by Arm Ltd., Cisco, EdgeX Foundry, Flow Foundation, Fraunhofer‑AISEC, Kubernetes, Let's Encrypt (ISRG), Linux Foundation, Microsoft, Mozilla, Oasis Protocol, Tailscale, Teleport, [etc](https://github.com/fxamacker/cbor#who-uses-fxamackercbor).
 
-See [Quick Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/). 🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences. `cbor.MarshalToBuffer()` and `UserBufferEncMode` accepts user-specified buffer.
+See [Quick Start](#quick-start) and [Releases](https://github.com/fxamacker/cbor/releases/). 🆕 `UnmarshalFirst` and `DiagnoseFirst` can decode CBOR Sequences. `MarshalToBuffer` and `UserBufferEncMode` accept a user-specified buffer.
 
 ## fxamacker/cbor
 
 [![](https://github.com/fxamacker/cbor/workflows/ci/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3Aci)
-[![](https://github.com/fxamacker/cbor/workflows/cover%20%E2%89%A596%25/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A596%25%22)
+[![](https://github.com/fxamacker/cbor/workflows/cover%20%E2%89%A597%25/badge.svg)](https://github.com/fxamacker/cbor/actions?query=workflow%3A%22cover+%E2%89%A597%25%22)
 [![CodeQL](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/fxamacker/cbor/actions/workflows/codeql-analysis.yml)
 [![](https://img.shields.io/badge/fuzzing-passing-44c010)](#fuzzing-and-code-coverage)
 [![Go Report Card](https://goreportcard.com/badge/github.com/fxamacker/cbor)](https://goreportcard.com/report/github.com/fxamacker/cbor)
+[![](https://img.shields.io/ossf-scorecard/github.com/fxamacker/cbor?label=openssf%20scorecard)](https://github.com/fxamacker/cbor#fuzzing-and-code-coverage)
 
 `fxamacker/cbor` is a CBOR codec in full conformance with [IETF STD 94 (RFC 8949)](https://www.rfc-editor.org/info/std94). It also supports CBOR Sequences ([RFC 8742](https://www.rfc-editor.org/rfc/rfc8742.html)) and Extended Diagnostic Notation ([Appendix G of RFC 8610](https://www.rfc-editor.org/rfc/rfc8610.html#appendix-G)).
 
 Features include full support for CBOR tags, [Core Deterministic Encoding](https://www.rfc-editor.org/rfc/rfc8949.html#name-core-deterministic-encoding), duplicate map key detection, etc.
 
+API is mostly the same as `encoding/json`, plus interfaces that simplify concurrency and CBOR options.
+
 Design balances trade-offs between security, speed, concurrency, encoded data size, usability, etc.
 
-<details><summary>Highlights</summary>
+
+<details><summary>🔎  Highlights</summary>
 
__🚀  Speed__ @@ -38,7 +39,7 @@ Codec passed multiple confidential security assessments in 2022. No vulnerabili __🗜️  Data Size__ -Struct tags (`toarray`, `keyasint`, `omitempty`) automatically reduce size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit. +Struct tag options (`toarray`, `keyasint`, `omitempty`, `omitzero`) automatically reduce size of encoded structs. Encoding optionally shrinks float64→32→16 when values fit. __:jigsaw:  Usability__ @@ -58,164 +59,201 @@ Features include CBOR [extension points](https://www.rfc-editor.org/rfc/rfc8949. `fxamacker/cbor` has configurable limits, etc. that defend against malicious CBOR data. -By contrast, `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security). - -

Example decoding with encoding/gob 💥 fatal error (out of memory)

- -```Go -// Example of encoding/gob having "fatal error: runtime: out of memory" -// while decoding 181 bytes. -package main -import ( - "bytes" - "encoding/gob" - "encoding/hex" - "fmt" -) - -// Example data is from https://github.com/golang/go/issues/24446 -// (shortened to 181 bytes). -const data = "4dffb503010102303001ff30000109010130010800010130010800010130" + - "01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" + - "860001013001ff860001013001ffb80000001eff850401010e3030303030" + - "30303030303030303001ff3000010c0104000016ffb70201010830303030" + - "3030303001ff3000010c000030ffb6040405fcff00303030303030303030" + - "303030303030303030303030303030303030303030303030303030303030" + - "30" - -type X struct { - J *X - K map[string]int -} - -func main() { - raw, _ := hex.DecodeString(data) - decoder := gob.NewDecoder(bytes.NewReader(raw)) - - var x X - decoder.Decode(&x) // fatal error: runtime: out of memory - fmt.Println("Decoding finished.") -} -``` - -


- -
- -`fxamacker/cbor` is fast at rejecting malformed CBOR data. E.g. attempts to -decode 10 bytes of malicious CBOR data to `[]byte` (with default settings): - -| Codec | Speed (ns/op) | Memory | Allocs | -| :---- | ------------: | -----: | -----: | -| fxamacker/cbor 2.5.0 | 44 ± 5% | 32 B/op | 2 allocs/op | -| ugorji/go 1.2.11 | 5353261 ± 4% | 67111321 B/op | 13 allocs/op | - -
Benchmark details

- -Latest comparison used: -- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` -- go1.19.10, linux/amd64, i5-13600K (disabled all e-cores, DDR4 @2933) -- go test -bench=. -benchmem -count=20 - -#### Prior comparisons - -| Codec | Speed (ns/op) | Memory | Allocs | -| :---- | ------------: | -----: | -----: | -| fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op | -| fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op | -| ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op | -| ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate | - -- Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}` -- go1.19.6, linux/amd64, i5-13600K (DDR4) -- go test -bench=. -benchmem -count=20 - -


- -
-### Smaller Encodings with Struct Tags
-
-Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs.
-
-<details><summary>Example encoding 3-level nested Go struct to 1 byte CBOR</summary>
-
-https://go.dev/play/p/YxwvfPdFQG2
-
-```Go
-// Example encoding nested struct (with omitempty tag)
-// - encoding/json: 18 byte JSON
-// - fxamacker/cbor: 1 byte CBOR
-package main
-
-import (
-	"encoding/hex"
-	"encoding/json"
-	"fmt"
-
-	"github.com/fxamacker/cbor/v2"
-)
-
-type GrandChild struct {
-	Quux int `json:",omitempty"`
-}
-
-type Child struct {
-	Baz int        `json:",omitempty"`
-	Qux GrandChild `json:",omitempty"`
-}
-
-type Parent struct {
-	Foo Child `json:",omitempty"`
-	Bar int   `json:",omitempty"`
-}
-
-func cb() {
-	results, _ := cbor.Marshal(Parent{})
-	fmt.Println("hex(CBOR): " + hex.EncodeToString(results))
-
-	text, _ := cbor.Diagnose(results) // Diagnostic Notation
-	fmt.Println("DN: " + text)
-}
-
-func js() {
-	results, _ := json.Marshal(Parent{})
-	fmt.Println("hex(JSON): " + hex.EncodeToString(results))
-
-	text := string(results) // JSON
-	fmt.Println("JSON: " + text)
-}
-
-func main() {
-	cb()
-	fmt.Println("-------------")
-	js()
-}
-```
-
-Output (DN is Diagnostic Notation):
-```
-hex(CBOR): a0
-DN: {}
--------------
-hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d
-JSON: {"Foo":{"Qux":{}}}
-```
-
-</details>
-
-
-Example using different struct tags together:
+Notably, `fxamacker/cbor` is fast at rejecting malformed CBOR data.
+
+> [!NOTE]
+> Benchmarks rejecting 10 bytes of malicious CBOR data decoding to `[]byte`:
+>
+> | Codec | Speed (ns/op) | Memory | Allocs |
+> | :---- | ------------: | -----: | -----: |
+> | fxamacker/cbor 2.7.0 | 47 ± 7% | 32 B/op | 2 allocs/op |
+> | ugorji/go 1.2.12 | 5878187 ± 3% | 67111556 B/op | 13 allocs/op |
+>
+> Faster hardware (overclocked DDR4 or DDR5) can reduce speed difference.
+>
+> <details><summary>🔎  Benchmark details</summary>
+>
+> Latest comparison for decoding CBOR data to Go `[]byte`:
+> - Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
+> - go1.22.7, linux/amd64, i5-13600K (DDR4-2933, disabled e-cores)
+> - go test -bench=. -benchmem -count=20
+>
+> #### Prior comparisons
+>
+> | Codec | Speed (ns/op) | Memory | Allocs |
+> | :---- | ------------: | -----: | -----: |
+> | fxamacker/cbor 2.5.0-beta2 | 44.33 ± 2% | 32 B/op | 2 allocs/op |
+> | fxamacker/cbor 0.1.0 - 2.4.0 | ~44.68 ± 6% | 32 B/op | 2 allocs/op |
+> | ugorji/go 1.2.10 | 5524792.50 ± 3% | 67110491 B/op | 12 allocs/op |
+> | ugorji/go 1.1.0 - 1.2.6 | 💥 runtime: | out of memory: | cannot allocate |
+>
+> - Input: `[]byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}`
+> - go1.19.6, linux/amd64, i5-13600K (DDR4)
+> - go test -bench=. -benchmem -count=20
+>
+> </details>
+
+In contrast, some codecs can crash or use excessive resources while decoding bad data.
+
+> [!WARNING]
+> Go's `encoding/gob` is [not designed to be hardened against adversarial inputs](https://pkg.go.dev/encoding/gob#hdr-Security).
+>
+> <details><summary>🔎  gob fatal error (out of memory) 💥 decoding 181 bytes</summary>
+>
+> ```Go
+> // Example of encoding/gob having "fatal error: runtime: out of memory"
+> // while decoding 181 bytes (all Go versions as of Dec. 8, 2024).
+> package main
+> import (
+> 	"bytes"
+> 	"encoding/gob"
+> 	"encoding/hex"
+> 	"fmt"
+> )
+>
+> // Example data is from https://github.com/golang/go/issues/24446
+> // (shortened to 181 bytes).
+> const data = "4dffb503010102303001ff30000109010130010800010130010800010130" +
+> 	"01ffb80001014a01ffb60001014b01ff860001013001ff860001013001ff" +
+> 	"860001013001ff860001013001ffb80000001eff850401010e3030303030" +
+> 	"30303030303030303001ff3000010c0104000016ffb70201010830303030" +
+> 	"3030303001ff3000010c000030ffb6040405fcff00303030303030303030" +
+> 	"303030303030303030303030303030303030303030303030303030303030" +
+> 	"30"
+>
+> type X struct {
+> 	J *X
+> 	K map[string]int
+> }
+>
+> func main() {
+> 	raw, _ := hex.DecodeString(data)
+> 	decoder := gob.NewDecoder(bytes.NewReader(raw))
+>
+> 	var x X
+> 	decoder.Decode(&x) // fatal error: runtime: out of memory
+> 	fmt.Println("Decoding finished.")
+> }
+> ```
+>
+> </details>
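By comparison, a minimal sketch (not from the upstream README) of `fxamacker/cbor` failing fast on the same class of input, assuming only the default `DecOptions` limits:

```Go
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

func main() {
	// Same 10-byte malicious input used in the benchmarks above: an array
	// header claiming far more elements than the input could contain.
	malicious := []byte{0x9B, 0x00, 0x00, 0x42, 0xFA, 0x42, 0xFA, 0x42, 0xFA, 0x42}

	var v []byte
	// Default limits reject the oversized header before allocating for the
	// claimed elements, so decoding returns an error instead of crashing.
	if err := cbor.Unmarshal(malicious, &v); err != nil {
		fmt.Println("rejected:", err)
	}
}
```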

+
+### Smaller Encodings with Struct Tag Options
+
+Struct tags automatically reduce encoded size of structs and improve speed.
+
+We can write less code by using struct tag options:
+- `toarray`: encode without field names (decode back to original struct)
+- `keyasint`: encode field names as integers (decode back to original struct)
+- `omitempty`: omit empty fields when encoding
+- `omitzero`: omit zero-value fields when encoding

 ![alt text](https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags")

-API is mostly same as `encoding/json`, plus interfaces that simplify concurrency for CBOR options.
+> [!NOTE]
+> `fxamacker/cbor` can encode a 3-level nested Go struct to 1 byte!
+> - `encoding/json`: 18 bytes of JSON
+> - `fxamacker/cbor`: 1 byte of CBOR
+>
+> <details><summary>🔎  Encoding 3-level nested Go struct with omitempty</summary>
+>
+> https://go.dev/play/p/YxwvfPdFQG2
+>
+> ```Go
+> // Example encoding nested struct (with omitempty tag)
+> // - encoding/json: 18 byte JSON
+> // - fxamacker/cbor: 1 byte CBOR
+>
+> package main
+>
+> import (
+> 	"encoding/hex"
+> 	"encoding/json"
+> 	"fmt"
+>
+> 	"github.com/fxamacker/cbor/v2"
+> )
+>
+> type GrandChild struct {
+> 	Quux int `json:",omitempty"`
+> }
+>
+> type Child struct {
+> 	Baz int        `json:",omitempty"`
+> 	Qux GrandChild `json:",omitempty"`
+> }
+>
+> type Parent struct {
+> 	Foo Child `json:",omitempty"`
+> 	Bar int   `json:",omitempty"`
+> }
+>
+> func cb() {
+> 	results, _ := cbor.Marshal(Parent{})
+> 	fmt.Println("hex(CBOR): " + hex.EncodeToString(results))
+>
+> 	text, _ := cbor.Diagnose(results) // Diagnostic Notation
+> 	fmt.Println("DN: " + text)
+> }
+>
+> func js() {
+> 	results, _ := json.Marshal(Parent{})
+> 	fmt.Println("hex(JSON): " + hex.EncodeToString(results))
+>
+> 	text := string(results) // JSON
+> 	fmt.Println("JSON: " + text)
+> }
+>
+> func main() {
+> 	cb()
+> 	fmt.Println("-------------")
+> 	js()
+> }
+> ```
+>
+> Output (DN is Diagnostic Notation):
+> ```
+> hex(CBOR): a0
+> DN: {}
+> -------------
+> hex(JSON): 7b22466f6f223a7b22517578223a7b7d7d7d
+> JSON: {"Foo":{"Qux":{}}}
+> ```
+>
+> </details>
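For instance, a short sketch (assumed type and field names, not from the upstream README) combining the `keyasint` and `toarray` options listed above:

```Go
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

// Claims uses keyasint: field names encode as integer map keys (1, 2).
type Claims struct {
	Iss string `cbor:"1,keyasint,omitempty"`
	Exp int64  `cbor:"2,keyasint,omitempty"`
}

// Record uses toarray: fields encode as CBOR array elements, with no keys at all.
type Record struct {
	_       struct{} `cbor:",toarray"`
	Alg     int
	Payload []byte
}

func main() {
	b, _ := cbor.Marshal(Claims{Iss: "x"})
	fmt.Printf("%x\n", b) // a10161 78: 1-pair map with integer key 1

	b, _ = cbor.Marshal(Record{Alg: -7, Payload: []byte("hi")})
	fmt.Printf("%x\n", b) // 8226426869: 2-element array, no field names
}
```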

+
+
 ## Quick Start

 __Install__: `go get github.com/fxamacker/cbor/v2` and `import "github.com/fxamacker/cbor/v2"`.

+> [!TIP]
+>
+> Tinygo users can try beta/experimental branch [feature/cbor-tinygo-beta](https://github.com/fxamacker/cbor/tree/feature/cbor-tinygo-beta).
+>
+> <details><summary>🔎  More about tinygo feature branch</summary>
+>
+> ### Tinygo
+>
+> Branch [feature/cbor-tinygo-beta](https://github.com/fxamacker/cbor/tree/feature/cbor-tinygo-beta) is based on fxamacker/cbor v2.7.0 and it can be compiled using tinygo v0.33 (also compiles with golang/go).
+>
+> It passes unit tests (with both go1.22 and tinygo v0.33) and is considered beta/experimental for tinygo.
+>
+> :warning: The `feature/cbor-tinygo-beta` branch does not get fuzz tested yet.
+>
+> Changes in this feature branch only affect tinygo compiled software. Summary of changes:
+> - default `DecOptions.MaxNestedLevels` is reduced to 16 (was 32). User can specify higher limit but 24+ crashes tests when compiled with tinygo v0.33.
+> - disabled decoding CBOR tag data to Go interface because tinygo v0.33 is missing needed feature.
+> - encoding error message can be different when encoding function type.
+>
+> Related tinygo issues:
+> - https://github.com/tinygo-org/tinygo/issues/4277
+> - https://github.com/tinygo-org/tinygo/issues/4458
+>
+> </details>
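To make the Quick Start above concrete, a minimal round trip using only the `Marshal` and `Unmarshal` functions (a sketch, not from the upstream README):

```Go
package main

import (
	"fmt"

	"github.com/fxamacker/cbor/v2"
)

type Animal struct {
	Name string
	Legs int
}

func main() {
	// Encode a struct to CBOR bytes.
	b, err := cbor.Marshal(Animal{Name: "cat", Legs: 4})
	if err != nil {
		panic(err)
	}

	// Decode the bytes back into a struct.
	var a Animal
	if err := cbor.Unmarshal(b, &a); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", a) // {Name:cat Legs:4}
}
```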
+ + ### Key Points This library can encode and decode CBOR (RFC 8949) and CBOR Sequences (RFC 8742). @@ -252,16 +290,17 @@ rest, err = cbor.UnmarshalFirst(b, &v) // decode []byte b to v // DiagnoseFirst translates first CBOR data item to text and returns remaining bytes. text, rest, err = cbor.DiagnoseFirst(b) // decode []byte b to Diagnostic Notation text -// NOTE: Unmarshal returns ExtraneousDataError if there are remaining bytes, -// but new funcs UnmarshalFirst and DiagnoseFirst do not. +// NOTE: Unmarshal() returns ExtraneousDataError if there are remaining bytes, but +// UnmarshalFirst() and DiagnoseFirst() allow trailing bytes. ``` -__IMPORTANT__: 👉 CBOR settings allow trade-offs between speed, security, encoding size, etc. - -- Different CBOR libraries may use different default settings. -- CBOR-based formats or protocols usually require specific settings. - -For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset. +> [!IMPORTANT] +> CBOR settings allow trade-offs between speed, security, encoding size, etc. +> +> - Different CBOR libraries may use different default settings. +> - CBOR-based formats or protocols usually require specific settings. +> +> For example, WebAuthn uses "CTAP2 Canonical CBOR" which is available as a preset. ### Presets @@ -312,9 +351,9 @@ err = em.MarshalToBuffer(v, &buf) // encode v to provided buf ### Struct Tags -Struct tags (`toarray`, `keyasint`, `omitempty`) reduce encoded size of structs. +Struct tag options (`toarray`, `keyasint`, `omitempty`, `omitzero`) reduce encoded size of structs. -
-<details><summary>Example encoding 3-level nested Go struct to 1 byte CBOR</summary>
+<details><summary>🔎  Example encoding 3-level nested Go struct to 1 byte CBOR</summary>
https://go.dev/play/p/YxwvfPdFQG2 @@ -382,13 +421,13 @@ JSON: {"Foo":{"Qux":{}}}

 </details>

-<details><summary>Example using several struct tags</summary>
+<details><summary>🔎  Example using struct tag options</summary>
![alt text](https://github.com/fxamacker/images/raw/master/cbor/v2.3.0/cbor_struct_tags_api.svg?sanitize=1 "CBOR API and Go Struct Tags")

-Struct tags simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys. +Struct tag options simplify use of CBOR-based protocols that require CBOR arrays or maps with integer keys. ### CBOR Tags @@ -404,7 +443,7 @@ em, err := opts.EncModeWithSharedTags(ts) // mutable shared CBOR tags `TagSet` and modes using it are safe for concurrent use. Equivalent API is available for `DecMode`. -
-<details><summary>Example using TagSet and TagOptions</summary>
+<details><summary>🔎  Example using TagSet and TagOptions</summary>
```go // Use signedCWT struct defined in "Decoding CWT" example. @@ -430,7 +469,7 @@ if err := dm.Unmarshal(data, &v); err != nil { em, _ := cbor.EncOptions{}.EncModeWithTags(tags) // Marshal signedCWT with tag number. -if data, err := cbor.Marshal(v); err != nil { +if data, err := em.Marshal(v); err != nil { return err } ``` @@ -439,7 +478,7 @@ if data, err := cbor.Marshal(v); err != nil { ### Functions and Interfaces -

-<details><summary>Functions and interfaces at a glance</summary>
+<details><summary>🔎  Functions and interfaces at a glance</summary>
Common functions with same API as `encoding/json`: - `Marshal`, `Unmarshal` @@ -472,11 +511,24 @@ Default limits may need to be increased for systems handling very large data (e. ## Status -v2.7.0 (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality. +v2.8.0 (March 30, 2025) is a small release primarily to add `omitzero` option to struct field tags and fix bugs. It passed fuzz tests (billions of executions) and is production quality. + +v2.8.0 and v2.7.1 fixes these 3 functions (when called directly by user apps) to use same error handling on bad inputs as `cbor.Unmarshal()`: +- `ByteString.UnmarshalCBOR()` +- `RawTag.UnmarshalCBOR()` +- `SimpleValue.UnmarshalCBOR()` + +The above 3 `UnmarshalCBOR()` functions were initially created for internal use and are deprecated now, so please use `Unmarshal()` or `UnmarshalFirst()` instead. To preserve backward compatibility, these deprecated functions were added to fuzz tests and will not be removed in v2. + +The minimum version of Go required to build: +- v2.8.0 requires go 1.20. +- v2.7.1 and older releases require go 1.17. For more details, see [release notes](https://github.com/fxamacker/cbor/releases). -### Prior Release +### Prior Releases + +v2.7.0 (June 23, 2024) adds features and improvements that help large projects (e.g. Kubernetes) use CBOR as an alternative to JSON and Protocol Buffers. Other improvements include speedups, improved memory use, bug fixes, new serialization options, etc. It passed fuzz tests (5+ billion executions) and is production quality. [v2.6.0](https://github.com/fxamacker/cbor/releases/tag/v2.6.0) (February 2024) adds important new features, optimizations, and bug fixes. It is especially useful to systems that need to convert data between CBOR and JSON. New options and optimizations improve handling of bignum, integers, maps, and strings. @@ -489,7 +541,7 @@ See [v2.5.0 release notes](https://github.com/fxamacker/cbor/releases/tag/v2.5.0 See ["Version and API Changes"](https://github.com/fxamacker/cbor#versions-and-api-changes) section for more info about version numbering, etc. +[ctc_overview]: https://aka.ms/azsdk/go/identity/credential-chains#chainedtokencredential-overview +[dac_overview]: https://aka.ms/azsdk/go/identity/credential-chains#defaultazurecredential-overview + + diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD index fbaa292204..dd3f8e5b21 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TOKEN_CACHING.MD @@ -1,60 +1,42 @@ ## Token caching in the Azure Identity client module -*Token caching* is a feature provided by the Azure Identity library that allows apps to: +Token caching helps apps: - Improve their resilience and performance. -- Reduce the number of requests made to Microsoft Entra ID to obtain access tokens. -- Reduce the number of times the user is prompted to authenticate. +- Reduce the number of requests sent to Microsoft Entra ID to obtain access tokens. +- Reduce the number of times users are prompted to authenticate. 
-When an app needs to access a protected Azure resource, it typically needs to obtain an access token from Entra ID. Obtaining that token involves sending a request to Entra ID and may also involve prompting the user. Entra ID then validates the credentials provided in the request and issues an access token. +When an app needs to access a protected Azure resource, it typically needs to obtain an access token from Entra ID by sending an HTTP request and sometimes prompting a user to authenticate interactively. Credentials with caches (see [the below table](#credentials-supporting-token-caching) for a list) store access tokens either [in memory](#in-memory-token-caching) or, optionally, [on disk](#persistent-token-caching). These credentials return cached tokens whenever possible, to avoid unnecessary token requests or user interaction. Both cache implementations are safe for concurrent use. -Token caching, via the Azure Identity library, allows the app to store this access token [in memory](#in-memory-token-caching), where it's accessible to the current process, or [on disk](#persistent-token-caching) where it can be accessed across application or process invocations. The token can then be retrieved quickly and easily the next time the app needs to access the same resource. The app can avoid making another request to Entra ID, which reduces network traffic and improves resilience. Additionally, in scenarios where the app is authenticating users, token caching also avoids prompting the user each time new tokens are requested. +#### Caching can't be disabled -### In-memory token caching - -*In-memory token caching* is the default option provided by the Azure Identity library. This caching approach allows apps to store access tokens in memory. With in-memory token caching, the library first determines if a valid access token for the requested resource is already stored in memory. If a valid token is found, it's returned to the app without the need to make another request to Entra ID. If a valid token isn't found, the library will automatically acquire a token by sending a request to Entra ID. The in-memory token cache provided by the Azure Identity library is thread-safe. - -**Note:** When Azure Identity library credentials are used with Azure service libraries (for example, Azure Blob Storage), the in-memory token caching is active in the `Pipeline` layer as well. All `TokenCredential` implementations are supported there, including custom implementations external to the Azure Identity library. +Whether a credential caches tokens isn't configurable. If a credential has a cache of either kind, it requests a new token only when it can't provide one from its cache. Azure SDK service clients have an additional, independent layer of in-memory token caching, to prevent redundant token requests. This cache works with any credential type, even a custom implementation defined outside the Azure SDK, and can't be disabled. Disabling token caching is therefore impossible when using Azure SDK clients or most `azidentity` credential types. However, in-memory caches can be cleared by constructing new credential and client instances. -#### Caching cannot be disabled +### In-memory token caching -As there are many levels of caching, it's not possible disable in-memory caching. However, the in-memory cache may be cleared by creating a new credential instance. +Credential types that support caching store tokens in memory by default and require no configuration to do so. 
Each instance of these types has its own cache, and two credential instances never share an in-memory cache. ### Persistent token caching -> Only azidentity v1.5.0-beta versions support persistent token caching - -*Persistent disk token caching* is an opt-in feature in the Azure Identity library. The feature allows apps to cache access tokens in an encrypted, persistent storage mechanism. As indicated in the following table, the storage mechanism differs across operating systems. - -| Operating system | Storage mechanism | -|------------------|---------------------------------------| -| Linux | kernel key retention service (keyctl) | -| macOS | Keychain | -| Windows | DPAPI | +Some credential types support opt-in persistent token caching (see [the below table](#credentials-supporting-token-caching) for a list). This feature enables credentials to store and retrieve tokens across process executions, so an application doesn't need to authenticate every time it runs. -By default the token cache will protect any data which is persisted using the user data protection APIs available on the current platform. -However, there are cases where no data protection is available, and applications may choose to allow storing the token cache in an unencrypted state by setting `TokenCachePersistenceOptions.AllowUnencryptedStorage` to `true`. This allows a credential to fall back to unencrypted storage if it can't encrypt the cache. However, we do not recommend using this storage method due to its significantly lower security measures. In addition, tokens are not encrypted solely to the current user, which could potentially allow unauthorized access to the cache by individuals with machine access. +Persistent caches are encrypted at rest using a mechanism that depends on the operating system: -With persistent disk token caching enabled, the library first determines if a valid access token for the requested resource is already stored in the persistent cache. If a valid token is found, it's returned to the app without the need to make another request to Entra ID. Additionally, the tokens are preserved across app runs, which: - -- Makes the app more resilient to failures. -- Ensures the app can continue to function during an Entra ID outage or disruption. -- Avoids having to prompt users to authenticate each time the process is restarted. - ->IMPORTANT! The token cache contains sensitive data and **MUST** be protected to prevent compromising accounts. All application decisions regarding the persistence of the token cache must consider that a breach of its content will fully compromise all the accounts it contains. - -#### Example code - -See the [package documentation](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity@v1.6.0-beta.2#pkg-overview) for example code demonstrating how to configure persistent caching and access cached data. +| Operating system | Encryption facility | Limitations | +| ---------------- | ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| Linux | kernel key retention service (keyctl) | Cache data is lost on system shutdown because kernel keys are stored in memory. Depending on kernel compile options, data may also be lost on logout, or storage may be impossible because the key retention service isn't available. 
|
+| macOS | Keychain | Building requires cgo and native build tools. Keychain access requires a graphical session, so persistent caching isn't possible in a headless environment such as an SSH session (macOS as host). |
+| Windows | Data Protection API (DPAPI) | No specific limitations. |
+
+Persistent caching requires encryption. When the required encryption facility is unusable, or the application is running on an unsupported OS, the persistent cache constructor returns an error. This doesn't mean that authentication is impossible, only that credentials can't persist authentication data and the application will need to reauthenticate the next time it runs. See the package documentation for examples showing how to configure persistent caching and access cached data for [users][user_example] and [service principals][sp_example].
 
 ### Credentials supporting token caching
 
 The following table indicates the state of in-memory and persistent caching in each credential type.
 
-**Note:** In-memory caching is activated by default. Persistent token caching needs to be enabled as shown in [this example](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity@v1.5.0-beta.1#example-package-PersistentCache).
+**Note:** in-memory caching is enabled by default for every type supporting it. Persistent token caching must be enabled explicitly. See the [package documentation][user_example] for an example showing how to do this for credential types authenticating users. For types that authenticate service principals, set the `Cache` field on the constructor's options as shown in [this example][sp_example].
 
 | Credential | In-memory token caching | Persistent token caching |
-|--------------------------------|---------------------------------------------------------------------|--------------------------|
+| ------------------------------ | ------------------------------------------------------------------- | ------------------------ |
 | `AzureCLICredential` | Not Supported | Not Supported |
 | `AzureDeveloperCLICredential` | Not Supported | Not Supported |
 | `AzurePipelinesCredential` | Supported | Supported |
@@ -66,6 +48,9 @@ The following table indicates the state of in-memory and persistent caching in e
 | `EnvironmentCredential` | Supported | Not Supported |
 | `InteractiveBrowserCredential` | Supported | Supported |
 | `ManagedIdentityCredential` | Supported | Not Supported |
-| `OnBehalfOfCredential` | Supported | Supported |
+| `OnBehalfOfCredential` | Supported | Not Supported |
 | `UsernamePasswordCredential` | Supported | Supported |
 | `WorkloadIdentityCredential` | Supported | Supported |
+
+[sp_example]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#example-package-PersistentServicePrincipalAuthentication
+[user_example]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#example-package-PersistentUserAuthentication
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md
index 54016a0709..9c4b1cd71c 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md
+++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/TROUBLESHOOTING.md
@@ -8,6 +8,7 @@ This troubleshooting guide covers failure investigation techniques, common error
 - [Permission issues](#permission-issues)
 - [Find relevant information in errors](#find-relevant-information-in-errors)
 - [Enable and configure logging](#enable-and-configure-logging)
+- 
[Troubleshoot persistent token caching issues](#troubleshoot-persistent-token-caching-issues) - [Troubleshoot AzureCLICredential authentication issues](#troubleshoot-azureclicredential-authentication-issues) - [Troubleshoot AzureDeveloperCLICredential authentication issues](#troubleshoot-azuredeveloperclicredential-authentication-issues) - [Troubleshoot AzurePipelinesCredential authentication issues](#troubleshoot-azurepipelinescredential-authentication-issues) @@ -234,7 +235,30 @@ azd auth token --output json --scope https://management.core.windows.net/.defaul |---|---|---| | AADSTS900023: Specified tenant identifier 'some tenant ID' is neither a valid DNS name, nor a valid external domain.|The `tenantID` argument to `NewAzurePipelinesCredential` is incorrect| Verify the tenant ID. It must identify the tenant of the user-assigned managed identity or service principal configured for the service connection.| | No service connection found with identifier |The `serviceConnectionID` argument to `NewAzurePipelinesCredential` is incorrect| Verify the service connection ID. This parameter refers to the `resourceId` of the Azure Service Connection. It can also be found in the query string of the service connection's configuration in Azure DevOps. [Azure Pipelines documentation](https://learn.microsoft.com/azure/devops/pipelines/library/service-endpoints?view=azure-devops&tabs=yaml) has more information about service connections.| -|302 (Found) response from OIDC endpoint|The `systemAccessToken` argument to `NewAzurePipelinesCredential` is incorrect|Check pipeline configuration. This value comes from the predefined variable `System.AccessToken` [as described in Azure Pipelines documentation](https://learn.microsoft.com/azure/devops/pipelines/build/variables?view=azure-devops&tabs=yaml#systemaccesstoken).| +|401 (Unauthorized) response from OIDC endpoint|The `systemAccessToken` argument to `NewAzurePipelinesCredential` is incorrect|Check pipeline configuration. This value comes from the predefined variable `System.AccessToken` [as described in Azure Pipelines documentation](https://learn.microsoft.com/azure/devops/pipelines/build/variables?view=azure-devops&tabs=yaml#systemaccesstoken).| + +## Troubleshoot persistent token caching issues + +### macOS + +[azidentity/cache](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache) encrypts persistent caches with the system Keychain on macOS. You may see build and runtime errors there because calling the Keychain API requires cgo and macOS prohibits Keychain access in some scenarios. + +#### Build errors + +Build errors about undefined `accessor` symbols indicate that cgo wasn't enabled. For example: +``` +$ GOOS=darwin go build +# github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache +../../go/pkg/mod/github.com/!azure/azure-sdk-for-go/sdk/azidentity/cache@v0.3.0/darwin.go:18:19: undefined: accessor.New +../../go/pkg/mod/github.com/!azure/azure-sdk-for-go/sdk/azidentity/cache@v0.3.0/darwin.go:18:38: undefined: accessor.WithAccount +``` + +Try `go build` again with `CGO_ENABLED=1`. You may need to install native build tools. + +#### Runtime errors + +macOS prohibits Keychain access from environments without a GUI such as SSH sessions. If your application calls the persistent cache constructor ([cache.New](https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache#New)) from an SSH session on a macOS host, you'll see an error like +`persistent storage isn't available due to error "User interaction is not allowed. 
(-25308)"`. This doesn't mean authentication is impossible, only that credentials can't persist data and the application must reauthenticate the next time it runs. ## Get additional help diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json index bff0c44dac..4118f99ef2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "go", "TagPrefix": "go/azidentity", - "Tag": "go/azidentity_087379b475" + "Tag": "go/azidentity_191110b0dd" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go index ada4d6501d..840a71469c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/authentication_record.go @@ -18,10 +18,10 @@ import ( var supportedAuthRecordVersions = []string{"1.0"} -// authenticationRecord is non-secret account information about an authenticated user that user credentials such as +// AuthenticationRecord is non-secret account information about an authenticated user that user credentials such as // [DeviceCodeCredential] and [InteractiveBrowserCredential] can use to access previously cached authentication -// data. Call these credentials' Authenticate method to get an authenticationRecord for a user. -type authenticationRecord struct { +// data. Call these credentials' Authenticate method to get an AuthenticationRecord for a user. +type AuthenticationRecord struct { // Authority is the URL of the authority that issued the token. Authority string `json:"authority"` @@ -42,11 +42,11 @@ type authenticationRecord struct { } // UnmarshalJSON implements json.Unmarshaler for AuthenticationRecord -func (a *authenticationRecord) UnmarshalJSON(b []byte) error { +func (a *AuthenticationRecord) UnmarshalJSON(b []byte) error { // Default unmarshaling is fine but we want to return an error if the record's version isn't supported i.e., we // want to inspect the unmarshalled values before deciding whether to return an error. Unmarshaling a formally // different type enables this by assigning all the fields without recursing into this method. - type r authenticationRecord + type r AuthenticationRecord err := json.Unmarshal(b, (*r)(a)) if err != nil { return err @@ -63,7 +63,7 @@ func (a *authenticationRecord) UnmarshalJSON(b []byte) error { } // account returns the AuthenticationRecord as an MSAL Account. The account is zero-valued when the AuthenticationRecord is zero-valued. 
-func (a *authenticationRecord) account() public.Account { +func (a *AuthenticationRecord) account() public.Account { return public.Account{ Environment: a.Authority, HomeAccountID: a.HomeAccountID, @@ -71,10 +71,10 @@ func (a *authenticationRecord) account() public.Account { } } -func newAuthenticationRecord(ar public.AuthResult) (authenticationRecord, error) { +func newAuthenticationRecord(ar public.AuthResult) (AuthenticationRecord, error) { u, err := url.Parse(ar.IDToken.Issuer) if err != nil { - return authenticationRecord{}, fmt.Errorf("Authenticate expected a URL issuer but got %q", ar.IDToken.Issuer) + return AuthenticationRecord{}, fmt.Errorf("Authenticate expected a URL issuer but got %q", ar.IDToken.Issuer) } tenant := ar.IDToken.TenantID if tenant == "" { @@ -84,7 +84,7 @@ func newAuthenticationRecord(ar public.AuthResult) (authenticationRecord, error) if username == "" { username = ar.IDToken.UPN } - return authenticationRecord{ + return AuthenticationRecord{ Authority: fmt.Sprintf("%s://%s", u.Scheme, u.Host), ClientID: ar.IDToken.Audience, HomeAccountID: ar.Account.HomeAccountID, diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go index b0965036bb..bd196ddd32 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azidentity.go @@ -22,6 +22,7 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" "github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/public" ) @@ -42,6 +43,8 @@ const ( developerSignOnClientID = "04b07795-8ddb-461a-bbee-02f9e1bf7b46" defaultSuffix = "/.default" + scopeLogFmt = "%s.GetToken() acquired a token for scope %q" + traceNamespace = "Microsoft.Entra" traceOpGetToken = "GetToken" traceOpAuthenticate = "Authenticate" @@ -53,8 +56,14 @@ var ( errInvalidTenantID = errors.New("invalid tenantID. You can locate your tenantID by following the instructions listed here: https://learn.microsoft.com/partner-center/find-ids-and-domain-names") ) -// tokenCachePersistenceOptions contains options for persistent token caching -type tokenCachePersistenceOptions = internal.TokenCachePersistenceOptions +// Cache represents a persistent cache that makes authentication data available across processes. +// Construct one with [github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache.New]. This package's +// [persistent user authentication example] shows how to use a persistent cache to reuse user +// logins across application runs. For service principal credential types such as +// [ClientCertificateCredential], simply set the Cache field on the credential options. +// +// [persistent user authentication example]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#example-package-PersistentUserAuthentication +type Cache = internal.Cache // setAuthorityHost initializes the authority host for credentials. Precedence is: // 1. 
cloud.Configuration.ActiveDirectoryAuthorityHost value set by user @@ -97,7 +106,16 @@ func resolveAdditionalTenants(tenants []string) []string { return cp } -// resolveTenant returns the correct tenant for a token request +// resolveTenant returns the correct tenant for a token request, or "" when the calling credential doesn't +// have an explicitly configured tenant and the caller didn't specify a tenant for the token request. +// +// - defaultTenant: tenant set when constructing the credential, if any. "" is valid for credentials +// having an optional or implicit tenant such as dev tool and interactive user credentials. Those +// default to the tool's configured tenant or the user's home tenant, respectively. +// - specified: tenant specified for this token request i.e., TokenRequestOptions.TenantID. May be "". +// - credName: name of the calling credential type; for error messages +// - additionalTenants: optional allow list of tenants the credential may acquire tokens from in +// addition to defaultTenant i.e., the credential's AdditionallyAllowedTenants option func resolveTenant(defaultTenant, specified, credName string, additionalTenants []string) (string, error) { if specified == "" || specified == defaultTenant { return defaultTenant, nil @@ -113,6 +131,17 @@ func resolveTenant(defaultTenant, specified, credName string, additionalTenants return specified, nil } } + if len(additionalTenants) == 0 { + switch defaultTenant { + case "", organizationsTenantID: + // The application didn't specify a tenant or allow list when constructing the credential. Allow the + // tenant specified for this token request because we have nothing to compare it to (i.e., it vacuously + // satisfies the credential's configuration); don't know whether the application is multitenant; and + // don't want to return an error in the common case that the specified tenant matches the credential's + // default tenant determined elsewhere e.g., in some dev tool's configuration. + return specified, nil + } + } return "", fmt.Errorf(`%s isn't configured to acquire tokens for tenant %q. To enable acquiring tokens for this tenant add it to the AdditionallyAllowedTenants on the credential options, or add "*" to allow acquiring tokens for any tenant`, credName, specified) } @@ -180,6 +209,10 @@ type msalConfidentialClient interface { AcquireTokenOnBehalfOf(ctx context.Context, userAssertion string, scopes []string, options ...confidential.AcquireOnBehalfOfOption) (confidential.AuthResult, error) } +type msalManagedIdentityClient interface { + AcquireToken(context.Context, string, ...managedidentity.AcquireTokenOption) (managedidentity.AuthResult, error) +} + // enables fakes for test scenarios type msalPublicClient interface { AcquireTokenSilent(ctx context.Context, scopes []string, options ...public.AcquireSilentOption) (public.AuthResult, error) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go index b9976f5fed..36e359a099 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_cli_credential.go @@ -30,9 +30,9 @@ type azTokenProvider func(ctx context.Context, scopes []string, tenant, subscrip // AzureCLICredentialOptions contains optional parameters for AzureCLICredential. 
type AzureCLICredentialOptions struct { - // AdditionallyAllowedTenants specifies tenants for which the credential may acquire tokens, in addition - // to TenantID. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the - // logged in account can access. + // AdditionallyAllowedTenants specifies tenants to which the credential may authenticate, in addition to + // TenantID. When TenantID is empty, this option has no effect and the credential will authenticate to + // any requested tenant. Add the wildcard value "*" to allow the credential to authenticate to any tenant. AdditionallyAllowedTenants []string // Subscription is the name or ID of a subscription. Set this to acquire tokens for an account other @@ -70,7 +70,11 @@ func NewAzureCLICredential(options *AzureCLICredentialOptions) (*AzureCLICredent } for _, r := range cp.Subscription { if !(alphanumeric(r) || r == '-' || r == '_' || r == ' ' || r == '.') { - return nil, fmt.Errorf("%s: invalid Subscription %q", credNameAzureCLI, cp.Subscription) + return nil, fmt.Errorf( + "%s: Subscription %q contains invalid characters. If this is the name of a subscription, use its ID instead", + credNameAzureCLI, + cp.Subscription, + ) } } if cp.TenantID != "" && !validTenantID(cp.TenantID) { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go index cbe7c4c2db..46d0b55192 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_developer_cli_credential.go @@ -30,9 +30,9 @@ type azdTokenProvider func(ctx context.Context, scopes []string, tenant string) // AzureDeveloperCLICredentialOptions contains optional parameters for AzureDeveloperCLICredential. type AzureDeveloperCLICredentialOptions struct { - // AdditionallyAllowedTenants specifies tenants for which the credential may acquire tokens, in addition - // to TenantID. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant the - // logged in account can access. + // AdditionallyAllowedTenants specifies tenants to which the credential may authenticate, in addition to + // TenantID. When TenantID is empty, this option has no effect and the credential will authenticate to + // any requested tenant. Add the wildcard value "*" to allow the credential to authenticate to any tenant. AdditionallyAllowedTenants []string // TenantID identifies the tenant the credential should authenticate in. Defaults to the azd environment, diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_pipelines_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_pipelines_credential.go index 80c1806bb1..a4b8ab6f4d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_pipelines_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/azure_pipelines_credential.go @@ -20,6 +20,8 @@ const ( credNameAzurePipelines = "AzurePipelinesCredential" oidcAPIVersion = "7.1" systemOIDCRequestURI = "SYSTEM_OIDCREQUESTURI" + xMsEdgeRef = "x-msedge-ref" + xVssE2eId = "x-vss-e2eid" ) // AzurePipelinesCredential authenticates with workload identity federation in an Azure Pipeline. See @@ -40,6 +42,11 @@ type AzurePipelinesCredentialOptions struct { // application is registered. 
AdditionallyAllowedTenants []string + // Cache is a persistent cache the credential will use to store the tokens it acquires, making + // them available to other processes and credential instances. The default, zero value means the + // credential will store tokens in memory and not share them with any other credential instance. + Cache Cache + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making @@ -81,8 +88,11 @@ func NewAzurePipelinesCredential(tenantID, clientID, serviceConnectionID, system if options == nil { options = &AzurePipelinesCredentialOptions{} } + // these headers are useful to the DevOps team when debugging OIDC error responses + options.ClientOptions.Logging.AllowedHeaders = append(options.ClientOptions.Logging.AllowedHeaders, xMsEdgeRef, xVssE2eId) caco := ClientAssertionCredentialOptions{ AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + Cache: options.Cache, ClientOptions: options.ClientOptions, DisableInstanceDiscovery: options.DisableInstanceDiscovery, } @@ -108,33 +118,40 @@ func (a *AzurePipelinesCredential) getAssertion(ctx context.Context) (string, er url := a.oidcURI + "?api-version=" + oidcAPIVersion + "&serviceConnectionId=" + a.connectionID url, err := runtime.EncodeQueryParams(url) if err != nil { - return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't encode OIDC URL: "+err.Error(), nil, nil) + return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't encode OIDC URL: "+err.Error(), nil) } req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, nil) if err != nil { - return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't create OIDC token request: "+err.Error(), nil, nil) + return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't create OIDC token request: "+err.Error(), nil) } req.Header.Set("Authorization", "Bearer "+a.systemAccessToken) + // instruct endpoint to return 401 instead of 302, if the system access token is invalid + req.Header.Set("X-TFS-FedAuthRedirect", "Suppress") res, err := doForClient(a.cred.client.azClient, req) if err != nil { - return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't send OIDC token request: "+err.Error(), nil, nil) + return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't send OIDC token request: "+err.Error(), nil) } if res.StatusCode != http.StatusOK { - msg := res.Status + " response from the OIDC endpoint. Check service connection ID and Pipeline configuration" + msg := res.Status + " response from the OIDC endpoint. Check service connection ID and Pipeline configuration." + for _, h := range []string{xMsEdgeRef, xVssE2eId} { + if v := res.Header.Get(h); v != "" { + msg += fmt.Sprintf("\n%s: %s", h, v) + } + } // include the response because its body, if any, probably contains an error message. 
// OK responses aren't included with errors because they probably contain secrets - return "", newAuthenticationFailedError(credNameAzurePipelines, msg, res, nil) + return "", newAuthenticationFailedError(credNameAzurePipelines, msg, res) } b, err := runtime.Payload(res) if err != nil { - return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't read OIDC response content: "+err.Error(), nil, nil) + return "", newAuthenticationFailedError(credNameAzurePipelines, "couldn't read OIDC response content: "+err.Error(), nil) } var r struct { OIDCToken string `json:"oidcToken"` } err = json.Unmarshal(b, &r) if err != nil { - return "", newAuthenticationFailedError(credNameAzurePipelines, "unexpected response from OIDC endpoint", nil, nil) + return "", newAuthenticationFailedError(credNameAzurePipelines, "unexpected response from OIDC endpoint", nil) } return r.OIDCToken, nil } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go index 6c35a941b9..82342a0254 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/chained_token_credential.go @@ -27,7 +27,10 @@ type ChainedTokenCredentialOptions struct { } // ChainedTokenCredential links together multiple credentials and tries them sequentially when authenticating. By default, -// it tries all the credentials until one authenticates, after which it always uses that credential. +// it tries all the credentials until one authenticates, after which it always uses that credential. For more information, +// see [ChainedTokenCredential overview]. +// +// [ChainedTokenCredential overview]: https://aka.ms/azsdk/go/identity/credential-chains#chainedtokencredential-overview type ChainedTokenCredential struct { cond *sync.Cond iterating bool @@ -46,6 +49,9 @@ func NewChainedTokenCredential(sources []azcore.TokenCredential, options *Chaine if source == nil { // cannot have a nil credential in the chain or else the application will panic when GetToken() is called on nil return nil, errors.New("sources cannot contain nil") } + if mc, ok := source.(*ManagedIdentityCredential); ok { + mc.mic.chained = true + } } cp := make([]azcore.TokenCredential, len(sources)) copy(cp, sources) @@ -113,11 +119,19 @@ func (c *ChainedTokenCredential) GetToken(ctx context.Context, opts policy.Token if err != nil { // return credentialUnavailableError iff all sources did so; return AuthenticationFailedError otherwise msg := createChainedErrorMessage(errs) - if errors.As(err, &unavailableErr) { + var authFailedErr *AuthenticationFailedError + switch { + case errors.As(err, &authFailedErr): + err = newAuthenticationFailedError(c.name, msg, authFailedErr.RawResponse) + if af, ok := err.(*AuthenticationFailedError); ok { + // stop Error() printing the response again; it's already in msg + af.omitResponse = true + } + case errors.As(err, &unavailableErr): err = newCredentialUnavailableError(c.name, msg) - } else { + default: res := getResponseFromError(err) - err = newAuthenticationFailedError(c.name, msg, res, err) + err = newAuthenticationFailedError(c.name, msg, res) } } return token, err @@ -126,7 +140,7 @@ func (c *ChainedTokenCredential) GetToken(ctx context.Context, opts policy.Token func createChainedErrorMessage(errs []error) string { msg := "failed to acquire a token.\nAttempted credentials:" for _, err := range errs { - msg += 
fmt.Sprintf("\n\t%s", err.Error()) + msg += fmt.Sprintf("\n\t%s", strings.ReplaceAll(err.Error(), "\n", "\n\t\t")) } return msg } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml index 4cd8c51447..c3af0cdc2d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/ci.yml @@ -27,15 +27,15 @@ extends: CloudConfig: Public: SubscriptionConfigurations: - - $(sub-config-azure-cloud-test-resources) - $(sub-config-identity-test-resources) - EnvVars: - SYSTEM_ACCESSTOKEN: $(System.AccessToken) + EnableRaceDetector: true + Location: westus2 RunLiveTests: true ServiceDirectory: azidentity UsePipelineProxy: false ${{ if endsWith(variables['Build.DefinitionName'], 'weekly') }}: + PersistOidcToken: true MatrixConfigs: - Name: managed_identity_matrix GenerateVMJobs: true diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go index b588750ef3..2307da86f4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_assertion_credential.go @@ -37,14 +37,16 @@ type ClientAssertionCredentialOptions struct { // application is registered. AdditionallyAllowedTenants []string + // Cache is a persistent cache the credential will use to store the tokens it acquires, making + // them available to other processes and credential instances. The default, zero value means the + // credential will store tokens in memory and not share them with any other credential instance. + Cache Cache + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool - - // tokenCachePersistenceOptions enables persistent token caching when not nil. - tokenCachePersistenceOptions *tokenCachePersistenceOptions } // NewClientAssertionCredential constructs a ClientAssertionCredential. The getAssertion function must be thread safe. Pass nil for options to accept defaults. 
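The new exported `Cache` field added above replaces the unexported `tokenCachePersistenceOptions`. A hedged sketch of the intended call pattern (illustrative only, not part of the patch; it assumes the `azidentity/cache` package referenced in TOKEN_CACHING.MD earlier in this diff):

```go
package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache"
)

func main() {
	// cache.New returns an azidentity.Cache backed by the platform's encrypted storage.
	c, err := cache.New(nil)
	if err != nil {
		// Persistent storage isn't available; the credential can still cache in memory.
		log.Printf("persistent cache unavailable: %v", err)
	}

	// The zero-value Cache means in-memory caching, so this works either way.
	// "tenantID" etc. are placeholders for real configuration values.
	cred, err := azidentity.NewClientSecretCredential("tenantID", "clientID", "clientSecret",
		&azidentity.ClientSecretCredentialOptions{Cache: c})
	if err != nil {
		log.Fatal(err)
	}
	_ = cred
}
```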
@@ -61,10 +63,10 @@ func NewClientAssertionCredential(tenantID, clientID string, getAssertion func(c }, ) msalOpts := confidentialClientOptions{ - AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, - ClientOptions: options.ClientOptions, - DisableInstanceDiscovery: options.DisableInstanceDiscovery, - tokenCachePersistenceOptions: options.tokenCachePersistenceOptions, + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + Cache: options.Cache, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, } c, err := newConfidentialClient(tenantID, clientID, credNameAssertion, cred, msalOpts) if err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go index 80cd96b560..9e6bca1c92 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_certificate_credential.go @@ -31,6 +31,11 @@ type ClientCertificateCredentialOptions struct { // application is registered. AdditionallyAllowedTenants []string + // Cache is a persistent cache the credential will use to store the tokens it acquires, making + // them available to other processes and credential instances. The default, zero value means the + // credential will store tokens in memory and not share them with any other credential instance. + Cache Cache + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making @@ -41,9 +46,6 @@ type ClientCertificateCredentialOptions struct { // header of each token request's JWT. This is required for Subject Name/Issuer (SNI) authentication. // Defaults to False. SendCertificateChain bool - - // tokenCachePersistenceOptions enables persistent token caching when not nil. - tokenCachePersistenceOptions *tokenCachePersistenceOptions } // ClientCertificateCredential authenticates a service principal with a certificate. 
@@ -65,11 +67,11 @@ func NewClientCertificateCredential(tenantID string, clientID string, certs []*x return nil, err } msalOpts := confidentialClientOptions{ - AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, - ClientOptions: options.ClientOptions, - DisableInstanceDiscovery: options.DisableInstanceDiscovery, - SendX5C: options.SendCertificateChain, - tokenCachePersistenceOptions: options.tokenCachePersistenceOptions, + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + Cache: options.Cache, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + SendX5C: options.SendCertificateChain, } c, err := newConfidentialClient(tenantID, clientID, credNameCert, cred, msalOpts) if err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go index 9e6772e9b8..f0890fe1ec 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/client_secret_credential.go @@ -32,8 +32,10 @@ type ClientSecretCredentialOptions struct { // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool - // tokenCachePersistenceOptions enables persistent token caching when not nil. - tokenCachePersistenceOptions *tokenCachePersistenceOptions + // Cache is a persistent cache the credential will use to store the tokens it acquires, making + // them available to other processes and credential instances. The default, zero value means the + // credential will store tokens in memory and not share them with any other credential instance. + Cache Cache } // ClientSecretCredential authenticates an application with a client secret. @@ -51,10 +53,10 @@ func NewClientSecretCredential(tenantID string, clientID string, clientSecret st return nil, err } msalOpts := confidentialClientOptions{ - AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, - ClientOptions: options.ClientOptions, - DisableInstanceDiscovery: options.DisableInstanceDiscovery, - tokenCachePersistenceOptions: options.tokenCachePersistenceOptions, + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + Cache: options.Cache, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, } c, err := newConfidentialClient(tenantID, clientID, credNameSecret, cred, msalOpts) if err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go index 3bd08c685f..58c4b585c1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/confidential_client.go @@ -29,8 +29,8 @@ type confidentialClientOptions struct { AdditionallyAllowedTenants []string // Assertion for on-behalf-of authentication Assertion string + Cache Cache DisableInstanceDiscovery, SendX5C bool - tokenCachePersistenceOptions *tokenCachePersistenceOptions } // confidentialClient wraps the MSAL confidential client @@ -107,18 +107,18 @@ func (c *confidentialClient) GetToken(ctx context.Context, tro policy.TokenReque } } if err != nil { - // We could get a credentialUnavailableError from managed identity authentication because in that case the error comes from our code. 
- // We return it directly because it affects the behavior of credential chains. Otherwise, we return AuthenticationFailedError. - var unavailableErr credentialUnavailable - if !errors.As(err, &unavailableErr) { - res := getResponseFromError(err) - err = newAuthenticationFailedError(c.name, err.Error(), res, err) + var ( + authFailedErr *AuthenticationFailedError + unavailableErr credentialUnavailable + ) + if !(errors.As(err, &unavailableErr) || errors.As(err, &authFailedErr)) { + err = newAuthenticationFailedErrorFromMSAL(c.name, err) } } else { - msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", c.name, strings.Join(ar.GrantedScopes, ", ")) + msg := fmt.Sprintf(scopeLogFmt, c.name, strings.Join(ar.GrantedScopes, ", ")) log.Write(EventAuthentication, msg) } - return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC(), RefreshOn: ar.Metadata.RefreshOn.UTC()}, err } func (c *confidentialClient) client(tro policy.TokenRequestOptions) (msalConfidentialClient, *sync.Mutex, error) { @@ -145,7 +145,7 @@ func (c *confidentialClient) client(tro policy.TokenRequestOptions) (msalConfide } func (c *confidentialClient) newMSALClient(enableCAE bool) (msalConfidentialClient, error) { - cache, err := internal.NewCache(c.opts.tokenCachePersistenceOptions, enableCAE) + cache, err := internal.ExportReplace(c.opts.Cache, enableCAE) if err != nil { return nil, err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go index 551d319946..14af271f6a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/default_azure_credential.go @@ -23,23 +23,30 @@ type DefaultAzureCredentialOptions struct { // to credential types that authenticate via external tools such as the Azure CLI. azcore.ClientOptions - // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire tokens. Add - // the wildcard value "*" to allow the credential to acquire tokens for any tenant. This value can also be - // set as a semicolon delimited list of tenants in the environment variable AZURE_ADDITIONALLY_ALLOWED_TENANTS. + // AdditionallyAllowedTenants specifies tenants to which the credential may authenticate, in addition to + // TenantID. When TenantID is empty, this option has no effect and the credential will authenticate to + // any requested tenant. Add the wildcard value "*" to allow the credential to authenticate to any tenant. + // This value can also be set as a semicolon delimited list of tenants in the environment variable + // AZURE_ADDITIONALLY_ALLOWED_TENANTS. AdditionallyAllowedTenants []string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool + // TenantID sets the default tenant for authentication via the Azure CLI and workload identity. 
TenantID string } -// DefaultAzureCredential is a default credential chain for applications that will deploy to Azure. -// It combines credentials suitable for deployment with credentials suitable for local development. -// It attempts to authenticate with each of these credential types, in the following order, stopping -// when one provides a token: +// DefaultAzureCredential simplifies authentication while developing applications that deploy to Azure by +// combining credentials used in Azure hosting environments and credentials used in local development. In +// production, it's better to use a specific credential type so authentication is more predictable and easier +// to debug. For more information, see [DefaultAzureCredential overview]. +// +// DefaultAzureCredential attempts to authenticate with each of these credential types, in the following order, +// stopping when one provides a token: // // - [EnvironmentCredential] // - [WorkloadIdentityCredential], if environment variable configuration is set by the Azure workload @@ -52,6 +59,8 @@ type DefaultAzureCredentialOptions struct { // Consult the documentation for these credential types for more information on how they authenticate. // Once a credential has successfully authenticated, DefaultAzureCredential will use that credential for // every subsequent authentication. +// +// [DefaultAzureCredential overview]: https://aka.ms/azsdk/go/identity/credential-chains#defaultazurecredential-overview type DefaultAzureCredential struct { chain *ChainedTokenCredential } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go index cd30bedd5e..53ae9767f4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/device_code_credential.go @@ -21,22 +21,31 @@ const credNameDeviceCode = "DeviceCodeCredential" type DeviceCodeCredentialOptions struct { azcore.ClientOptions - // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire - // tokens. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant. + // AdditionallyAllowedTenants specifies tenants to which the credential may authenticate, in addition to + // TenantID. When TenantID is empty, this option has no effect and the credential will authenticate to + // any requested tenant. Add the wildcard value "*" to allow the credential to authenticate to any tenant. AdditionallyAllowedTenants []string - // authenticationRecord returned by a call to a credential's Authenticate method. Set this option + // AuthenticationRecord returned by a call to a credential's Authenticate method. Set this option // to enable the credential to use data from a previous authentication. - authenticationRecord authenticationRecord - - // ClientID is the ID of the application users will authenticate to. - // Defaults to the ID of an Azure development application. + AuthenticationRecord AuthenticationRecord + + // Cache is a persistent cache the credential will use to store the tokens it acquires, making + // them available to other processes and credential instances. The default, zero value means the + // credential will store tokens in memory and not share them with any other credential instance. + Cache Cache + + // ClientID is the ID of the application to which users will authenticate. 
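[note] For the DefaultAzureCredential chain documented above, a runnable sketch of construction and a direct token request; the ARM scope is an example value, and the printed RefreshOn field is the proactive-refresh hint this patch starts populating:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatal(err)
	}
	// Example scope; use the resource your client actually targets.
	tk, err := cred.GetToken(context.Background(), policy.TokenRequestOptions{
		Scopes: []string{"https://management.azure.com/.default"},
	})
	if err != nil {
		log.Fatal(err)
	}
	// ExpiresOn is the hard expiry; RefreshOn hints when proactive refresh
	// should begin (zero when the service supplies no hint).
	fmt.Println(tk.ExpiresOn, tk.RefreshOn)
}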
When not set, users + // will authenticate to an Azure development application, which isn't recommended for production + // scenarios. In production, developers should instead register their applications and assign + // appropriate roles. See https://aka.ms/azsdk/identity/AppRegistrationAndRoleAssignment for more + // information. ClientID string - // disableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate. - // When this option is true, GetToken will return authenticationRequiredError when user interaction is necessary + // DisableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate. + // When this option is true, GetToken will return AuthenticationRequiredError when user interaction is necessary // to acquire a token. - disableAutomaticAuthentication bool + DisableAutomaticAuthentication bool // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata @@ -49,9 +58,6 @@ type DeviceCodeCredentialOptions struct { // applications. TenantID string - // tokenCachePersistenceOptions enables persistent token caching when not nil. - tokenCachePersistenceOptions *tokenCachePersistenceOptions - // UserPrompt controls how the credential presents authentication instructions. The credential calls // this function with authentication details when it receives a device code. By default, the credential // prints these details to stdout. @@ -101,12 +107,12 @@ func NewDeviceCodeCredential(options *DeviceCodeCredentialOptions) (*DeviceCodeC cp.init() msalOpts := publicClientOptions{ AdditionallyAllowedTenants: cp.AdditionallyAllowedTenants, + Cache: cp.Cache, ClientOptions: cp.ClientOptions, DeviceCodePrompt: cp.UserPrompt, - DisableAutomaticAuthentication: cp.disableAutomaticAuthentication, + DisableAutomaticAuthentication: cp.DisableAutomaticAuthentication, DisableInstanceDiscovery: cp.DisableInstanceDiscovery, - Record: cp.authenticationRecord, - TokenCachePersistenceOptions: cp.tokenCachePersistenceOptions, + Record: cp.AuthenticationRecord, } c, err := newPublicClient(cp.TenantID, cp.ClientID, credNameDeviceCode, msalOpts) if err != nil { @@ -116,8 +122,9 @@ func NewDeviceCodeCredential(options *DeviceCodeCredentialOptions) (*DeviceCodeC return &DeviceCodeCredential{client: c}, nil } -// Authenticate a user via the device code flow. Subsequent calls to GetToken will automatically use the returned AuthenticationRecord. -func (c *DeviceCodeCredential) authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (authenticationRecord, error) { +// Authenticate prompts a user to log in via the device code flow. Subsequent +// GetToken calls will automatically use the returned AuthenticationRecord. 
+func (c *DeviceCodeCredential) Authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (AuthenticationRecord, error) { var err error ctx, endSpan := runtime.StartSpan(ctx, credNameDeviceCode+"."+traceOpAuthenticate, c.client.azClient.Tracer(), nil) defer func() { endSpan(err) }() diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go index b30f5474f5..ec1eab05c5 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/environment_credential.go @@ -60,7 +60,10 @@ type EnvironmentCredentialOptions struct { // Note that this credential uses [ParseCertificates] to load the certificate and key from the file. If this // function isn't able to parse your certificate, use [ClientCertificateCredential] instead. // -// # User with username and password +// # Deprecated: User with username and password +// +// User password authentication is deprecated because it can't support multifactor authentication. See +// [Entra ID documentation] for migration guidance. // // AZURE_TENANT_ID: (optional) tenant to authenticate in. Defaults to "organizations". // @@ -75,6 +78,8 @@ type EnvironmentCredentialOptions struct { // To enable multitenant authentication, set AZURE_ADDITIONALLY_ALLOWED_TENANTS with a semicolon delimited list of tenants // the credential may request tokens from in addition to the tenant specified by AZURE_TENANT_ID. Set // AZURE_ADDITIONALLY_ALLOWED_TENANTS to "*" to enable the credential to request a token from any tenant. +// +// [Entra ID documentation]: https://aka.ms/azsdk/identity/mfa type EnvironmentCredential struct { cred azcore.TokenCredential } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go index 35fa01d136..b05cb035a8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/errors.go @@ -38,18 +38,30 @@ type AuthenticationFailedError struct { // RawResponse is the HTTP response motivating the error, if available. RawResponse *http.Response - credType string - message string - err error + credType, message string + omitResponse bool } -func newAuthenticationFailedError(credType string, message string, resp *http.Response, err error) error { - return &AuthenticationFailedError{credType: credType, message: message, RawResponse: resp, err: err} +func newAuthenticationFailedError(credType, message string, resp *http.Response) error { + return &AuthenticationFailedError{credType: credType, message: message, RawResponse: resp} +} + +// newAuthenticationFailedErrorFromMSAL creates an AuthenticationFailedError from an MSAL error. +// If the error is an MSAL CallErr, the new error includes an HTTP response and not the MSAL error +// message, because that message is redundant given the response. If the original error isn't a +// CallErr, the returned error incorporates its message. +func newAuthenticationFailedErrorFromMSAL(credType string, err error) error { + msg := "" + res := getResponseFromError(err) + if res == nil { + msg = err.Error() + } + return newAuthenticationFailedError(credType, msg, res) } // Error implements the error interface. Note that the message contents are not contractual and can change over time. 
func (e *AuthenticationFailedError) Error() string { - if e.RawResponse == nil { + if e.RawResponse == nil || e.omitResponse { return e.credType + ": " + e.message } msg := &bytes.Buffer{} @@ -62,7 +74,7 @@ func (e *AuthenticationFailedError) Error() string { fmt.Fprintln(msg, "Request information not available") } fmt.Fprintln(msg, "--------------------------------------------------------------------------------") - fmt.Fprintf(msg, "RESPONSE %s\n", e.RawResponse.Status) + fmt.Fprintf(msg, "RESPONSE %d: %s\n", e.RawResponse.StatusCode, e.RawResponse.Status) fmt.Fprintln(msg, "--------------------------------------------------------------------------------") body, err := runtime.Payload(e.RawResponse) switch { @@ -109,17 +121,17 @@ func (*AuthenticationFailedError) NonRetriable() { var _ errorinfo.NonRetriable = (*AuthenticationFailedError)(nil) -// authenticationRequiredError indicates a credential's Authenticate method must be called to acquire a token +// AuthenticationRequiredError indicates a credential's Authenticate method must be called to acquire a token // because the credential requires user interaction and is configured not to request it automatically. -type authenticationRequiredError struct { +type AuthenticationRequiredError struct { credentialUnavailableError // TokenRequestOptions for the required token. Pass this to the credential's Authenticate method. TokenRequestOptions policy.TokenRequestOptions } -func newauthenticationRequiredError(credType string, tro policy.TokenRequestOptions) error { - return &authenticationRequiredError{ +func newAuthenticationRequiredError(credType string, tro policy.TokenRequestOptions) error { + return &AuthenticationRequiredError{ credentialUnavailableError: credentialUnavailableError{ credType + " can't acquire a token without user interaction. Call Authenticate to authenticate a user interactively", }, @@ -128,8 +140,8 @@ func newauthenticationRequiredError(credType string, tro policy.TokenRequestOpti } var ( - _ credentialUnavailable = (*authenticationRequiredError)(nil) - _ errorinfo.NonRetriable = (*authenticationRequiredError)(nil) + _ credentialUnavailable = (*AuthenticationRequiredError)(nil) + _ errorinfo.NonRetriable = (*AuthenticationRequiredError)(nil) ) type credentialUnavailable interface { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work index 04ea962b42..6dd5b3d64d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work @@ -1,4 +1,4 @@ -go 1.18 +go 1.23.0 use ( . 
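[note] Exporting AuthenticationRequiredError makes the manual-authentication flow reachable from application code. A sketch using DeviceCodeCredential with the newly exported DisableAutomaticAuthentication; the Graph scope is an example value:

package main

import (
	"context"
	"errors"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	ctx := context.Background()
	cred, err := azidentity.NewDeviceCodeCredential(&azidentity.DeviceCodeCredentialOptions{
		DisableAutomaticAuthentication: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	tro := policy.TokenRequestOptions{Scopes: []string{"https://graph.microsoft.com/.default"}} // example scope
	_, err = cred.GetToken(ctx, tro)
	var are *azidentity.AuthenticationRequiredError
	if errors.As(err, &are) {
		// The error carries the options of the failed request, so the
		// interactive step requests exactly the scopes that were needed.
		record, err := cred.Authenticate(ctx, &are.TokenRequestOptions)
		if err != nil {
			log.Fatal(err)
		}
		_ = record // persist the record to skip this prompt in future runs
	}
}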
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum deleted file mode 100644 index c592f283b6..0000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/go.work.sum +++ /dev/null @@ -1,60 +0,0 @@ -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0-beta.1 h1:ODs3brnqQM99Tq1PffODpAViYv3Bf8zOg464MU7p5ew= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0-beta.1/go.mod h1:3Ug6Qzto9anB6mGlEdgYMDF5zHQ+wwhEaYR4s17PHMw= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0 h1:fb8kj/Dh4CSwgsOzHeZY4Xh68cFVbzXx+ONXGMY//4w= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.0/go.mod h1:uReU2sSxZExRPBAg3qKzmAucSi51+SP1OhohieR821Q= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0/go.mod h1:okt5dMMTOFjX/aovMlrjvvXoPMBVSPzk9185BT0+eZM= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/keybase/dbus v0.0.0-20220506165403-5aa21ea2c23a/go.mod h1:YPNKjjE7Ubp9dTbnWvsP3HT+hYnY6TfXzubYTBeUxc8= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= -golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= -golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= -golang.org/x/net v0.20.0/go.mod 
h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= -golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= -golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= -golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= -golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= -golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= -golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= -golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= -golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go index 056785a8a3..ec89de9b5b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/interactive_browser_credential.go @@ -20,22 +20,31 @@ const credNameBrowser = "InteractiveBrowserCredential" type InteractiveBrowserCredentialOptions struct { azcore.ClientOptions - // AdditionallyAllowedTenants specifies additional tenants for which the credential may acquire - // tokens. Add the wildcard value "*" to allow the credential to acquire tokens for any tenant. + // AdditionallyAllowedTenants specifies tenants to which the credential may authenticate, in addition to + // TenantID. When TenantID is empty, this option has no effect and the credential will authenticate to + // any requested tenant. Add the wildcard value "*" to allow the credential to authenticate to any tenant. AdditionallyAllowedTenants []string - // authenticationRecord returned by a call to a credential's Authenticate method. Set this option + // AuthenticationRecord returned by a call to a credential's Authenticate method. Set this option // to enable the credential to use data from a previous authentication. 
- authenticationRecord authenticationRecord - - // ClientID is the ID of the application users will authenticate to. - // Defaults to the ID of an Azure development application. + AuthenticationRecord AuthenticationRecord + + // Cache is a persistent cache the credential will use to store the tokens it acquires, making + // them available to other processes and credential instances. The default, zero value means the + // credential will store tokens in memory and not share them with any other credential instance. + Cache Cache + + // ClientID is the ID of the application to which users will authenticate. When not set, users + // will authenticate to an Azure development application, which isn't recommended for production + // scenarios. In production, developers should instead register their applications and assign + // appropriate roles. See https://aka.ms/azsdk/identity/AppRegistrationAndRoleAssignment for more + // information. ClientID string - // disableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate. - // When this option is true, GetToken will return authenticationRequiredError when user interaction is necessary + // DisableAutomaticAuthentication prevents the credential from automatically prompting the user to authenticate. + // When this option is true, GetToken will return AuthenticationRequiredError when user interaction is necessary // to acquire a token. - disableAutomaticAuthentication bool + DisableAutomaticAuthentication bool // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata @@ -54,9 +63,6 @@ type InteractiveBrowserCredentialOptions struct { // TenantID is the Microsoft Entra tenant the credential authenticates in. Defaults to the // "organizations" tenant, which can authenticate work and school accounts. TenantID string - - // tokenCachePersistenceOptions enables persistent token caching when not nil. - tokenCachePersistenceOptions *tokenCachePersistenceOptions } func (o *InteractiveBrowserCredentialOptions) init() { @@ -82,13 +88,13 @@ func NewInteractiveBrowserCredential(options *InteractiveBrowserCredentialOption cp.init() msalOpts := publicClientOptions{ AdditionallyAllowedTenants: cp.AdditionallyAllowedTenants, + Cache: cp.Cache, ClientOptions: cp.ClientOptions, - DisableAutomaticAuthentication: cp.disableAutomaticAuthentication, + DisableAutomaticAuthentication: cp.DisableAutomaticAuthentication, DisableInstanceDiscovery: cp.DisableInstanceDiscovery, LoginHint: cp.LoginHint, - Record: cp.authenticationRecord, + Record: cp.AuthenticationRecord, RedirectURL: cp.RedirectURL, - TokenCachePersistenceOptions: cp.tokenCachePersistenceOptions, } c, err := newPublicClient(cp.TenantID, cp.ClientID, credNameBrowser, msalOpts) if err != nil { @@ -97,8 +103,9 @@ func NewInteractiveBrowserCredential(options *InteractiveBrowserCredentialOption return &InteractiveBrowserCredential{client: c}, nil } -// Authenticate a user via the default browser. Subsequent calls to GetToken will automatically use the returned AuthenticationRecord. -func (c *InteractiveBrowserCredential) authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (authenticationRecord, error) { +// Authenticate opens the default browser so a user can log in. Subsequent +// GetToken calls will automatically use the returned AuthenticationRecord. 
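[note] A sketch of reusing the now-exported AuthenticationRecord across program runs, assuming the record round-trips through encoding/json and that Authenticate's nil options select a default scope; the file path is a placeholder:

package main

import (
	"context"
	"encoding/json"
	"log"
	"os"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

const recordPath = "auth_record.json" // placeholder location

func main() {
	ctx := context.Background()
	opts := azidentity.InteractiveBrowserCredentialOptions{}
	saved, err := os.ReadFile(recordPath)
	if err == nil {
		// Reuse a record from a previous run: no browser prompt while
		// previously cached tokens remain valid.
		if err = json.Unmarshal(saved, &opts.AuthenticationRecord); err != nil {
			log.Fatal(err)
		}
	}
	cred, err := azidentity.NewInteractiveBrowserCredential(&opts)
	if err != nil {
		log.Fatal(err)
	}
	if len(saved) == 0 {
		// First run: open the browser, then persist the record for next time.
		record, err := cred.Authenticate(ctx, nil)
		if err != nil {
			log.Fatal(err)
		}
		b, err := json.Marshal(record)
		if err != nil {
			log.Fatal(err)
		}
		if err = os.WriteFile(recordPath, b, 0600); err != nil {
			log.Fatal(err)
		}
	}
}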
+func (c *InteractiveBrowserCredential) Authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (AuthenticationRecord, error) { var err error ctx, endSpan := runtime.StartSpan(ctx, credNameBrowser+"."+traceOpAuthenticate, c.client.azClient.Tracer(), nil) defer func() { endSpan(err) }() diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/cache.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/cache.go new file mode 100644 index 0000000000..c0cfe76060 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/cache.go @@ -0,0 +1,86 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +package internal + +import ( + "sync" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache" +) + +// Cache represents a persistent cache that makes authentication data available across processes. +// Construct one with [github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache.New]. This package's +// [persistent user authentication example] shows how to use a persistent cache to reuse user +// logins across application runs. For service principal credential types such as +// [ClientCertificateCredential], simply set the Cache field on the credential options. +// +// [persistent user authentication example]: https://pkg.go.dev/github.com/Azure/azure-sdk-for-go/sdk/azidentity#example-package-PersistentUserAuthentication +type Cache struct { + // impl is a pointer so a Cache can carry persistent state across copies + impl *impl +} + +// impl is a Cache's private implementation +type impl struct { + // factory constructs storage implementations + factory func(bool) (cache.ExportReplace, error) + // cae and noCAE are previously constructed storage implementations. CAE + // and non-CAE tokens must be stored separately because MSAL's cache doesn't + // observe token claims. If a single storage implementation held both kinds + // of tokens, it could create a reauthentication or error loop by returning + // a non-CAE token lacking a required claim. + cae, noCAE cache.ExportReplace + // mu synchronizes around cae and noCAE + mu *sync.RWMutex +} + +func (i *impl) exportReplace(cae bool) (cache.ExportReplace, error) { + if i == nil { + // zero-value Cache: return a nil ExportReplace and MSAL will cache in memory + return nil, nil + } + var ( + err error + xr cache.ExportReplace + ) + i.mu.RLock() + xr = i.cae + if !cae { + xr = i.noCAE + } + i.mu.RUnlock() + if xr != nil { + return xr, nil + } + i.mu.Lock() + defer i.mu.Unlock() + if cae { + if i.cae == nil { + if xr, err = i.factory(cae); err == nil { + i.cae = xr + } + } + return i.cae, err + } + if i.noCAE == nil { + if xr, err = i.factory(cae); err == nil { + i.noCAE = xr + } + } + return i.noCAE, err +} + +// NewCache is the constructor for Cache. It takes a factory instead of an instance +// because it doesn't know whether the Cache will store both CAE and non-CAE tokens. +func NewCache(factory func(cae bool) (cache.ExportReplace, error)) Cache { + return Cache{&impl{factory: factory, mu: &sync.RWMutex{}}} +} + +// ExportReplace returns an implementation satisfying MSAL's ExportReplace interface. +// It's a function instead of a method on Cache so packages in azidentity and +// azidentity/cache can call it while applications can't. "cae" declares whether the +// caller intends this implementation to store CAE tokens. 
+func ExportReplace(c Cache, cae bool) (cache.ExportReplace, error) { + return c.impl.exportReplace(cae) +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/exported.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/exported.go deleted file mode 100644 index b1b4d5c8bd..0000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/exported.go +++ /dev/null @@ -1,18 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package internal - -// TokenCachePersistenceOptions contains options for persistent token caching -type TokenCachePersistenceOptions struct { - // AllowUnencryptedStorage controls whether the cache should fall back to storing its data in plain text - // when encryption isn't possible. Setting this true doesn't disable encryption. The cache always attempts - // encryption before falling back to plaintext storage. - AllowUnencryptedStorage bool - - // Name identifies the cache. Set this to isolate data from other applications. - Name string -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/internal.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/internal.go deleted file mode 100644 index c1498b4644..0000000000 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/internal/internal.go +++ /dev/null @@ -1,31 +0,0 @@ -//go:build go1.18 -// +build go1.18 - -// Copyright (c) Microsoft Corporation. All rights reserved. -// Licensed under the MIT License. - -package internal - -import ( - "errors" - - "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache" -) - -var errMissingImport = errors.New("import github.com/Azure/azure-sdk-for-go/sdk/azidentity/cache to enable persistent caching") - -// NewCache constructs a persistent token cache when "o" isn't nil. Applications that intend to -// use a persistent cache must first import the cache module, which will replace this function -// with a platform-specific implementation. -var NewCache = func(o *TokenCachePersistenceOptions, enableCAE bool) (cache.ExportReplace, error) { - if o == nil { - return nil, nil - } - return nil, errMissingImport -} - -// CacheFilePath returns the path to the cache file for the given name. -// Defining it in this package makes it available to azidentity tests. 
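[note] The exportReplace method above lazily builds at most one storage implementation per CAE flag using double-checked locking: a read lock for the fast path, then a write lock and re-check so the factory runs once even under races. A standalone sketch of the same pattern with invented names (the real type caches cache.ExportReplace values, not strings):

package main

import (
	"fmt"
	"sync"
)

// lazyPair constructs one value per boolean key, mirroring the cae/noCAE
// split: RLock for the common already-built case, Lock plus a second check
// before invoking the factory.
type lazyPair struct {
	factory func(bool) (string, error)
	vals    map[bool]string
	mu      sync.RWMutex
}

func (l *lazyPair) get(key bool) (string, error) {
	l.mu.RLock()
	v, ok := l.vals[key]
	l.mu.RUnlock()
	if ok {
		return v, nil
	}
	l.mu.Lock()
	defer l.mu.Unlock()
	if v, ok := l.vals[key]; ok { // another goroutine may have won the race
		return v, nil
	}
	v, err := l.factory(key)
	if err == nil {
		l.vals[key] = v
	}
	return v, err
}

func main() {
	p := &lazyPair{
		factory: func(cae bool) (string, error) {
			return fmt.Sprintf("store(cae=%v)", cae), nil
		},
		vals: map[bool]string{},
	}
	fmt.Println(p.get(true))
	fmt.Println(p.get(false))
}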
-var CacheFilePath = func(name string) (string, error) { - return "", errMissingImport -} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json index 1c3791777a..edd56f9d57 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed-identity-matrix.json @@ -9,7 +9,7 @@ } }, "GoVersion": [ - "1.22.1" + "env:GO_VERSION_PREVIOUS" ], "IDENTITY_IMDS_AVAILABLE": "1" } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go index 6122cc7005..b3a0f85883 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_client.go @@ -8,24 +8,18 @@ package azidentity import ( "context" - "encoding/json" "errors" "fmt" "net/http" - "net/url" - "os" - "path/filepath" - "runtime" - "strconv" "strings" "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" azruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" - "github.com/Azure/azure-sdk-for-go/sdk/azcore/streaming" "github.com/Azure/azure-sdk-for-go/sdk/internal/log" - "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" + msalerrors "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity" ) const ( @@ -41,56 +35,20 @@ const ( msiResID = "msi_res_id" msiSecret = "MSI_SECRET" imdsAPIVersion = "2018-02-01" - azureArcAPIVersion = "2019-08-15" + azureArcAPIVersion = "2020-06-01" qpClientID = "client_id" serviceFabricAPIVersion = "2019-07-01-preview" ) var imdsProbeTimeout = time.Second -type msiType int - -const ( - msiTypeAppService msiType = iota - msiTypeAzureArc - msiTypeAzureML - msiTypeCloudShell - msiTypeIMDS - msiTypeServiceFabric -) - type managedIdentityClient struct { - azClient *azcore.Client - endpoint string - id ManagedIDKind - msiType msiType - probeIMDS bool -} - -// arcKeyDirectory returns the directory expected to contain Azure Arc keys -var arcKeyDirectory = func() (string, error) { - switch runtime.GOOS { - case "linux": - return "/var/opt/azcmagent/tokens", nil - case "windows": - pd := os.Getenv("ProgramData") - if pd == "" { - return "", errors.New("environment variable ProgramData has no value") - } - return filepath.Join(pd, "AzureConnectedMachineAgent", "Tokens"), nil - default: - return "", fmt.Errorf("unsupported OS %q", runtime.GOOS) - } -} - -type wrappedNumber json.Number - -func (n *wrappedNumber) UnmarshalJSON(b []byte) error { - c := string(b) - if c == "\"\"" { - return nil - } - return json.Unmarshal(b, (*json.Number)(n)) + azClient *azcore.Client + imds, probeIMDS, userAssigned bool + // chained indicates whether the client is part of a credential chain. If true, the client will return + // a credentialUnavailableError instead of an AuthenticationFailedError for an unexpected IMDS response. 
+ chained bool + msalClient msalManagedIdentityClient } // setIMDSRetryOptionDefaults sets zero-valued fields to default values appropriate for IMDS @@ -138,39 +96,20 @@ func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*manag options = &ManagedIdentityCredentialOptions{} } cp := options.ClientOptions - c := managedIdentityClient{id: options.ID, endpoint: imdsEndpoint, msiType: msiTypeIMDS} - env := "IMDS" - if endpoint, ok := os.LookupEnv(identityEndpoint); ok { - if _, ok := os.LookupEnv(identityHeader); ok { - if _, ok := os.LookupEnv(identityServerThumbprint); ok { - env = "Service Fabric" - c.endpoint = endpoint - c.msiType = msiTypeServiceFabric - } else { - env = "App Service" - c.endpoint = endpoint - c.msiType = msiTypeAppService - } - } else if _, ok := os.LookupEnv(arcIMDSEndpoint); ok { - env = "Azure Arc" - c.endpoint = endpoint - c.msiType = msiTypeAzureArc - } - } else if endpoint, ok := os.LookupEnv(msiEndpoint); ok { - c.endpoint = endpoint - if _, ok := os.LookupEnv(msiSecret); ok { - env = "Azure ML" - c.msiType = msiTypeAzureML - } else { - env = "Cloud Shell" - c.msiType = msiTypeCloudShell - } - } else { + c := managedIdentityClient{} + source, err := managedidentity.GetSource() + if err != nil { + return nil, err + } + env := string(source) + if source == managedidentity.DefaultToIMDS { + env = "IMDS" + c.imds = true c.probeIMDS = options.dac setIMDSRetryOptionDefaults(&cp.Retry) } - client, err := azcore.NewClient(module, version, azruntime.PipelineOptions{ + c.azClient, err = azcore.NewClient(module, version, azruntime.PipelineOptions{ Tracing: azruntime.TracingOptions{ Namespace: traceNamespace, }, @@ -178,39 +117,65 @@ func newManagedIdentityClient(options *ManagedIdentityCredentialOptions) (*manag if err != nil { return nil, err } - c.azClient = client + + id := managedidentity.SystemAssigned() + if options.ID != nil { + c.userAssigned = true + switch s := options.ID.String(); options.ID.idKind() { + case miClientID: + id = managedidentity.UserAssignedClientID(s) + case miObjectID: + id = managedidentity.UserAssignedObjectID(s) + case miResourceID: + id = managedidentity.UserAssignedResourceID(s) + } + } + msalClient, err := managedidentity.New(id, managedidentity.WithHTTPClient(&c), managedidentity.WithRetryPolicyDisabled()) + if err != nil { + return nil, err + } + c.msalClient = &msalClient if log.Should(EventAuthentication) { - log.Writef(EventAuthentication, "Managed Identity Credential will use %s managed identity", env) + msg := fmt.Sprintf("%s will use %s managed identity", credNameManagedIdentity, env) + if options.ID != nil { + kind := "client" + switch options.ID.(type) { + case ObjectID: + kind = "object" + case ResourceID: + kind = "resource" + } + msg += fmt.Sprintf(" with %s ID %q", kind, options.ID.String()) + } + log.Write(EventAuthentication, msg) } return &c, nil } -// provideToken acquires a token for MSAL's confidential.Client, which caches the token -func (c *managedIdentityClient) provideToken(ctx context.Context, params confidential.TokenProviderParameters) (confidential.TokenProviderResult, error) { - result := confidential.TokenProviderResult{} - tk, err := c.authenticate(ctx, c.id, params.Scopes) - if err == nil { - result.AccessToken = tk.Token - result.ExpiresInSeconds = int(time.Until(tk.ExpiresOn).Seconds()) - } - return result, err +func (*managedIdentityClient) CloseIdleConnections() { + // do nothing +} + +func (c *managedIdentityClient) Do(r *http.Request) (*http.Response, error) { + return 
doForClient(c.azClient, r) } // authenticate acquires an access token -func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKind, scopes []string) (azcore.AccessToken, error) { +func (c *managedIdentityClient) GetToken(ctx context.Context, tro policy.TokenRequestOptions) (azcore.AccessToken, error) { // no need to synchronize around this value because it's true only when DefaultAzureCredential constructed the client, // and in that case ChainedTokenCredential.GetToken synchronizes goroutines that would execute this block if c.probeIMDS { + // send a malformed request (no Metadata header) to IMDS to determine whether the endpoint is available cx, cancel := context.WithTimeout(ctx, imdsProbeTimeout) defer cancel() cx = policy.WithRetryOptions(cx, policy.RetryOptions{MaxRetries: -1}) - req, err := azruntime.NewRequest(cx, http.MethodGet, c.endpoint) - if err == nil { - _, err = c.azClient.Pipeline().Do(req) - } + req, err := azruntime.NewRequest(cx, http.MethodGet, imdsEndpoint) if err != nil { + return azcore.AccessToken{}, fmt.Errorf("failed to create IMDS probe request: %s", err) + } + if _, err = c.azClient.Pipeline().Do(req); err != nil { msg := err.Error() if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { msg = "managed identity timed out. See https://aka.ms/azsdk/go/identity/troubleshoot#dac for more information" @@ -221,25 +186,27 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi c.probeIMDS = false } - msg, err := c.createAuthRequest(ctx, id, scopes) - if err != nil { - return azcore.AccessToken{}, err - } - - resp, err := c.azClient.Pipeline().Do(msg) - if err != nil { - return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, err.Error(), nil, err) - } - - if azruntime.HasStatusCode(resp, http.StatusOK, http.StatusCreated) { - return c.createAccessToken(resp) + ar, err := c.msalClient.AcquireToken(ctx, tro.Scopes[0], managedidentity.WithClaims(tro.Claims)) + if err == nil { + msg := fmt.Sprintf(scopeLogFmt, credNameManagedIdentity, strings.Join(ar.GrantedScopes, ", ")) + log.Write(EventAuthentication, msg) + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC(), RefreshOn: ar.Metadata.RefreshOn.UTC()}, err } - - if c.msiType == msiTypeIMDS { + if c.imds { + var ije msalerrors.InvalidJsonErr + if c.chained && errors.As(err, &ije) { + // an unmarshaling error implies the response is from something other than IMDS such as a proxy listening at + // the same address. 
Return a credentialUnavailableError so credential chains continue to their next credential + return azcore.AccessToken{}, newCredentialUnavailableError(credNameManagedIdentity, err.Error()) + } + resp := getResponseFromError(err) + if resp == nil { + return azcore.AccessToken{}, newAuthenticationFailedErrorFromMSAL(credNameManagedIdentity, err) + } switch resp.StatusCode { case http.StatusBadRequest: - if id != nil { - return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "the requested identity isn't assigned to this resource", resp, nil) + if c.userAssigned { + return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "the requested identity isn't assigned to this resource", resp) } msg := "failed to authenticate a system assigned identity" if body, err := azruntime.Payload(resp); err == nil && len(body) > 0 { @@ -255,247 +222,6 @@ func (c *managedIdentityClient) authenticate(ctx context.Context, id ManagedIDKi } } } - - return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "authentication failed", resp, nil) -} - -func (c *managedIdentityClient) createAccessToken(res *http.Response) (azcore.AccessToken, error) { - value := struct { - // these are the only fields that we use - Token string `json:"access_token,omitempty"` - RefreshToken string `json:"refresh_token,omitempty"` - ExpiresIn wrappedNumber `json:"expires_in,omitempty"` // this field should always return the number of seconds for which a token is valid - ExpiresOn interface{} `json:"expires_on,omitempty"` // the value returned in this field varies between a number and a date string - }{} - if err := azruntime.UnmarshalAsJSON(res, &value); err != nil { - return azcore.AccessToken{}, fmt.Errorf("internal AccessToken: %v", err) - } - if value.ExpiresIn != "" { - expiresIn, err := json.Number(value.ExpiresIn).Int64() - if err != nil { - return azcore.AccessToken{}, err - } - return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Now().Add(time.Second * time.Duration(expiresIn)).UTC()}, nil - } - switch v := value.ExpiresOn.(type) { - case float64: - return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Unix(int64(v), 0).UTC()}, nil - case string: - if expiresOn, err := strconv.Atoi(v); err == nil { - return azcore.AccessToken{Token: value.Token, ExpiresOn: time.Unix(int64(expiresOn), 0).UTC()}, nil - } - return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, "unexpected expires_on value: "+v, res, nil) - default: - msg := fmt.Sprintf("unsupported type received in expires_on: %T, %v", v, v) - return azcore.AccessToken{}, newAuthenticationFailedError(credNameManagedIdentity, msg, res, nil) - } -} - -func (c *managedIdentityClient) createAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { - switch c.msiType { - case msiTypeIMDS: - return c.createIMDSAuthRequest(ctx, id, scopes) - case msiTypeAppService: - return c.createAppServiceAuthRequest(ctx, id, scopes) - case msiTypeAzureArc: - // need to perform preliminary request to retreive the secret key challenge provided by the HIMDS service - key, err := c.getAzureArcSecretKey(ctx, scopes) - if err != nil { - msg := fmt.Sprintf("failed to retreive secret key from the identity endpoint: %v", err) - return nil, newAuthenticationFailedError(credNameManagedIdentity, msg, nil, err) - } - return c.createAzureArcAuthRequest(ctx, id, scopes, key) - case msiTypeAzureML: - return c.createAzureMLAuthRequest(ctx, id, scopes) - case 
msiTypeServiceFabric: - return c.createServiceFabricAuthRequest(ctx, id, scopes) - case msiTypeCloudShell: - return c.createCloudShellAuthRequest(ctx, id, scopes) - default: - return nil, newCredentialUnavailableError(credNameManagedIdentity, "managed identity isn't supported in this environment") - } -} - -func (c *managedIdentityClient) createIMDSAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { - request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint) - if err != nil { - return nil, err - } - request.Raw().Header.Set(headerMetadata, "true") - q := request.Raw().URL.Query() - q.Add("api-version", imdsAPIVersion) - q.Add("resource", strings.Join(scopes, " ")) - if id != nil { - if id.idKind() == miResourceID { - q.Add(msiResID, id.String()) - } else { - q.Add(qpClientID, id.String()) - } - } - request.Raw().URL.RawQuery = q.Encode() - return request, nil -} - -func (c *managedIdentityClient) createAppServiceAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { - request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint) - if err != nil { - return nil, err - } - request.Raw().Header.Set("X-IDENTITY-HEADER", os.Getenv(identityHeader)) - q := request.Raw().URL.Query() - q.Add("api-version", "2019-08-01") - q.Add("resource", scopes[0]) - if id != nil { - if id.idKind() == miResourceID { - q.Add(miResID, id.String()) - } else { - q.Add(qpClientID, id.String()) - } - } - request.Raw().URL.RawQuery = q.Encode() - return request, nil -} - -func (c *managedIdentityClient) createAzureMLAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { - request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint) - if err != nil { - return nil, err - } - request.Raw().Header.Set("secret", os.Getenv(msiSecret)) - q := request.Raw().URL.Query() - q.Add("api-version", "2017-09-01") - q.Add("resource", strings.Join(scopes, " ")) - q.Add("clientid", os.Getenv(defaultIdentityClientID)) - if id != nil { - if id.idKind() == miResourceID { - log.Write(EventAuthentication, "WARNING: Azure ML doesn't support specifying a managed identity by resource ID") - q.Set("clientid", "") - q.Set(miResID, id.String()) - } else { - q.Set("clientid", id.String()) - } - } - request.Raw().URL.RawQuery = q.Encode() - return request, nil -} - -func (c *managedIdentityClient) createServiceFabricAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { - request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint) - if err != nil { - return nil, err - } - q := request.Raw().URL.Query() - request.Raw().Header.Set("Accept", "application/json") - request.Raw().Header.Set("Secret", os.Getenv(identityHeader)) - q.Add("api-version", serviceFabricAPIVersion) - q.Add("resource", strings.Join(scopes, " ")) - if id != nil { - log.Write(EventAuthentication, "WARNING: Service Fabric doesn't support selecting a user-assigned identity at runtime") - if id.idKind() == miResourceID { - q.Add(miResID, id.String()) - } else { - q.Add(qpClientID, id.String()) - } - } - request.Raw().URL.RawQuery = q.Encode() - return request, nil -} - -func (c *managedIdentityClient) getAzureArcSecretKey(ctx context.Context, resources []string) (string, error) { - // create the request to retreive the secret key challenge provided by the HIMDS service - request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint) - if err != nil { - return "", err - } - 
request.Raw().Header.Set(headerMetadata, "true") - q := request.Raw().URL.Query() - q.Add("api-version", azureArcAPIVersion) - q.Add("resource", strings.Join(resources, " ")) - request.Raw().URL.RawQuery = q.Encode() - // send the initial request to get the short-lived secret key - response, err := c.azClient.Pipeline().Do(request) - if err != nil { - return "", err - } - // the endpoint is expected to return a 401 with the WWW-Authenticate header set to the location - // of the secret key file. Any other status code indicates an error in the request. - if response.StatusCode != 401 { - msg := fmt.Sprintf("expected a 401 response, received %d", response.StatusCode) - return "", newAuthenticationFailedError(credNameManagedIdentity, msg, response, nil) - } - header := response.Header.Get("WWW-Authenticate") - if len(header) == 0 { - return "", newAuthenticationFailedError(credNameManagedIdentity, "HIMDS response has no WWW-Authenticate header", nil, nil) - } - // the WWW-Authenticate header is expected in the following format: Basic realm=/some/file/path.key - _, p, found := strings.Cut(header, "=") - if !found { - return "", newAuthenticationFailedError(credNameManagedIdentity, "unexpected WWW-Authenticate header from HIMDS: "+header, nil, nil) - } - expected, err := arcKeyDirectory() - if err != nil { - return "", err - } - if filepath.Dir(p) != expected || !strings.HasSuffix(p, ".key") { - return "", newAuthenticationFailedError(credNameManagedIdentity, "unexpected file path from HIMDS service: "+p, nil, nil) - } - f, err := os.Stat(p) - if err != nil { - return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("could not stat %q: %v", p, err), nil, nil) - } - if s := f.Size(); s > 4096 { - return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("key is too large (%d bytes)", s), nil, nil) - } - key, err := os.ReadFile(p) - if err != nil { - return "", newAuthenticationFailedError(credNameManagedIdentity, fmt.Sprintf("could not read %q: %v", p, err), nil, nil) - } - return string(key), nil -} - -func (c *managedIdentityClient) createAzureArcAuthRequest(ctx context.Context, id ManagedIDKind, resources []string, key string) (*policy.Request, error) { - request, err := azruntime.NewRequest(ctx, http.MethodGet, c.endpoint) - if err != nil { - return nil, err - } - request.Raw().Header.Set(headerMetadata, "true") - request.Raw().Header.Set("Authorization", fmt.Sprintf("Basic %s", key)) - q := request.Raw().URL.Query() - q.Add("api-version", azureArcAPIVersion) - q.Add("resource", strings.Join(resources, " ")) - if id != nil { - log.Write(EventAuthentication, "WARNING: Azure Arc doesn't support user-assigned managed identities") - if id.idKind() == miResourceID { - q.Add(miResID, id.String()) - } else { - q.Add(qpClientID, id.String()) - } - } - request.Raw().URL.RawQuery = q.Encode() - return request, nil -} - -func (c *managedIdentityClient) createCloudShellAuthRequest(ctx context.Context, id ManagedIDKind, scopes []string) (*policy.Request, error) { - request, err := azruntime.NewRequest(ctx, http.MethodPost, c.endpoint) - if err != nil { - return nil, err - } - request.Raw().Header.Set(headerMetadata, "true") - data := url.Values{} - data.Set("resource", strings.Join(scopes, " ")) - dataEncoded := data.Encode() - body := streaming.NopCloser(strings.NewReader(dataEncoded)) - if err := request.SetBody(body, "application/x-www-form-urlencoded"); err != nil { - return nil, err - } - if id != nil { - log.Write(EventAuthentication, "WARNING: Cloud Shell 
doesn't support user-assigned managed identities") - q := request.Raw().URL.Query() - if id.idKind() == miResourceID { - q.Add(miResID, id.String()) - } else { - q.Add(qpClientID, id.String()) - } - } - return request, nil + err = newAuthenticationFailedErrorFromMSAL(credNameManagedIdentity, err) + return azcore.AccessToken{}, err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go index 13c043d8e0..11b686ccda 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/managed_identity_credential.go @@ -14,7 +14,6 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azcore" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" - "github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential" ) const credNameManagedIdentity = "ManagedIdentityCredential" @@ -22,8 +21,9 @@ const credNameManagedIdentity = "ManagedIdentityCredential" type managedIdentityIDKind int const ( - miClientID managedIdentityIDKind = 0 - miResourceID managedIdentityIDKind = 1 + miClientID managedIdentityIDKind = iota + miObjectID + miResourceID ) // ManagedIDKind identifies the ID of a managed identity as either a client or resource ID @@ -32,7 +32,12 @@ type ManagedIDKind interface { idKind() managedIdentityIDKind } -// ClientID is the client ID of a user-assigned managed identity. +// ClientID is the client ID of a user-assigned managed identity. [NewManagedIdentityCredential] +// returns an error when a ClientID is specified on the following platforms: +// +// - Azure Arc +// - Cloud Shell +// - Service Fabric type ClientID string func (ClientID) idKind() managedIdentityIDKind { @@ -44,7 +49,31 @@ func (c ClientID) String() string { return string(c) } -// ResourceID is the resource ID of a user-assigned managed identity. +// ObjectID is the object ID of a user-assigned managed identity. [NewManagedIdentityCredential] +// returns an error when an ObjectID is specified on the following platforms: +// +// - Azure Arc +// - Azure ML +// - Cloud Shell +// - Service Fabric +type ObjectID string + +func (ObjectID) idKind() managedIdentityIDKind { + return miObjectID +} + +// String returns the string value of the ID. +func (o ObjectID) String() string { + return string(o) +} + +// ResourceID is the resource ID of a user-assigned managed identity. [NewManagedIdentityCredential] +// returns an error when a ResourceID is specified on the following platforms: +// +// - Azure Arc +// - Azure ML +// - Cloud Shell +// - Service Fabric type ResourceID string func (ResourceID) idKind() managedIdentityIDKind { @@ -60,9 +89,10 @@ func (r ResourceID) String() string { type ManagedIdentityCredentialOptions struct { azcore.ClientOptions - // ID is the ID of a managed identity the credential should authenticate. Set this field to use a specific identity - // instead of the hosting environment's default. The value may be the identity's client ID or resource ID, but note that - // some platforms don't accept resource IDs. + // ID of a managed identity the credential should authenticate. Set this field to use a specific identity instead of + // the hosting environment's default. The value may be the identity's client, object, or resource ID. 
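[note] With ObjectID newly supported, all three user-assigned ID kinds construct the credential the same way; the GUID below is a placeholder:

package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// Any of ClientID, ObjectID, or ResourceID may be used here. On hosts
	// that don't support the chosen kind (per the lists above), the
	// constructor itself returns an error rather than failing later in GetToken.
	cred, err := azidentity.NewManagedIdentityCredential(&azidentity.ManagedIdentityCredentialOptions{
		ID: azidentity.ObjectID("00000000-0000-0000-0000-000000000000"), // placeholder GUID
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = cred
}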
+ // NewManagedIdentityCredential returns an error when the hosting environment doesn't support user-assigned managed + // identities, or the specified kind of ID. ID ManagedIDKind // dac indicates whether the credential is part of DefaultAzureCredential. When true, and the environment doesn't have @@ -73,13 +103,13 @@ type ManagedIdentityCredentialOptions struct { dac bool } -// ManagedIdentityCredential authenticates an Azure managed identity in any hosting environment supporting managed identities. +// ManagedIdentityCredential authenticates an [Azure managed identity] in any hosting environment supporting managed identities. // This credential authenticates a system-assigned identity by default. Use ManagedIdentityCredentialOptions.ID to specify a -// user-assigned identity. See Microsoft Entra ID documentation for more information about managed identities: -// https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/overview +// user-assigned identity. +// +// [Azure managed identity]: https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/overview type ManagedIdentityCredential struct { - client *confidentialClient - mic *managedIdentityClient + mic *managedIdentityClient } // NewManagedIdentityCredential creates a ManagedIdentityCredential. Pass nil to accept default options. @@ -91,38 +121,22 @@ func NewManagedIdentityCredential(options *ManagedIdentityCredentialOptions) (*M if err != nil { return nil, err } - cred := confidential.NewCredFromTokenProvider(mic.provideToken) - - // It's okay to give MSAL an invalid client ID because MSAL will use it only as part of a cache key. - // ManagedIdentityClient handles all the details of authentication and won't receive this value from MSAL. - clientID := "SYSTEM-ASSIGNED-MANAGED-IDENTITY" - if options.ID != nil { - clientID = options.ID.String() - } - // similarly, it's okay to give MSAL an incorrect tenant because MSAL won't use the value - c, err := newConfidentialClient("common", clientID, credNameManagedIdentity, cred, confidentialClientOptions{ - ClientOptions: options.ClientOptions, - }) - if err != nil { - return nil, err - } - return &ManagedIdentityCredential{client: c, mic: mic}, nil + return &ManagedIdentityCredential{mic: mic}, nil } // GetToken requests an access token from the hosting environment. This method is called automatically by Azure SDK clients. func (c *ManagedIdentityCredential) GetToken(ctx context.Context, opts policy.TokenRequestOptions) (azcore.AccessToken, error) { var err error - ctx, endSpan := runtime.StartSpan(ctx, credNameManagedIdentity+"."+traceOpGetToken, c.client.azClient.Tracer(), nil) + ctx, endSpan := runtime.StartSpan(ctx, credNameManagedIdentity+"."+traceOpGetToken, c.mic.azClient.Tracer(), nil) defer func() { endSpan(err) }() if len(opts.Scopes) != 1 { err = fmt.Errorf("%s.GetToken() requires exactly one scope", credNameManagedIdentity) return azcore.AccessToken{}, err } - // managed identity endpoints require a Microsoft Entra ID v1 resource (i.e. token audience), not a v2 scope, so we remove "/.default" here + // managed identity endpoints require a v1 resource (i.e. 
token audience), not a v2 scope, so we remove "/.default" here opts.Scopes = []string{strings.TrimSuffix(opts.Scopes[0], defaultSuffix)} - tk, err := c.client.GetToken(ctx, opts) - return tk, err + return c.mic.GetToken(ctx, opts) } var _ azcore.TokenCredential = (*ManagedIdentityCredential)(nil) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go index b3d22dbf3c..053d1785f8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/public_client.go @@ -30,12 +30,12 @@ type publicClientOptions struct { azcore.ClientOptions AdditionallyAllowedTenants []string + Cache Cache DeviceCodePrompt func(context.Context, DeviceCodeMessage) error DisableAutomaticAuthentication bool DisableInstanceDiscovery bool LoginHint, RedirectURL string - Record authenticationRecord - TokenCachePersistenceOptions *tokenCachePersistenceOptions + Record AuthenticationRecord Username, Password string } @@ -48,7 +48,7 @@ type publicClient struct { host string name string opts publicClientOptions - record authenticationRecord + record AuthenticationRecord azClient *azcore.Client } @@ -107,19 +107,19 @@ func newPublicClient(tenantID, clientID, name string, o publicClientOptions) (*p }, nil } -func (p *publicClient) Authenticate(ctx context.Context, tro *policy.TokenRequestOptions) (authenticationRecord, error) { +func (p *publicClient) Authenticate(ctx context.Context, tro *policy.TokenRequestOptions) (AuthenticationRecord, error) { if tro == nil { tro = &policy.TokenRequestOptions{} } if len(tro.Scopes) == 0 { if p.defaultScope == nil { - return authenticationRecord{}, errScopeRequired + return AuthenticationRecord{}, errScopeRequired } tro.Scopes = p.defaultScope } client, mu, err := p.client(*tro) if err != nil { - return authenticationRecord{}, err + return AuthenticationRecord{}, err } mu.Lock() defer mu.Unlock() @@ -152,14 +152,9 @@ func (p *publicClient) GetToken(ctx context.Context, tro policy.TokenRequestOpti return p.token(ar, err) } if p.opts.DisableAutomaticAuthentication { - return azcore.AccessToken{}, newauthenticationRequiredError(p.name, tro) + return azcore.AccessToken{}, newAuthenticationRequiredError(p.name, tro) } - at, err := p.reqToken(ctx, client, tro) - if err == nil { - msg := fmt.Sprintf("%s.GetToken() acquired a token for scope %q", p.name, strings.Join(ar.GrantedScopes, ", ")) - log.Write(EventAuthentication, msg) - } - return at, err + return p.reqToken(ctx, client, tro) } // reqToken requests a token from the MSAL public client. It's separate from GetToken() to enable Authenticate() to bypass the cache. 
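[note] The ManagedIdentityCredential.GetToken contract spelled out above, as a runnable sketch: exactly one scope, with the v2 "/.default" suffix trimmed internally to the v1 resource the endpoint expects. The storage scope is an example value:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	cred, err := azidentity.NewManagedIdentityCredential(nil) // system-assigned identity
	if err != nil {
		log.Fatal(err)
	}
	// Exactly one scope is required; "https://storage.azure.com/.default"
	// becomes the v1 resource "https://storage.azure.com" internally.
	tk, err := cred.GetToken(context.Background(), policy.TokenRequestOptions{
		Scopes: []string{"https://storage.azure.com/.default"}, // example scope
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(tk.ExpiresOn)
}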
@@ -222,13 +217,13 @@ func (p *publicClient) client(tro policy.TokenRequestOptions) (msalPublicClient, } func (p *publicClient) newMSALClient(enableCAE bool) (msalPublicClient, error) { - cache, err := internal.NewCache(p.opts.TokenCachePersistenceOptions, enableCAE) + c, err := internal.ExportReplace(p.opts.Cache, enableCAE) if err != nil { return nil, err } o := []public.Option{ public.WithAuthority(runtime.JoinPaths(p.host, p.tenantID)), - public.WithCache(cache), + public.WithCache(c), public.WithHTTPClient(p), } if enableCAE { @@ -242,12 +237,13 @@ func (p *publicClient) newMSALClient(enableCAE bool) (msalPublicClient, error) { func (p *publicClient) token(ar public.AuthResult, err error) (azcore.AccessToken, error) { if err == nil { + msg := fmt.Sprintf(scopeLogFmt, p.name, strings.Join(ar.GrantedScopes, ", ")) + log.Write(EventAuthentication, msg) p.record, err = newAuthenticationRecord(ar) } else { - res := getResponseFromError(err) - err = newAuthenticationFailedError(p.name, err.Error(), res, err) + err = newAuthenticationFailedErrorFromMSAL(p.name, err) } - return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC()}, err + return azcore.AccessToken{Token: ar.AccessToken, ExpiresOn: ar.ExpiresOn.UTC(), RefreshOn: ar.Metadata.RefreshOn.UTC()}, err } // resolveTenant returns the correct WithTenantID() argument for a token request given the client's diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 index a69bbce34c..67f97fbb2b 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources-post.ps1 @@ -5,7 +5,27 @@ param ( [hashtable] $AdditionalParameters = @{}, - [hashtable] $DeploymentOutputs + [hashtable] $DeploymentOutputs, + + [Parameter(Mandatory = $true)] + [ValidateNotNullOrEmpty()] + [string] $SubscriptionId, + + [Parameter(ParameterSetName = 'Provisioner', Mandatory = $true)] + [ValidateNotNullOrEmpty()] + [string] $TenantId, + + [Parameter()] + [ValidatePattern('^[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}$')] + [string] $TestApplicationId, + + [Parameter(Mandatory = $true)] + [ValidateNotNullOrEmpty()] + [string] $Environment, + + # Captures any arguments from eng/New-TestResources.ps1 not declared here (no parameter errors). + [Parameter(ValueFromRemainingArguments = $true)] + $RemainingArguments ) $ErrorActionPreference = 'Stop' @@ -16,14 +36,15 @@ if ($CI) { Write-Host "Skipping post-provisioning script because resources weren't deployed" return } - az login --service-principal -u $DeploymentOutputs['AZIDENTITY_CLIENT_ID'] -p $DeploymentOutputs['AZIDENTITY_CLIENT_SECRET'] --tenant $DeploymentOutputs['AZIDENTITY_TENANT_ID'] - az account set --subscription $DeploymentOutputs['AZIDENTITY_SUBSCRIPTION_ID'] + az cloud set -n $Environment + az login --federated-token $env:ARM_OIDC_TOKEN --service-principal -t $TenantId -u $TestApplicationId + az account set --subscription $SubscriptionId } Write-Host "Building container" $image = "$($DeploymentOutputs['AZIDENTITY_ACR_LOGIN_SERVER'])/azidentity-managed-id-test" Set-Content -Path "$PSScriptRoot/Dockerfile" -Value @" -FROM mcr.microsoft.com/oss/go/microsoft/golang:latest as builder +FROM mcr.microsoft.com/oss/go/microsoft/golang:latest AS builder ENV GOARCH=amd64 GOWORK=off COPY . 
/azidentity WORKDIR /azidentity/testdata/managed-id-test @@ -50,13 +71,20 @@ $aciName = "azidentity-test" az container create -g $rg -n $aciName --image $image ` --acr-identity $($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) ` --assign-identity [system] $($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) ` + --cpu 1 ` + --ip-address Public ` + --memory 1.0 ` + --os-type Linux ` --role "Storage Blob Data Reader" ` --scope $($DeploymentOutputs['AZIDENTITY_STORAGE_ID']) ` -e AZIDENTITY_STORAGE_NAME=$($DeploymentOutputs['AZIDENTITY_STORAGE_NAME']) ` - AZIDENTITY_STORAGE_NAME_USER_ASSIGNED=$($DeploymentOutputs['AZIDENTITY_STORAGE_NAME_USER_ASSIGNED']) ` - AZIDENTITY_USER_ASSIGNED_IDENTITY=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) ` - FUNCTIONS_CUSTOMHANDLER_PORT=80 -Write-Host "##vso[task.setvariable variable=AZIDENTITY_ACI_NAME;]$aciName" + AZIDENTITY_STORAGE_NAME_USER_ASSIGNED=$($DeploymentOutputs['AZIDENTITY_STORAGE_NAME_USER_ASSIGNED']) ` + AZIDENTITY_USER_ASSIGNED_IDENTITY=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY']) ` + AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID']) ` + AZIDENTITY_USER_ASSIGNED_IDENTITY_OBJECT_ID=$($DeploymentOutputs['AZIDENTITY_USER_ASSIGNED_IDENTITY_OBJECT_ID']) ` + FUNCTIONS_CUSTOMHANDLER_PORT=80 +$aciIP = az container show -g $rg -n $aciName --query ipAddress.ip --output tsv +Write-Host "##vso[task.setvariable variable=AZIDENTITY_ACI_IP;]$aciIP" # Azure Functions deployment: copy the Windows binary from the Docker image, deploy it in a zip Write-Host "Deploying to Azure Functions" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep index 2a21652930..135feb0178 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/test-resources.bicep @@ -135,6 +135,14 @@ resource azfunc 'Microsoft.Web/sites@2021-03-01' = if (deployResources) { name: 'AZIDENTITY_USER_ASSIGNED_IDENTITY' value: deployResources ? usermgdid.id : null } + { + name: 'AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID' + value: deployResources ? usermgdid.properties.clientId : null + } + { + name: 'AZIDENTITY_USER_ASSIGNED_IDENTITY_OBJECT_ID' + value: deployResources ? usermgdid.properties.principalId : null + } { name: 'AzureWebJobsStorage' value: 'DefaultEndpointsProtocol=https;AccountName=${deployResources ? sa.name : ''};EndpointSuffix=${deployResources ? environment().suffixes.storage : ''};AccountKey=${deployResources ? sa.listKeys().keys[0].value : ''}' @@ -217,3 +225,4 @@ output AZIDENTITY_STORAGE_NAME_USER_ASSIGNED string = deployResources ? saUserAs output AZIDENTITY_USER_ASSIGNED_IDENTITY string = deployResources ? usermgdid.id : '' output AZIDENTITY_USER_ASSIGNED_IDENTITY_CLIENT_ID string = deployResources ? usermgdid.properties.clientId : '' output AZIDENTITY_USER_ASSIGNED_IDENTITY_NAME string = deployResources ? usermgdid.name : '' +output AZIDENTITY_USER_ASSIGNED_IDENTITY_OBJECT_ID string = deployResources ? 
usermgdid.properties.principalId : '' diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go index 294ed81e95..5791e7d224 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/username_password_credential.go @@ -17,6 +17,11 @@ import ( const credNameUserPassword = "UsernamePasswordCredential" // UsernamePasswordCredentialOptions contains optional parameters for UsernamePasswordCredential. +// +// Deprecated: UsernamePasswordCredential is deprecated because it can't support multifactor +// authentication. See [Entra ID documentation] for migration guidance. +// +// [Entra ID documentation]: https://aka.ms/azsdk/identity/mfa type UsernamePasswordCredentialOptions struct { azcore.ClientOptions @@ -25,24 +30,31 @@ type UsernamePasswordCredentialOptions struct { // application is registered. AdditionallyAllowedTenants []string - // authenticationRecord returned by a call to a credential's Authenticate method. Set this option + // AuthenticationRecord returned by a call to a credential's Authenticate method. Set this option // to enable the credential to use data from a previous authentication. - authenticationRecord authenticationRecord + AuthenticationRecord AuthenticationRecord + + // Cache is a persistent cache the credential will use to store the tokens it acquires, making + // them available to other processes and credential instances. The default, zero value means the + // credential will store tokens in memory and not share them with any other credential instance. + Cache Cache // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool - - // tokenCachePersistenceOptions enables persistent token caching when not nil. - tokenCachePersistenceOptions *tokenCachePersistenceOptions } // UsernamePasswordCredential authenticates a user with a password. Microsoft doesn't recommend this kind of authentication, // because it's less secure than other authentication flows. This credential is not interactive, so it isn't compatible -// with any form of multi-factor authentication, and the application must already have user or admin consent. +// with any form of multifactor authentication, and the application must already have user or admin consent. // This credential can only authenticate work and school accounts; it can't authenticate Microsoft accounts. +// +// Deprecated: this credential is deprecated because it can't support multifactor authentication. See [Entra ID documentation] +// for migration guidance. 
+// +// [Entra ID documentation]: https://aka.ms/azsdk/identity/mfa type UsernamePasswordCredential struct { client *publicClient } @@ -54,13 +66,13 @@ func NewUsernamePasswordCredential(tenantID string, clientID string, username st options = &UsernamePasswordCredentialOptions{} } opts := publicClientOptions{ - AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, - ClientOptions: options.ClientOptions, - DisableInstanceDiscovery: options.DisableInstanceDiscovery, - Password: password, - Record: options.authenticationRecord, - TokenCachePersistenceOptions: options.tokenCachePersistenceOptions, - Username: username, + AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + Cache: options.Cache, + ClientOptions: options.ClientOptions, + DisableInstanceDiscovery: options.DisableInstanceDiscovery, + Password: password, + Record: options.AuthenticationRecord, + Username: username, } c, err := newPublicClient(tenantID, clientID, credNameUserPassword, opts) if err != nil { @@ -70,7 +82,7 @@ func NewUsernamePasswordCredential(tenantID string, clientID string, username st } // Authenticate the user. Subsequent calls to GetToken will automatically use the returned AuthenticationRecord. -func (c *UsernamePasswordCredential) authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (authenticationRecord, error) { +func (c *UsernamePasswordCredential) Authenticate(ctx context.Context, opts *policy.TokenRequestOptions) (AuthenticationRecord, error) { var err error ctx, endSpan := runtime.StartSpan(ctx, credNameUserPassword+"."+traceOpAuthenticate, c.client.azClient.Tracer(), nil) defer func() { endSpan(err) }() diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go index 4305b5d3d8..584aabe1cb 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/version.go @@ -14,5 +14,5 @@ const ( module = "github.com/Azure/azure-sdk-for-go/sdk/" + component // Version is the semantic version (see http://semver.org) of this module. - version = "v1.7.0" + version = "v1.9.0" ) diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go index 3e43e788e9..6fecada2f8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/azidentity/workload_identity.go @@ -39,15 +39,24 @@ type WorkloadIdentityCredentialOptions struct { // Add the wildcard value "*" to allow the credential to acquire tokens for any tenant in which the // application is registered. AdditionallyAllowedTenants []string + + // Cache is a persistent cache the credential will use to store the tokens it acquires, making + // them available to other processes and credential instances. The default, zero value means the + // credential will store tokens in memory and not share them with any other credential instance. + Cache Cache + // ClientID of the service principal. Defaults to the value of the environment variable AZURE_CLIENT_ID. ClientID string + // DisableInstanceDiscovery should be set true only by applications authenticating in disconnected clouds, or // private clouds such as Azure Stack. It determines whether the credential requests Microsoft Entra instance metadata // from https://login.microsoft.com before authenticating. 
Setting this to true will skip this request, making // the application responsible for ensuring the configured authority is valid and trustworthy. DisableInstanceDiscovery bool + // TenantID of the service principal. Defaults to the value of the environment variable AZURE_TENANT_ID. TenantID string + // TokenFilePath is the path of a file containing a Kubernetes service account token. Defaults to the value of the // environment variable AZURE_FEDERATED_TOKEN_FILE. TokenFilePath string @@ -81,6 +90,7 @@ func NewWorkloadIdentityCredential(options *WorkloadIdentityCredentialOptions) ( w := WorkloadIdentityCredential{file: file, mtx: &sync.RWMutex{}} caco := ClientAssertionCredentialOptions{ AdditionallyAllowedTenants: options.AdditionallyAllowedTenants, + Cache: options.Cache, ClientOptions: options.ClientOptions, DisableInstanceDiscovery: options.DisableInstanceDiscovery, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go index 4f1dcf1b78..76dadf7d35 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/log/log.go @@ -44,7 +44,7 @@ func Should(cls Event) bool { if log.lst == nil { return false } - if log.cls == nil || len(log.cls) == 0 { + if len(log.cls) == 0 { return true } for _, c := range log.cls { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go index 238ef42ed0..02aa1fb3bc 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/sdk/internal/temporal/resource.go @@ -11,9 +11,17 @@ import ( "time" ) +// backoff sets a minimum wait time between eager update attempts. It's a variable so tests can manipulate it. +var backoff = func(now, lastAttempt time.Time) bool { + return lastAttempt.Add(30 * time.Second).After(now) +} + // AcquireResource abstracts a method for refreshing a temporal resource. type AcquireResource[TResource, TState any] func(state TState) (newResource TResource, newExpiration time.Time, err error) +// ShouldRefresh abstracts a method for indicating whether a resource should be refreshed before expiration. +type ShouldRefresh[TResource, TState any] func(TResource, TState) bool + // Resource is a temporal resource (usually a credential) that requires periodic refreshing. type Resource[TResource, TState any] struct { // cond is used to synchronize access to the shared resource embodied by the remaining fields @@ -31,24 +39,43 @@ type Resource[TResource, TState any] struct { // lastAttempt indicates when a thread/goroutine last attempted to acquire/update the resource lastAttempt time.Time + // shouldRefresh indicates whether the resource should be refreshed before expiration + shouldRefresh ShouldRefresh[TResource, TState] + // acquireResource is the callback function that actually acquires the resource acquireResource AcquireResource[TResource, TState] } // NewResource creates a new Resource that uses the specified AcquireResource for refreshing. 
func NewResource[TResource, TState any](ar AcquireResource[TResource, TState]) *Resource[TResource, TState] { - return &Resource[TResource, TState]{cond: sync.NewCond(&sync.Mutex{}), acquireResource: ar} + r := &Resource[TResource, TState]{acquireResource: ar, cond: sync.NewCond(&sync.Mutex{})} + r.shouldRefresh = r.expiringSoon + return r +} + +// ResourceOptions contains optional configuration for Resource +type ResourceOptions[TResource, TState any] struct { + // ShouldRefresh indicates whether [Resource.Get] should acquire an updated resource despite + // the currently held resource not having expired. [Resource.Get] ignores all errors from + // refresh attempts triggered by ShouldRefresh returning true, and doesn't call ShouldRefresh + // when the resource has expired (it unconditionally updates expired resources). When + // ShouldRefresh is nil, [Resource.Get] refreshes the resource if it will expire within 5 + // minutes. + ShouldRefresh ShouldRefresh[TResource, TState] +} + +// NewResourceWithOptions creates a new Resource that uses the specified AcquireResource for refreshing. +func NewResourceWithOptions[TResource, TState any](ar AcquireResource[TResource, TState], opts ResourceOptions[TResource, TState]) *Resource[TResource, TState] { + r := NewResource(ar) + if opts.ShouldRefresh != nil { + r.shouldRefresh = opts.ShouldRefresh + } + return r } // Get returns the underlying resource. // If the resource is fresh, no refresh is performed. func (er *Resource[TResource, TState]) Get(state TState) (TResource, error) { - // If the resource is expiring within this time window, update it eagerly. - // This allows other threads/goroutines to keep running by using the not-yet-expired - // resource value while one thread/goroutine updates the resource. 
- const window = 5 * time.Minute // This example updates the resource 5 minutes prior to expiration - const backoff = 30 * time.Second // Minimum wait time between eager update attempts - now, acquire, expired := time.Now(), false, false // acquire exclusive lock @@ -65,9 +92,8 @@ func (er *Resource[TResource, TState]) Get(state TState) (TResource, error) { break } // Getting here means that this thread/goroutine will wait for the updated resource - } else if er.expiration.Add(-window).Before(now) { - // The resource is valid but is expiring within the time window - if !er.acquiring && er.lastAttempt.Add(backoff).Before(now) { + } else if er.shouldRefresh(resource, state) { + if !(er.acquiring || backoff(now, er.lastAttempt)) { // If another thread/goroutine is not acquiring/renewing the resource, and none has attempted // to do so within the last 30 seconds, this thread/goroutine will do it er.acquiring, acquire = true, true @@ -121,3 +147,8 @@ func (er *Resource[TResource, TState]) Expire() { // Reset the expiration as if we never got this resource to begin with er.expiration = time.Time{} } + +func (er *Resource[TResource, TState]) expiringSoon(TResource, TState) bool { + // call time.Now() instead of using Get's value so ShouldRefresh doesn't need a time.Time parameter + return er.expiration.Add(-5 * time.Minute).Before(time.Now()) +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go index f86286051d..549d68ab99 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/confidential/confidential.go @@ -18,6 +18,8 @@ import ( "encoding/pem" "errors" "fmt" + "os" + "strings" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base" @@ -63,6 +65,13 @@ type AuthenticationScheme = authority.AuthenticationScheme type Account = shared.Account +type TokenSource = base.TokenSource + +const ( + TokenSourceIdentityProvider = base.TokenSourceIdentityProvider + TokenSourceCache = base.TokenSourceCache +) + // CertFromPEM converts a PEM file (.pem or .key) for use with [NewCredFromCert]. The file // must contain the public certificate and the private key. If a PEM block is encrypted and // password is not an empty string, it attempts to decrypt the PEM blocks using the password. @@ -303,7 +312,9 @@ func WithInstanceDiscovery(enabled bool) Option { // If an invalid region name is provided, the non-regional endpoint MIGHT be used or the token request MIGHT fail. 
func WithAzureRegion(val string) Option { return func(o *clientOptions) { - o.azureRegion = val + if val != "" { + o.azureRegion = val + } } } @@ -315,16 +326,21 @@ func New(authority, clientID string, cred Credential, options ...Option) (Client if err != nil { return Client{}, err } - + autoEnabledRegion := os.Getenv("MSAL_FORCE_REGION") opts := clientOptions{ authority: authority, // if the caller specified a token provider, it will handle all details of authentication, using Client only as a token cache disableInstanceDiscovery: cred.tokenProvider != nil, httpClient: shared.DefaultClient, + azureRegion: autoEnabledRegion, } for _, o := range options { o(&opts) } + if strings.EqualFold(opts.azureRegion, "DisableMsalForceRegion") { + opts.azureRegion = "" + } + baseOpts := []base.Option{ base.WithCacheAccessor(opts.accessor), base.WithClientCapabilities(opts.capabilities), @@ -422,6 +438,7 @@ func WithClaims(claims string) interface { AcquireByAuthCodeOption AcquireByCredentialOption AcquireOnBehalfOfOption + AcquireByUsernamePasswordOption AcquireSilentOption AuthCodeURLOption options.CallOption @@ -430,6 +447,7 @@ func WithClaims(claims string) interface { AcquireByAuthCodeOption AcquireByCredentialOption AcquireOnBehalfOfOption + AcquireByUsernamePasswordOption AcquireSilentOption AuthCodeURLOption options.CallOption @@ -443,6 +461,8 @@ func WithClaims(claims string) interface { t.claims = claims case *acquireTokenOnBehalfOfOptions: t.claims = claims + case *acquireTokenByUsernamePasswordOptions: + t.claims = claims case *acquireTokenSilentOptions: t.claims = claims case *authCodeURLOptions: @@ -489,6 +509,7 @@ func WithTenantID(tenantID string) interface { AcquireByAuthCodeOption AcquireByCredentialOption AcquireOnBehalfOfOption + AcquireByUsernamePasswordOption AcquireSilentOption AuthCodeURLOption options.CallOption @@ -497,6 +518,7 @@ func WithTenantID(tenantID string) interface { AcquireByAuthCodeOption AcquireByCredentialOption AcquireOnBehalfOfOption + AcquireByUsernamePasswordOption AcquireSilentOption AuthCodeURLOption options.CallOption @@ -510,6 +532,8 @@ func WithTenantID(tenantID string) interface { t.tenantID = tenantID case *acquireTokenOnBehalfOfOptions: t.tenantID = tenantID + case *acquireTokenByUsernamePasswordOptions: + t.tenantID = tenantID case *acquireTokenSilentOptions: t.tenantID = tenantID case *authCodeURLOptions: @@ -585,6 +609,46 @@ func (cca Client) AcquireTokenSilent(ctx context.Context, scopes []string, opts return cca.base.AcquireTokenSilent(ctx, silentParameters) } +// acquireTokenByUsernamePasswordOptions contains optional configuration for AcquireTokenByUsernamePassword +type acquireTokenByUsernamePasswordOptions struct { + claims, tenantID string + authnScheme AuthenticationScheme +} + +// AcquireByUsernamePasswordOption is implemented by options for AcquireTokenByUsernamePassword +type AcquireByUsernamePasswordOption interface { + acquireByUsernamePasswordOption() +} + +// AcquireTokenByUsernamePassword acquires a security token from the authority, via Username/Password Authentication. +// NOTE: this flow is NOT recommended. 
+//
+// Options: [WithClaims], [WithTenantID]
+func (cca Client) AcquireTokenByUsernamePassword(ctx context.Context, scopes []string, username, password string, opts ...AcquireByUsernamePasswordOption) (AuthResult, error) {
+	o := acquireTokenByUsernamePasswordOptions{}
+	if err := options.ApplyOptions(&o, opts); err != nil {
+		return AuthResult{}, err
+	}
+	authParams, err := cca.base.AuthParams.WithTenant(o.tenantID)
+	if err != nil {
+		return AuthResult{}, err
+	}
+	authParams.Scopes = scopes
+	authParams.AuthorizationType = authority.ATUsernamePassword
+	authParams.Claims = o.claims
+	authParams.Username = username
+	authParams.Password = password
+	if o.authnScheme != nil {
+		authParams.AuthnScheme = o.authnScheme
+	}
+
+	token, err := cca.base.Token.UsernamePassword(ctx, authParams)
+	if err != nil {
+		return AuthResult{}, err
+	}
+	return cca.base.AuthResultFromToken(ctx, authParams, token)
+}
+
 // acquireTokenByAuthCodeOptions contains the optional parameters used to acquire an access token using the authorization code flow.
 type acquireTokenByAuthCodeOptions struct {
 	challenge, claims, tenantID string
@@ -676,7 +740,7 @@ func (cca Client) AcquireTokenByCredential(ctx context.Context, scopes []string,
 	if err != nil {
 		return AuthResult{}, err
 	}
-	return cca.base.AuthResultFromToken(ctx, authParams, token, true)
+	return cca.base.AuthResultFromToken(ctx, authParams, token)
 }
 
 // acquireTokenOnBehalfOfOptions contains optional configuration for AcquireTokenOnBehalfOf
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go
index c9b8dbed08..b5cbb57217 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors/errors.go
@@ -64,11 +64,20 @@ type CallErr struct {
 	Err error
 }
 
+type InvalidJsonErr struct {
+	Err error
+}
+
 // Errors implements error.Error().
 func (e CallErr) Error() string {
 	return e.Err.Error()
 }
 
+// Error implements error.Error().
+func (e InvalidJsonErr) Error() string {
+	return e.Err.Error()
+}
+
 // Verbose prints a verbose error message with the request or response.
func (e CallErr) Verbose() string {
	e.Resp.Request = nil // This brings in a bunch of TLS crap we don't need
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go
index 09a0d92f52..61c1c4cec1 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/base.go
@@ -5,16 +5,17 @@ package base
 
 import (
 	"context"
-	"errors"
 	"fmt"
 	"net/url"
 	"reflect"
 	"strings"
 	"sync"
+	"sync/atomic"
 	"time"
 
 	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/cache"
-	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors"
+	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage"
 	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth"
 	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens"
 	"github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority"
@@ -89,14 +90,28 @@ type AuthResult struct {
 	ExpiresOn      time.Time
 	GrantedScopes  []string
 	DeclinedScopes []string
+	Metadata       AuthResultMetadata
 }
 
+// AuthResultMetadata contains metadata for the AuthResult
+type AuthResultMetadata struct {
+	RefreshOn   time.Time
+	TokenSource TokenSource
+}
+
+type TokenSource int
+
+// These are all the types of token sources.
+const (
+	TokenSourceIdentityProvider TokenSource = 0
+	TokenSourceCache            TokenSource = 1
+)
+
 // AuthResultFromStorage creates an AuthResult from a storage token response (which is generated from the cache).
 func AuthResultFromStorage(storageTokenResponse storage.TokenResponse) (AuthResult, error) {
 	if err := storageTokenResponse.AccessToken.Validate(); err != nil {
 		return AuthResult{}, fmt.Errorf("problem with access token in StorageTokenResponse: %w", err)
 	}
-
 	account := storageTokenResponse.Account
 	accessToken := storageTokenResponse.AccessToken.Secret
 	grantedScopes := strings.Split(storageTokenResponse.AccessToken.Scopes, scopeSeparator)
@@ -109,7 +124,18 @@ func AuthResultFromStorage(storageTokenResponse storage.TokenResponse) (AuthResu
 			return AuthResult{}, fmt.Errorf("problem decoding JWT token: %w", err)
 		}
 	}
-	return AuthResult{account, idToken, accessToken, storageTokenResponse.AccessToken.ExpiresOn.T, grantedScopes, nil}, nil
+	return AuthResult{
+		Account:        account,
+		IDToken:        idToken,
+		AccessToken:    accessToken,
+		ExpiresOn:      storageTokenResponse.AccessToken.ExpiresOn.T,
+		GrantedScopes:  grantedScopes,
+		DeclinedScopes: nil,
+		Metadata: AuthResultMetadata{
+			TokenSource: TokenSourceCache,
+			RefreshOn:   storageTokenResponse.AccessToken.RefreshOn.T,
+		},
+	}, nil
 }
 
 // NewAuthResult creates an AuthResult.
@@ -121,8 +147,12 @@ func NewAuthResult(tokenResponse accesstokens.TokenResponse, account shared.Acco
 		Account:       account,
 		IDToken:       tokenResponse.IDToken,
 		AccessToken:   tokenResponse.AccessToken,
-		ExpiresOn:     tokenResponse.ExpiresOn.T,
+		ExpiresOn:     tokenResponse.ExpiresOn,
 		GrantedScopes: tokenResponse.GrantedScopes.Slice,
+		Metadata: AuthResultMetadata{
+			TokenSource: TokenSourceIdentityProvider,
+			RefreshOn:   tokenResponse.RefreshOn.T,
+		},
 	}, nil
 }
 
@@ -137,6 +167,8 @@ type Client struct {
 	AuthParams      authority.AuthParams // DO NOT EVER MAKE THIS A POINTER! See "Note" in New().
 	cacheAccessor   cache.ExportReplace
 	cacheAccessorMu *sync.RWMutex
+	canRefresh      map[string]*atomic.Value
+	canRefreshMu    *sync.Mutex
 }
 
 // Option is an optional argument to the New constructor.
@@ -213,6 +245,8 @@ func New(clientID string, authorityURI string, token *oauth.Client, options ...O
 		cacheAccessorMu: &sync.RWMutex{},
 		manager:         storage.New(token),
 		pmanager:        storage.NewPartitionedManager(token),
+		canRefresh:      make(map[string]*atomic.Value),
+		canRefreshMu:    &sync.Mutex{},
 	}
 	for _, o := range options {
 		if err = o(&client); err != nil {
@@ -317,6 +351,28 @@ func (b Client) AcquireTokenSilent(ctx context.Context, silent AcquireTokenSilen
 	if silent.Claims == "" {
 		ar, err = AuthResultFromStorage(storageTokenResponse)
 		if err == nil {
+			if rt := storageTokenResponse.AccessToken.RefreshOn.T; !rt.IsZero() && Now().After(rt) {
+				b.canRefreshMu.Lock()
+				refreshValue, ok := b.canRefresh[tenant]
+				if !ok {
+					refreshValue = &atomic.Value{}
+					refreshValue.Store(false)
+					b.canRefresh[tenant] = refreshValue
+				}
+				b.canRefreshMu.Unlock()
+				if refreshValue.CompareAndSwap(false, true) {
+					defer refreshValue.Store(false)
+					// Re-read the cache in case another goroutine refreshed this token
+					// while we were acquiring the flag; if the cached token changed, it
+					// was already refreshed elsewhere and this goroutine can skip its
+					// own refresh.
+					if str, err := m.Read(ctx, authParams); err == nil && str.AccessToken.Secret == ar.AccessToken {
+						if tr, er := b.Token.Credential(ctx, authParams, silent.Credential); er == nil {
+							return b.AuthResultFromToken(ctx, authParams, tr)
+						}
+					}
+				}
+			}
 			ar.AccessToken, err = authParams.AuthnScheme.FormatAccessToken(ar.AccessToken)
 			return ar, err
 		}
@@ -334,7 +390,7 @@ func (b Client) AcquireTokenSilen
 	if err != nil {
 		return ar, err
 	}
-	return b.AuthResultFromToken(ctx, authParams, token, true)
+	return b.AuthResultFromToken(ctx, authParams, token)
 }
 
 func (b Client) AcquireTokenByAuthCode(ctx context.Context, authCodeParams AcquireTokenAuthCodeParameters) (AuthResult, error) {
@@ -363,7 +419,7 @@ func (b Client) AcquireTokenByAuthCode(ctx context.Context, authCodeParams Acqui
 		return AuthResult{}, err
 	}
 
-	return b.AuthResultFromToken(ctx, authParams, token, true)
+	return b.AuthResultFromToken(ctx, authParams, token)
 }
 
 // AcquireTokenOnBehalfOf acquires a security token for an app using middle tier apps access token.
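The refresh_on handling added to AcquireTokenSilent above lets concurrent callers keep using a still-valid cached token while at most one goroutine performs the proactive refresh. A self-contained sketch of that per-tenant single-flight pattern; refreshGate and tryAcquire are illustrative names, not library API:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// refreshGate reproduces, in isolation, the pattern the patch adds to
// base.Client: one atomic flag per tenant, so at most one goroutine runs a
// proactive (refresh_on) token refresh while the others keep using the
// still-valid cached token.
type refreshGate struct {
	mu    sync.Mutex
	flags map[string]*atomic.Value
}

// tryAcquire reports whether the caller won the right to refresh the tenant's
// token; the returned release function must be called when the refresh ends.
func (g *refreshGate) tryAcquire(tenant string) (release func(), ok bool) {
	g.mu.Lock()
	v, found := g.flags[tenant]
	if !found {
		v = &atomic.Value{}
		v.Store(false)
		g.flags[tenant] = v
	}
	g.mu.Unlock()
	if v.CompareAndSwap(false, true) {
		return func() { v.Store(false) }, true
	}
	return nil, false // another goroutine is already refreshing this tenant
}

func main() {
	g := &refreshGate{flags: map[string]*atomic.Value{}}
	if release, ok := g.tryAcquire("contoso"); ok {
		defer release()
		fmt.Println("refreshing token proactively")
	}
}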
@@ -392,15 +448,12 @@ func (b Client) AcquireTokenOnBehalfOf(ctx context.Context, onBehalfOfParams Acq
 	authParams.UserAssertion = onBehalfOfParams.UserAssertion
 	token, err := b.Token.OnBehalfOf(ctx, authParams, onBehalfOfParams.Credential)
 	if err == nil {
-		ar, err = b.AuthResultFromToken(ctx, authParams, token, true)
+		ar, err = b.AuthResultFromToken(ctx, authParams, token)
 	}
 	return ar, err
 }
 
-func (b Client) AuthResultFromToken(ctx context.Context, authParams authority.AuthParams, token accesstokens.TokenResponse, cacheWrite bool) (AuthResult, error) {
-	if !cacheWrite {
-		return NewAuthResult(token, shared.Account{})
-	}
+func (b Client) AuthResultFromToken(ctx context.Context, authParams authority.AuthParams, token accesstokens.TokenResponse) (AuthResult, error) {
 	var m manager = b.manager
 	if authParams.AuthorizationType == authority.ATOnBehalfOf {
 		m = b.pmanager
@@ -430,6 +483,10 @@ func (b Client) AuthResultFromToken(ctx context.Context, authParams authority.Au
 	return ar, err
 }
 
+// Now wraps time.Now() so that tests can manipulate the current time when
+// exercising proactive token refresh.
+var Now = time.Now
+
 func (b Client) AllAccounts(ctx context.Context) ([]shared.Account, error) {
 	if b.cacheAccessor != nil {
 		b.cacheAccessorMu.RLock()
diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/items.go
similarity index 95%
rename from vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go
rename to vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/items.go
index f9be90276d..7379e2233c 100644
--- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/items.go
+++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/items.go
@@ -72,6 +72,7 @@ type AccessToken struct {
 	ClientID          string            `json:"client_id,omitempty"`
 	Secret            string            `json:"secret,omitempty"`
 	Scopes            string            `json:"target,omitempty"`
+	RefreshOn         internalTime.Unix `json:"refresh_on,omitempty"`
 	ExpiresOn         internalTime.Unix `json:"expires_on,omitempty"`
 	ExtendedExpiresOn internalTime.Unix `json:"extended_expires_on,omitempty"`
 	CachedAt          internalTime.Unix `json:"cached_at,omitempty"`
@@ -83,7 +84,7 @@ type AccessToken struct {
 }
 
 // NewAccessToken is the constructor for AccessToken.
-func NewAccessToken(homeID, env, realm, clientID string, cachedAt, expiresOn, extendedExpiresOn time.Time, scopes, token, tokenType, authnSchemeKeyID string) AccessToken {
+func NewAccessToken(homeID, env, realm, clientID string, cachedAt, refreshOn, expiresOn, extendedExpiresOn time.Time, scopes, token, tokenType, authnSchemeKeyID string) AccessToken {
 	return AccessToken{
 		HomeAccountID: homeID,
 		Environment:   env,
@@ -93,6 +94,7 @@ func NewAccessToken(homeID, env, realm, clientID string, cachedAt, expiresOn, ex
 		Secret:            token,
 		Scopes:            scopes,
 		CachedAt:          internalTime.Unix{T: cachedAt.UTC()},
+		RefreshOn:         internalTime.Unix{T: refreshOn.UTC()},
 		ExpiresOn:         internalTime.Unix{T: expiresOn.UTC()},
 		ExtendedExpiresOn: internalTime.Unix{T: extendedExpiresOn.UTC()},
 		TokenType:         tokenType,
@@ -102,8 +104,9 @@ func NewAccessToken(homeID, env, realm, clientID string, cachedAt, expiresOn, ex
 
 // Key outputs the key that can be used to uniquely look up this entry in a map.
func (a AccessToken) Key() string { + ks := []string{a.HomeAccountID, a.Environment, a.CredentialType, a.ClientID, a.Realm, a.Scopes} key := strings.Join( - []string{a.HomeAccountID, a.Environment, a.CredentialType, a.ClientID, a.Realm, a.Scopes}, + ks, shared.CacheKeySeparator, ) // add token type to key for new access tokens types. skip for bearer token type to diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/partitioned_storage.go similarity index 99% rename from vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go rename to vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/partitioned_storage.go index c093183306..ff07d4b5a4 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/partitioned_storage.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/partitioned_storage.go @@ -114,7 +114,8 @@ func (m *PartitionedManager) Write(authParameters authority.AuthParams, tokenRes realm, clientID, cachedAt, - tokenResponse.ExpiresOn.T, + tokenResponse.RefreshOn.T, + tokenResponse.ExpiresOn, tokenResponse.ExtExpiresOn.T, target, tokenResponse.AccessToken, diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/storage.go similarity index 98% rename from vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go rename to vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/storage.go index 2221e60c43..84a234967f 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/internal/storage/storage.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage/storage.go @@ -173,6 +173,7 @@ func (m *Manager) Write(authParameters authority.AuthParams, tokenResponse acces environment := authParameters.AuthorityInfo.Host realm := authParameters.AuthorityInfo.Tenant clientID := authParameters.ClientID + target := strings.Join(tokenResponse.GrantedScopes.Slice, scopeSeparator) cachedAt := time.Now() authnSchemeKeyID := authParameters.AuthnScheme.KeyID() @@ -193,7 +194,8 @@ func (m *Manager) Write(authParameters authority.AuthParams, tokenResponse acces realm, clientID, cachedAt, - tokenResponse.ExpiresOn.T, + tokenResponse.RefreshOn.T, + tokenResponse.ExpiresOn, tokenResponse.ExtExpiresOn.T, target, tokenResponse.AccessToken, @@ -265,6 +267,9 @@ func (m *Manager) aadMetadataFromCache(ctx context.Context, authorityInfo author } func (m *Manager) aadMetadata(ctx context.Context, authorityInfo authority.Info) (authority.InstanceDiscoveryMetadata, error) { + if m.requests == nil { + return authority.InstanceDiscoveryMetadata{}, fmt.Errorf("httpclient in oauth instance for fetching metadata is nil") + } m.aadCacheMu.Lock() defer m.aadCacheMu.Unlock() discoveryResponse, err := m.requests.AADInstanceDiscovery(ctx, authorityInfo) @@ -459,6 +464,7 @@ func (m *Manager) readAccount(homeAccountID string, envAliases []string, realm s func (m *Manager) writeAccount(account shared.Account) 
error { key := account.Key() + m.contractMu.Lock() defer m.contractMu.Unlock() m.contract.Accounts[key] = account diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go index 7b673e3fe1..de1bf381f4 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported/exported.go @@ -31,4 +31,6 @@ type TokenProviderResult struct { AccessToken string // ExpiresInSeconds is the lifetime of the token in seconds ExpiresInSeconds int + // RefreshInSeconds indicates the suggested time to refresh the token, if any + RefreshInSeconds int } diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go index 2238521f5f..2134e57c9e 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/json.go @@ -18,10 +18,6 @@ import ( ) const addField = "AdditionalFields" -const ( - marshalJSON = "MarshalJSON" - unmarshalJSON = "UnmarshalJSON" -) var ( leftBrace = []byte("{")[0] @@ -106,48 +102,38 @@ func delimIs(got json.Token, want rune) bool { // hasMarshalJSON will determine if the value or a pointer to this value has // the MarshalJSON method. func hasMarshalJSON(v reflect.Value) bool { - if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid { - _, ok := v.Interface().(json.Marshaler) - return ok - } - - if v.Kind() == reflect.Ptr { - v = v.Elem() - } else { - if !v.CanAddr() { - return false + ok := false + if _, ok = v.Interface().(json.Marshaler); !ok { + var i any + if v.Kind() == reflect.Ptr { + i = v.Elem().Interface() + } else if v.CanAddr() { + i = v.Addr().Interface() } - v = v.Addr() - } - - if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid { - _, ok := v.Interface().(json.Marshaler) - return ok + _, ok = i.(json.Marshaler) } - return false + return ok } // callMarshalJSON will call MarshalJSON() method on the value or a pointer to this value. // This will panic if the method is not defined. 
func callMarshalJSON(v reflect.Value) ([]byte, error) { - if method := v.MethodByName(marshalJSON); method.Kind() != reflect.Invalid { - marsh := v.Interface().(json.Marshaler) + if marsh, ok := v.Interface().(json.Marshaler); ok { return marsh.MarshalJSON() } if v.Kind() == reflect.Ptr { - v = v.Elem() + if marsh, ok := v.Elem().Interface().(json.Marshaler); ok { + return marsh.MarshalJSON() + } } else { if v.CanAddr() { - v = v.Addr() + if marsh, ok := v.Addr().Interface().(json.Marshaler); ok { + return marsh.MarshalJSON() + } } } - if method := v.MethodByName(unmarshalJSON); method.Kind() != reflect.Invalid { - marsh := v.Interface().(json.Marshaler) - return marsh.MarshalJSON() - } - panic(fmt.Sprintf("callMarshalJSON called on type %T that does not have MarshalJSON defined", v.Interface())) } @@ -162,12 +148,8 @@ func hasUnmarshalJSON(v reflect.Value) bool { v = v.Addr() } - if method := v.MethodByName(unmarshalJSON); method.Kind() != reflect.Invalid { - _, ok := v.Interface().(json.Unmarshaler) - return ok - } - - return false + _, ok := v.Interface().(json.Unmarshaler) + return ok } // hasOmitEmpty indicates if the field has instructed us to not output diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go index 04236ff312..cda678e334 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/local/server.go @@ -7,6 +7,7 @@ package local import ( "context" "fmt" + "html" "net" "net/http" "strconv" @@ -141,11 +142,12 @@ func (s *Server) handler(w http.ResponseWriter, r *http.Request) { headerErr := q.Get("error") if headerErr != "" { - desc := q.Get("error_description") + desc := html.EscapeString(q.Get("error_description")) // Note: It is a little weird we handle some errors by not going to the failPage. If they all should, // change this to s.error() and make s.error() write the failPage instead of an error code. _, _ = w.Write([]byte(fmt.Sprintf(failPage, headerErr, desc))) - s.putResult(Result{Err: fmt.Errorf(desc)}) + s.putResult(Result{Err: fmt.Errorf("%s", desc)}) + return } diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go index ef8d908a44..738a29eb9d 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/oauth.go @@ -10,6 +10,8 @@ import ( "io" "time" + "github.com/google/uuid" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/exported" internalTime "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json/types/time" @@ -18,7 +20,6 @@ import ( "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/wstrust/defs" - "github.com/google/uuid" ) // ResolveEndpointer contains the methods for resolving authority endpoints. 
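The json helper rework above replaces reflect MethodByName lookups with plain type assertions when probing for a json.Marshaler. A standalone sketch of that detection logic under the same approach; marshalerFor and stamp are illustrative names that don't exist in the library:

package main

import (
	"encoding/json"
	"fmt"
	"reflect"
)

// marshalerFor mirrors the patched hasMarshalJSON/callMarshalJSON approach:
// try a type assertion on the value itself, then fall back to the pointed-to
// value or, for an addressable value, its address.
func marshalerFor(v reflect.Value) (json.Marshaler, bool) {
	if m, ok := v.Interface().(json.Marshaler); ok {
		return m, true
	}
	if v.Kind() == reflect.Ptr && !v.IsNil() {
		m, ok := v.Elem().Interface().(json.Marshaler)
		return m, ok
	}
	if v.CanAddr() {
		m, ok := v.Addr().Interface().(json.Marshaler)
		return m, ok
	}
	return nil, false
}

type stamp struct{}

// MarshalJSON lets stamp satisfy json.Marshaler for the demonstration.
func (stamp) MarshalJSON() ([]byte, error) { return []byte(`"stamp"`), nil }

func main() {
	if m, ok := marshalerFor(reflect.ValueOf(stamp{})); ok {
		b, _ := m.MarshalJSON()
		fmt.Println(string(b)) // "stamp"
	}
}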
@@ -110,7 +111,7 @@ func (t *Client) Credential(ctx context.Context, authParams authority.AuthParams Scopes: scopes, TenantID: authParams.AuthorityInfo.Tenant, } - tr, err := cred.TokenProvider(ctx, params) + pr, err := cred.TokenProvider(ctx, params) if err != nil { if len(scopes) == 0 { err = fmt.Errorf("token request had an empty authority.AuthParams.Scopes, which may cause the following error: %w", err) @@ -118,14 +119,18 @@ func (t *Client) Credential(ctx context.Context, authParams authority.AuthParams } return accesstokens.TokenResponse{}, err } - return accesstokens.TokenResponse{ - TokenType: authParams.AuthnScheme.AccessTokenType(), - AccessToken: tr.AccessToken, - ExpiresOn: internalTime.DurationTime{ - T: now.Add(time.Duration(tr.ExpiresInSeconds) * time.Second), - }, + tr := accesstokens.TokenResponse{ + TokenType: authParams.AuthnScheme.AccessTokenType(), + AccessToken: pr.AccessToken, + ExpiresOn: now.Add(time.Duration(pr.ExpiresInSeconds) * time.Second), GrantedScopes: accesstokens.Scopes{Slice: authParams.Scopes}, - }, nil + } + if pr.RefreshInSeconds > 0 { + tr.RefreshOn = internalTime.DurationTime{ + T: now.Add(time.Duration(pr.RefreshInSeconds) * time.Second), + } + } + return tr, nil } if err := t.resolveEndpoint(ctx, &authParams, ""); err != nil { @@ -331,7 +336,7 @@ func (t *Client) DeviceCode(ctx context.Context, authParams authority.AuthParams func (t *Client) resolveEndpoint(ctx context.Context, authParams *authority.AuthParams, userPrincipalName string) error { endpoints, err := t.Resolver.ResolveEndpoints(ctx, authParams.AuthorityInfo, userPrincipalName) if err != nil { - return fmt.Errorf("unable to resolve an endpoint: %s", err) + return fmt.Errorf("unable to resolve an endpoint: %w", err) } authParams.Endpoints = endpoints return nil diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go index a7b7b0742d..d738c7591e 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/accesstokens.go @@ -17,6 +17,7 @@ import ( /* #nosec */ "crypto/sha1" + "crypto/sha256" "crypto/x509" "encoding/base64" "encoding/json" @@ -68,7 +69,7 @@ type DeviceCodeResponse struct { UserCode string `json:"user_code"` DeviceCode string `json:"device_code"` - VerificationURL string `json:"verification_url"` + VerificationURL string `json:"verification_uri"` ExpiresIn int `json:"expires_in"` Interval int `json:"interval"` Message string `json:"message"` @@ -112,19 +113,31 @@ func (c *Credential) JWT(ctx context.Context, authParams authority.AuthParams) ( } return c.AssertionCallback(ctx, options) } - - token := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{ + claims := jwt.MapClaims{ "aud": authParams.Endpoints.TokenEndpoint, "exp": json.Number(strconv.FormatInt(time.Now().Add(10*time.Minute).Unix(), 10)), "iss": authParams.ClientID, "jti": uuid.New().String(), "nbf": json.Number(strconv.FormatInt(time.Now().Unix(), 10)), "sub": authParams.ClientID, - }) + } + + isADFSorDSTS := authParams.AuthorityInfo.AuthorityType == authority.ADFS || + authParams.AuthorityInfo.AuthorityType == authority.DSTS + + var signingMethod jwt.SigningMethod = jwt.SigningMethodPS256 + thumbprintKey := "x5t#S256" + + if 
isADFSorDSTS { + signingMethod = jwt.SigningMethodRS256 + thumbprintKey = "x5t" + } + + token := jwt.NewWithClaims(signingMethod, claims) token.Header = map[string]interface{}{ - "alg": "RS256", - "typ": "JWT", - "x5t": base64.StdEncoding.EncodeToString(thumbprint(c.Cert)), + "alg": signingMethod.Alg(), + "typ": "JWT", + thumbprintKey: base64.StdEncoding.EncodeToString(thumbprint(c.Cert, signingMethod.Alg())), } if authParams.SendX5C { @@ -133,17 +146,23 @@ func (c *Credential) JWT(ctx context.Context, authParams authority.AuthParams) ( assertion, err := token.SignedString(c.Key) if err != nil { - return "", fmt.Errorf("unable to sign a JWT token using private key: %w", err) + return "", fmt.Errorf("unable to sign JWT token: %w", err) } + return assertion, nil } // thumbprint runs the asn1.Der bytes through sha1 for use in the x5t parameter of JWT. // https://tools.ietf.org/html/rfc7517#section-4.8 -func thumbprint(cert *x509.Certificate) []byte { - /* #nosec */ - a := sha1.Sum(cert.Raw) - return a[:] +func thumbprint(cert *x509.Certificate, alg string) []byte { + switch alg { + case jwt.SigningMethodRS256.Name: // identity providers like ADFS don't support SHA256 assertions, so need to support this + hash := sha1.Sum(cert.Raw) /* #nosec */ + return hash[:] + default: + hash := sha256.Sum256(cert.Raw) + return hash[:] + } } // Client represents the REST calls to get tokens from token generator backends. @@ -262,11 +281,7 @@ func (c Client) FromClientSecret(ctx context.Context, authParameters authority.A qv.Set(clientID, authParameters.ClientID) addScopeQueryParam(qv, authParameters) - token, err := c.doTokenResp(ctx, authParameters, qv) - if err != nil { - return token, fmt.Errorf("FromClientSecret(): %w", err) - } - return token, nil + return c.doTokenResp(ctx, authParameters, qv) } func (c Client) FromAssertion(ctx context.Context, authParameters authority.AuthParams, assertion string) (TokenResponse, error) { @@ -281,11 +296,7 @@ func (c Client) FromAssertion(ctx context.Context, authParameters authority.Auth qv.Set(clientInfo, clientInfoVal) addScopeQueryParam(qv, authParameters) - token, err := c.doTokenResp(ctx, authParameters, qv) - if err != nil { - return token, fmt.Errorf("FromAssertion(): %w", err) - } - return token, nil + return c.doTokenResp(ctx, authParameters, qv) } func (c Client) FromUserAssertionClientSecret(ctx context.Context, authParameters authority.AuthParams, userAssertion string, clientSecret string) (TokenResponse, error) { diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go index 3107b45c11..32dde7b76b 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens/tokens.go @@ -10,6 +10,7 @@ import ( "errors" "fmt" "reflect" + "strconv" "strings" "time" @@ -173,14 +174,75 @@ type TokenResponse struct { FamilyID string `json:"foci"` IDToken IDToken `json:"id_token"` ClientInfo ClientInfo `json:"client_info"` - ExpiresOn internalTime.DurationTime `json:"expires_in"` + RefreshOn internalTime.DurationTime `json:"refresh_in,omitempty"` + ExpiresOn time.Time `json:"-"` ExtExpiresOn internalTime.DurationTime `json:"ext_expires_in"` GrantedScopes Scopes `json:"scope"` DeclinedScopes []string // This is derived 
AdditionalFields map[string]interface{} + scopesComputed bool +} + +func (tr *TokenResponse) UnmarshalJSON(data []byte) error { + type Alias TokenResponse + aux := &struct { + ExpiresIn internalTime.DurationTime `json:"expires_in,omitempty"` + ExpiresOn any `json:"expires_on,omitempty"` + *Alias + }{ + Alias: (*Alias)(tr), + } + + // Unmarshal the JSON data into the aux struct + if err := json.Unmarshal(data, &aux); err != nil { + return err + } + + // Function to parse different date formats + // This is a workaround for the issue described here: + // https://github.com/AzureAD/microsoft-authentication-library-for-dotnet/issues/4963 + parseExpiresOn := func(expiresOn string) (time.Time, error) { + var formats = []string{ + "01/02/2006 15:04:05", // MM/dd/yyyy HH:mm:ss + "2006-01-02 15:04:05", // yyyy-MM-dd HH:mm:ss + time.RFC3339Nano, // ISO 8601 (with nanosecond precision) + } + + for _, format := range formats { + if t, err := time.Parse(format, expiresOn); err == nil { + return t, nil + } + } + return time.Time{}, fmt.Errorf("invalid ExpiresOn format: %s", expiresOn) + } - scopesComputed bool + if expiresOnStr, ok := aux.ExpiresOn.(string); ok { + if ts, err := strconv.ParseInt(expiresOnStr, 10, 64); err == nil { + tr.ExpiresOn = time.Unix(ts, 0) + return nil + } + if expiresOnStr != "" { + if t, err := parseExpiresOn(expiresOnStr); err != nil { + return err + } else { + tr.ExpiresOn = t + return nil + } + } + } + + // Check if ExpiresOn is a number (Unix timestamp or ISO 8601) + if expiresOnNum, ok := aux.ExpiresOn.(float64); ok { + tr.ExpiresOn = time.Unix(int64(expiresOnNum), 0) + return nil + } + + if !aux.ExpiresIn.T.IsZero() { + tr.ExpiresOn = aux.ExpiresIn.T + return nil + } + return errors.New("expires_in and expires_on are both missing or invalid") } // ComputeScope computes the final scopes based on what was granted by the server and diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go index 9d60734f88..c3c4a96fc3 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority/authority.go @@ -23,7 +23,7 @@ import ( const ( authorizationEndpoint = "https://%v/%v/oauth2/v2.0/authorize" - instanceDiscoveryEndpoint = "https://%v/common/discovery/instance" + aadInstanceDiscoveryEndpoint = "https://%v/common/discovery/instance" tenantDiscoveryEndpointWithRegion = "https://%s.%s/%s/v2.0/.well-known/openid-configuration" regionName = "REGION_NAME" defaultAPIVersion = "2021-10-01" @@ -47,13 +47,12 @@ type jsonCaller interface { } var aadTrustedHostList = map[string]bool{ - "login.windows.net": true, // Microsoft Azure Worldwide - Used in validation scenarios where host is not this list - "login.chinacloudapi.cn": true, // Microsoft Azure China - "login.microsoftonline.de": true, // Microsoft Azure Blackforest - "login-us.microsoftonline.com": true, // Microsoft Azure US Government - Legacy - "login.microsoftonline.us": true, // Microsoft Azure US Government - "login.microsoftonline.com": true, // Microsoft Azure Worldwide - "login.cloudgovapi.us": true, // Microsoft Azure US Government + "login.windows.net": true, // Microsoft Azure Worldwide - Used in validation scenarios where host is not this list + 
"login.partner.microsoftonline.cn": true, // Microsoft Azure China + "login.microsoftonline.de": true, // Microsoft Azure Blackforest + "login-us.microsoftonline.com": true, // Microsoft Azure US Government - Legacy + "login.microsoftonline.us": true, // Microsoft Azure US Government + "login.microsoftonline.com": true, // Microsoft Azure Worldwide } // TrustedHost checks if an AAD host is trusted/valid. @@ -137,8 +136,12 @@ const ( const ( AAD = "MSSTS" ADFS = "ADFS" + DSTS = "DSTS" ) +// DSTSTenant is referenced throughout multiple files, let us use a const in case we ever need to change it. +const DSTSTenant = "7a433bfc-2514-4697-b467-e0933190487f" + // AuthenticationScheme is an extensibility mechanism designed to be used only by Azure Arc for proof of possession access tokens. type AuthenticationScheme interface { // Extra parameters that are added to the request to the /token endpoint. @@ -236,23 +239,26 @@ func NewAuthParams(clientID string, authorityInfo Info) AuthParams { // - the client is configured to authenticate only Microsoft accounts via the "consumers" endpoint // - the resulting authority URL is invalid func (p AuthParams) WithTenant(ID string) (AuthParams, error) { - switch ID { - case "", p.AuthorityInfo.Tenant: - // keep the default tenant because the caller didn't override it + if ID == "" || ID == p.AuthorityInfo.Tenant { return p, nil - case "common", "consumers", "organizations": - if p.AuthorityInfo.AuthorityType == AAD { + } + + var authority string + switch p.AuthorityInfo.AuthorityType { + case AAD: + if ID == "common" || ID == "consumers" || ID == "organizations" { return p, fmt.Errorf(`tenant ID must be a specific tenant, not "%s"`, ID) } - // else we'll return a better error below - } - if p.AuthorityInfo.AuthorityType != AAD { - return p, errors.New("the authority doesn't support tenants") - } - if p.AuthorityInfo.Tenant == "consumers" { - return p, errors.New(`client is configured to authenticate only personal Microsoft accounts, via the "consumers" endpoint`) + if p.AuthorityInfo.Tenant == "consumers" { + return p, errors.New(`client is configured to authenticate only personal Microsoft accounts, via the "consumers" endpoint`) + } + authority = "https://" + path.Join(p.AuthorityInfo.Host, ID) + case ADFS: + return p, errors.New("ADFS authority doesn't support tenants") + case DSTS: + return p, errors.New("dSTS authority doesn't support tenants") } - authority := "https://" + path.Join(p.AuthorityInfo.Host, ID) + info, err := NewInfoFromAuthorityURI(authority, p.AuthorityInfo.ValidateAuthority, p.AuthorityInfo.InstanceDiscoveryDisabled) if err == nil { info.Region = p.AuthorityInfo.Region @@ -344,44 +350,57 @@ type Info struct { Host string CanonicalAuthorityURI string AuthorityType string - UserRealmURIPrefix string ValidateAuthority bool Tenant string Region string InstanceDiscoveryDisabled bool } -func firstPathSegment(u *url.URL) (string, error) { - pathParts := strings.Split(u.EscapedPath(), "/") - if len(pathParts) >= 2 { - return pathParts[1], nil - } - - return "", errors.New(`authority must be an https URL such as "https://login.microsoftonline.com/"`) -} - // NewInfoFromAuthorityURI creates an AuthorityInfo instance from the authority URL provided. 
func NewInfoFromAuthorityURI(authority string, validateAuthority bool, instanceDiscoveryDisabled bool) (Info, error) { - u, err := url.Parse(strings.ToLower(authority)) - if err != nil || u.Scheme != "https" { - return Info{}, errors.New(`authority must be an https URL such as "https://login.microsoftonline.com/"`) + + cannonicalAuthority := authority + + // suffix authority with / if it doesn't have one + if !strings.HasSuffix(cannonicalAuthority, "/") { + cannonicalAuthority += "/" } - tenant, err := firstPathSegment(u) + u, err := url.Parse(strings.ToLower(cannonicalAuthority)) + if err != nil { - return Info{}, err + return Info{}, fmt.Errorf("couldn't parse authority url: %w", err) + } + if u.Scheme != "https" { + return Info{}, errors.New("authority url scheme must be https") + } + + pathParts := strings.Split(u.EscapedPath(), "/") + if len(pathParts) < 3 { + return Info{}, errors.New(`authority must be an URL such as "https://login.microsoftonline.com/"`) } + authorityType := AAD - if tenant == "adfs" { + tenant := pathParts[1] + switch tenant { + case "adfs": authorityType = ADFS + case "dstsv2": + if len(pathParts) != 4 { + return Info{}, fmt.Errorf("dSTS authority must be an https URL such as https:///dstsv2/%s", DSTSTenant) + } + if pathParts[2] != DSTSTenant { + return Info{}, fmt.Errorf("dSTS authority only accepts a single tenant %q", DSTSTenant) + } + authorityType = DSTS + tenant = DSTSTenant } // u.Host includes the port, if any, which is required for private cloud deployments return Info{ Host: u.Host, - CanonicalAuthorityURI: fmt.Sprintf("https://%v/%v/", u.Host, tenant), + CanonicalAuthorityURI: cannonicalAuthority, AuthorityType: authorityType, - UserRealmURIPrefix: fmt.Sprintf("https://%v/common/userrealm/", u.Hostname()), ValidateAuthority: validateAuthority, Tenant: tenant, InstanceDiscoveryDisabled: instanceDiscoveryDisabled, @@ -525,7 +544,7 @@ func (c Client) AADInstanceDiscovery(ctx context.Context, authorityInfo Info) (I discoveryHost = authorityInfo.Host } - endpoint := fmt.Sprintf(instanceDiscoveryEndpoint, discoveryHost) + endpoint := fmt.Sprintf(aadInstanceDiscoveryEndpoint, discoveryHost) err = c.Comm.JSONCall(ctx, endpoint, http.Header{}, qv, nil, &resp) } return resp, err @@ -543,17 +562,19 @@ func detectRegion(ctx context.Context) string { client := http.Client{ Timeout: time.Duration(2 * time.Second), } - req, _ := http.NewRequest("GET", imdsEndpoint, nil) + req, _ := http.NewRequestWithContext(ctx, http.MethodGet, imdsEndpoint, nil) req.Header.Set("Metadata", "true") resp, err := client.Do(req) + if err == nil { + defer resp.Body.Close() + } // If the request times out or there is an error, it is retried once - if err != nil || resp.StatusCode != 200 { + if err != nil || resp.StatusCode != http.StatusOK { resp, err = client.Do(req) - if err != nil || resp.StatusCode != 200 { + if err != nil || resp.StatusCode != http.StatusOK { return "" } } - defer resp.Body.Close() response, err := io.ReadAll(resp.Body) if err != nil { return "" diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go index 7d9ec7cd37..7906803669 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/internal/comm/comm.go @@ -18,10 +18,11 @@ 
import ( "strings" "time" + "github.com/google/uuid" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors" customJSON "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/json" "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version" - "github.com/google/uuid" ) // HTTPClient represents an HTTP client. @@ -70,15 +71,13 @@ func (c *Client) JSONCall(ctx context.Context, endpoint string, headers http.Hea unmarshal = customJSON.Unmarshal } - u, err := url.Parse(endpoint) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("%s?%s", endpoint, qv.Encode()), nil) if err != nil { - return fmt.Errorf("could not parse path URL(%s): %w", endpoint, err) + return fmt.Errorf("could not create request: %w", err) } - u.RawQuery = qv.Encode() addStdHeaders(headers) - - req := &http.Request{Method: http.MethodGet, URL: u, Header: headers} + req.Header = headers if body != nil { // Note: In case your wondering why we are not gzip encoding.... @@ -99,7 +98,7 @@ func (c *Client) JSONCall(ctx context.Context, endpoint string, headers http.Hea if resp != nil { if err := unmarshal(data, resp); err != nil { - return fmt.Errorf("json decode error: %w\njson message bytes were: %s", err, string(data)) + return errors.InvalidJsonErr{Err: fmt.Errorf("json decode error: %w\njson message bytes were: %s", err, string(data))} } } return nil @@ -222,7 +221,7 @@ func (c *Client) URLFormCall(ctx context.Context, endpoint string, qv url.Values } if resp != nil { if err := unmarshal(data, resp); err != nil { - return fmt.Errorf("json decode error: %w\nraw message was: %s", err, string(data)) + return errors.InvalidJsonErr{Err: fmt.Errorf("json decode error: %w\nraw message was: %s", err, string(data))} } } return nil diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go index 0ade411797..4030ec8d8f 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/resolvers.go @@ -18,9 +18,6 @@ import ( "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" ) -// ADFS is an active directory federation service authority type. 
-const ADFS = "ADFS" - type cacheEntry struct { Endpoints authority.Endpoints ValidForDomainsInList map[string]bool @@ -51,7 +48,7 @@ func (m *authorityEndpoint) ResolveEndpoints(ctx context.Context, authorityInfo return endpoints, nil } - endpoint, err := m.openIDConfigurationEndpoint(ctx, authorityInfo, userPrincipalName) + endpoint, err := m.openIDConfigurationEndpoint(ctx, authorityInfo) if err != nil { return authority.Endpoints{}, err } @@ -83,7 +80,7 @@ func (m *authorityEndpoint) cachedEndpoints(authorityInfo authority.Info, userPr defer m.mu.Unlock() if cacheEntry, ok := m.cache[authorityInfo.CanonicalAuthorityURI]; ok { - if authorityInfo.AuthorityType == ADFS { + if authorityInfo.AuthorityType == authority.ADFS { domain, err := adfsDomainFromUpn(userPrincipalName) if err == nil { if _, ok := cacheEntry.ValidForDomainsInList[domain]; ok { @@ -102,7 +99,7 @@ func (m *authorityEndpoint) addCachedEndpoints(authorityInfo authority.Info, use updatedCacheEntry := createcacheEntry(endpoints) - if authorityInfo.AuthorityType == ADFS { + if authorityInfo.AuthorityType == authority.ADFS { // Since we're here, we've made a call to the backend. We want to ensure we're caching // the latest values from the server. if cacheEntry, ok := m.cache[authorityInfo.CanonicalAuthorityURI]; ok { @@ -119,9 +116,12 @@ func (m *authorityEndpoint) addCachedEndpoints(authorityInfo authority.Info, use m.cache[authorityInfo.CanonicalAuthorityURI] = updatedCacheEntry } -func (m *authorityEndpoint) openIDConfigurationEndpoint(ctx context.Context, authorityInfo authority.Info, userPrincipalName string) (string, error) { - if authorityInfo.Tenant == "adfs" { +func (m *authorityEndpoint) openIDConfigurationEndpoint(ctx context.Context, authorityInfo authority.Info) (string, error) { + if authorityInfo.AuthorityType == authority.ADFS { return fmt.Sprintf("https://%s/adfs/.well-known/openid-configuration", authorityInfo.Host), nil + } else if authorityInfo.AuthorityType == authority.DSTS { + return fmt.Sprintf("https://%s/dstsv2/%s/v2.0/.well-known/openid-configuration", authorityInfo.Host, authority.DSTSTenant), nil + } else if authorityInfo.ValidateAuthority && !authority.TrustedHost(authorityInfo.Host) { resp, err := m.rest.Authority().AADInstanceDiscovery(ctx, authorityInfo) if err != nil { @@ -134,7 +134,6 @@ func (m *authorityEndpoint) openIDConfigurationEndpoint(ctx context.Context, aut return "", err } return resp.TenantDiscoveryEndpoint, nil - } return authorityInfo.CanonicalAuthorityURI + "v2.0/.well-known/openid-configuration", nil diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go index eb16b405c4..5e551abc83 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/version/version.go @@ -5,4 +5,4 @@ package version // Version is the version of this client package that is communicated to the server. 
-const Version = "1.2.0" +const Version = "1.4.2" diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/azure_ml.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/azure_ml.go new file mode 100644 index 0000000000..d7cffc295e --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/azure_ml.go @@ -0,0 +1,28 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package managedidentity + +import ( + "context" + "net/http" + "os" +) + +func createAzureMLAuthRequest(ctx context.Context, id ID, resource string) (*http.Request, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, os.Getenv(msiEndpointEnvVar), nil) + if err != nil { + return nil, err + } + + req.Header.Set("secret", os.Getenv(msiSecretEnvVar)) + q := req.URL.Query() + q.Set(apiVersionQueryParameterName, azureMLAPIVersion) + q.Set(resourceQueryParameterName, resource) + q.Set("clientid", os.Getenv("DEFAULT_IDENTITY_CLIENT_ID")) + if cid, ok := id.(UserAssignedClientID); ok { + q.Set("clientid", string(cid)) + } + req.URL.RawQuery = q.Encode() + return req, nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/cloud_shell.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/cloud_shell.go new file mode 100644 index 0000000000..be9a0bca38 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/cloud_shell.go @@ -0,0 +1,37 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package managedidentity + +import ( + "context" + "fmt" + "io" + "net/http" + "net/url" + "os" + "strings" +) + +func createCloudShellAuthRequest(ctx context.Context, resource string) (*http.Request, error) { + msiEndpoint := os.Getenv(msiEndpointEnvVar) + msiEndpointParsed, err := url.Parse(msiEndpoint) + if err != nil { + return nil, fmt.Errorf("couldn't parse %q: %s", msiEndpoint, err) + } + + data := url.Values{} + data.Set(resourceQueryParameterName, resource) + msiDataEncoded := data.Encode() + body := io.NopCloser(strings.NewReader(msiDataEncoded)) + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, msiEndpointParsed.String(), body) + if err != nil { + return nil, fmt.Errorf("error creating http request %s", err) + } + + req.Header.Set(metaHTTPHeaderName, "true") + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + + return req, nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/managedidentity.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/managedidentity.go new file mode 100644 index 0000000000..ca3de4325f --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/managedidentity.go @@ -0,0 +1,717 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +/* +Package managedidentity provides a client for retrieval of Managed Identity applications. +The Managed Identity Client is used to acquire a token for managed identity assigned to +an azure resource such as Azure function, app service, virtual machine, etc. to acquire a token +without using credentials. 
+*/ +package managedidentity + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "os" + "path/filepath" + "runtime" + "strings" + "sync/atomic" + "time" + + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/errors" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/base/storage" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/accesstokens" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/oauth/ops/authority" + "github.com/AzureAD/microsoft-authentication-library-for-go/apps/internal/shared" +) + +// AuthResult contains the results of one token acquisition operation. +// For details see https://aka.ms/msal-net-authenticationresult +type AuthResult = base.AuthResult + +type TokenSource = base.TokenSource + +const ( + TokenSourceIdentityProvider = base.TokenSourceIdentityProvider + TokenSourceCache = base.TokenSourceCache +) + +const ( + // DefaultToIMDS indicates that the source is defaulted to IMDS when no environment variables are set. + DefaultToIMDS Source = "DefaultToIMDS" + AzureArc Source = "AzureArc" + ServiceFabric Source = "ServiceFabric" + CloudShell Source = "CloudShell" + AzureML Source = "AzureML" + AppService Source = "AppService" + + // General request query parameter names + metaHTTPHeaderName = "Metadata" + apiVersionQueryParameterName = "api-version" + resourceQueryParameterName = "resource" + wwwAuthenticateHeaderName = "www-authenticate" + + // UAMI query parameter name + miQueryParameterClientId = "client_id" + miQueryParameterObjectId = "object_id" + miQueryParameterPrincipalId = "principal_id" + miQueryParameterResourceIdIMDS = "msi_res_id" + miQueryParameterResourceId = "mi_res_id" + + // IMDS + imdsDefaultEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token" + imdsAPIVersion = "2018-02-01" + systemAssignedManagedIdentity = "system_assigned_managed_identity" + + // Azure Arc + azureArcEndpoint = "http://127.0.0.1:40342/metadata/identity/oauth2/token" + azureArcAPIVersion = "2020-06-01" + azureArcFileExtension = ".key" + azureArcMaxFileSizeBytes int64 = 4096 + linuxTokenPath = "/var/opt/azcmagent/tokens" // #nosec G101 + linuxHimdsPath = "/opt/azcmagent/bin/himds" + azureConnectedMachine = "AzureConnectedMachineAgent" + himdsExecutableName = "himds.exe" + tokenName = "Tokens" + + // App Service + appServiceAPIVersion = "2019-08-01" + + // AzureML + azureMLAPIVersion = "2017-09-01" + // Service Fabric + serviceFabricAPIVersion = "2019-07-01-preview" + + // Environment Variables + identityEndpointEnvVar = "IDENTITY_ENDPOINT" + identityHeaderEnvVar = "IDENTITY_HEADER" + azurePodIdentityAuthorityHostEnvVar = "AZURE_POD_IDENTITY_AUTHORITY_HOST" + imdsEndVar = "IMDS_ENDPOINT" + msiEndpointEnvVar = "MSI_ENDPOINT" + msiSecretEnvVar = "MSI_SECRET" + identityServerThumbprintEnvVar = "IDENTITY_SERVER_THUMBPRINT" + + defaultRetryCount = 3 +) + +var retryCodesForIMDS = []int{ + http.StatusNotFound, // 404 + http.StatusGone, // 410 + http.StatusTooManyRequests, // 429 + http.StatusInternalServerError, // 500 + http.StatusNotImplemented, // 501 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + http.StatusHTTPVersionNotSupported, // 505 + http.StatusVariantAlsoNegotiates, // 506 + http.StatusInsufficientStorage, 
// 507 + http.StatusLoopDetected, // 508 + http.StatusNotExtended, // 510 + http.StatusNetworkAuthenticationRequired, // 511 +} + +var retryStatusCodes = []int{ + http.StatusRequestTimeout, // 408 + http.StatusTooManyRequests, // 429 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 +} + +var getAzureArcPlatformPath = func(platform string) string { + switch platform { + case "windows": + return filepath.Join(os.Getenv("ProgramData"), azureConnectedMachine, tokenName) + case "linux": + return linuxTokenPath + default: + return "" + } +} + +var getAzureArcHimdsFilePath = func(platform string) string { + switch platform { + case "windows": + return filepath.Join(os.Getenv("ProgramData"), azureConnectedMachine, himdsExecutableName) + case "linux": + return linuxHimdsPath + default: + return "" + } +} + +type Source string + +type ID interface { + value() string +} + +type systemAssignedValue string // it's private for a reason: to make the input consistent. +type UserAssignedClientID string +type UserAssignedObjectID string +type UserAssignedResourceID string + +func (s systemAssignedValue) value() string { return string(s) } +func (c UserAssignedClientID) value() string { return string(c) } +func (o UserAssignedObjectID) value() string { return string(o) } +func (r UserAssignedResourceID) value() string { return string(r) } +func SystemAssigned() ID { + return systemAssignedValue(systemAssignedManagedIdentity) +} + +// cache never uses the client because instance discovery is always disabled. +var cacheManager *storage.Manager = storage.New(nil) + +type Client struct { + httpClient ops.HTTPClient + miType ID + source Source + authParams authority.AuthParams + retryPolicyEnabled bool + canRefresh *atomic.Value +} + +type AcquireTokenOptions struct { + claims string +} + +type ClientOption func(*Client) + +type AcquireTokenOption func(o *AcquireTokenOptions) + +// WithClaims sets additional claims to request for the token, such as those required by token revocation or conditional access policies. +// Use this option when Azure AD returned a claims challenge for a prior request. The argument must be decoded. +func WithClaims(claims string) AcquireTokenOption { + return func(o *AcquireTokenOptions) { + o.claims = claims + } +} + +// WithHTTPClient allows for a custom HTTP client to be set. +func WithHTTPClient(httpClient ops.HTTPClient) ClientOption { + return func(c *Client) { + c.httpClient = httpClient + } +} + +func WithRetryPolicyDisabled() ClientOption { + return func(c *Client) { + c.retryPolicyEnabled = false + } +} + +// Client to be used to acquire tokens for managed identity.
+// ID: [SystemAssigned], [UserAssignedClientID], [UserAssignedResourceID], [UserAssignedObjectID] + +// Options: [WithHTTPClient] +func New(id ID, options ...ClientOption) (Client, error) { + source, err := GetSource() + if err != nil { + return Client{}, err + } + + // Check for user-assigned restrictions based on the source + switch source { + case AzureArc: + switch id.(type) { + case UserAssignedClientID, UserAssignedResourceID, UserAssignedObjectID: + return Client{}, errors.New("Azure Arc doesn't support user-assigned managed identities") + } + case AzureML: + switch id.(type) { + case UserAssignedObjectID, UserAssignedResourceID: + return Client{}, errors.New("Azure ML supports specifying a user-assigned managed identity by client ID only") + } + case CloudShell: + switch id.(type) { + case UserAssignedClientID, UserAssignedResourceID, UserAssignedObjectID: + return Client{}, errors.New("Cloud Shell doesn't support user-assigned managed identities") + } + case ServiceFabric: + switch id.(type) { + case UserAssignedClientID, UserAssignedResourceID, UserAssignedObjectID: + return Client{}, errors.New("Service Fabric API doesn't support specifying a user-assigned identity. The identity is determined by cluster resource configuration. See https://aka.ms/servicefabricmi") + } + } + + switch t := id.(type) { + case UserAssignedClientID: + if len(string(t)) == 0 { + return Client{}, fmt.Errorf("empty %T", t) + } + case UserAssignedResourceID: + if len(string(t)) == 0 { + return Client{}, fmt.Errorf("empty %T", t) + } + case UserAssignedObjectID: + if len(string(t)) == 0 { + return Client{}, fmt.Errorf("empty %T", t) + } + case systemAssignedValue: + default: + return Client{}, fmt.Errorf("unsupported type %T", id) + } + zero := atomic.Value{} + zero.Store(false) + client := Client{ + miType: id, + httpClient: shared.DefaultClient, + retryPolicyEnabled: true, + source: source, + canRefresh: &zero, + } + for _, option := range options { + option(&client) + } + fakeAuthInfo, err := authority.NewInfoFromAuthorityURI("https://login.microsoftonline.com/managed_identity", false, true) + if err != nil { + return Client{}, err + } + client.authParams = authority.NewAuthParams(client.miType.value(), fakeAuthInfo) + return client, nil +} + +// GetSource detects and returns the managed identity source available in the environment. +func GetSource() (Source, error) { + identityEndpoint := os.Getenv(identityEndpointEnvVar) + identityHeader := os.Getenv(identityHeaderEnvVar) + identityServerThumbprint := os.Getenv(identityServerThumbprintEnvVar) + msiEndpoint := os.Getenv(msiEndpointEnvVar) + msiSecret := os.Getenv(msiSecretEnvVar) + imdsEndpoint := os.Getenv(imdsEndVar) + + if identityEndpoint != "" && identityHeader != "" { + if identityServerThumbprint != "" { + return ServiceFabric, nil + } + return AppService, nil + } else if msiEndpoint != "" { + if msiSecret != "" { + return AzureML, nil + } else { + return CloudShell, nil + } + } else if isAzureArcEnvironment(identityEndpoint, imdsEndpoint) { + return AzureArc, nil + } + + return DefaultToIMDS, nil +} + +// now wraps time.Now(). It's a package-level variable so tests can +// substitute a fake clock when exercising token refresh. +var now = time.Now + +// Acquires tokens from the configured managed identity on an azure resource.
+// +// Resource: the resource the application is requesting access to +// Options: [WithClaims] +func (c Client) AcquireToken(ctx context.Context, resource string, options ...AcquireTokenOption) (AuthResult, error) { + resource = strings.TrimSuffix(resource, "/.default") + o := AcquireTokenOptions{} + for _, option := range options { + option(&o) + } + c.authParams.Scopes = []string{resource} + + // ignore cached access tokens when given claims + if o.claims == "" { + stResp, err := cacheManager.Read(ctx, c.authParams) + if err != nil { + return AuthResult{}, err + } + ar, err := base.AuthResultFromStorage(stResp) + if err == nil { + if !stResp.AccessToken.RefreshOn.T.IsZero() && !stResp.AccessToken.RefreshOn.T.After(now()) && c.canRefresh.CompareAndSwap(false, true) { + defer c.canRefresh.Store(false) + if tr, er := c.getToken(ctx, resource); er == nil { + return tr, nil + } + } + ar.AccessToken, err = c.authParams.AuthnScheme.FormatAccessToken(ar.AccessToken) + return ar, err + } + } + return c.getToken(ctx, resource) +} + +func (c Client) getToken(ctx context.Context, resource string) (AuthResult, error) { + switch c.source { + case AzureArc: + return c.acquireTokenForAzureArc(ctx, resource) + case AzureML: + return c.acquireTokenForAzureML(ctx, resource) + case CloudShell: + return c.acquireTokenForCloudShell(ctx, resource) + case DefaultToIMDS: + return c.acquireTokenForIMDS(ctx, resource) + case AppService: + return c.acquireTokenForAppService(ctx, resource) + case ServiceFabric: + return c.acquireTokenForServiceFabric(ctx, resource) + default: + return AuthResult{}, fmt.Errorf("unsupported source %q", c.source) + } +} + +func (c Client) acquireTokenForAppService(ctx context.Context, resource string) (AuthResult, error) { + req, err := createAppServiceAuthRequest(ctx, c.miType, resource) + if err != nil { + return AuthResult{}, err + } + tokenResponse, err := c.getTokenForRequest(req, resource) + if err != nil { + return AuthResult{}, err + } + return authResultFromToken(c.authParams, tokenResponse) +} + +func (c Client) acquireTokenForIMDS(ctx context.Context, resource string) (AuthResult, error) { + req, err := createIMDSAuthRequest(ctx, c.miType, resource) + if err != nil { + return AuthResult{}, err + } + tokenResponse, err := c.getTokenForRequest(req, resource) + if err != nil { + return AuthResult{}, err + } + return authResultFromToken(c.authParams, tokenResponse) +} + +func (c Client) acquireTokenForCloudShell(ctx context.Context, resource string) (AuthResult, error) { + req, err := createCloudShellAuthRequest(ctx, resource) + if err != nil { + return AuthResult{}, err + } + tokenResponse, err := c.getTokenForRequest(req, resource) + if err != nil { + return AuthResult{}, err + } + return authResultFromToken(c.authParams, tokenResponse) +} + +func (c Client) acquireTokenForAzureML(ctx context.Context, resource string) (AuthResult, error) { + req, err := createAzureMLAuthRequest(ctx, c.miType, resource) + if err != nil { + return AuthResult{}, err + } + tokenResponse, err := c.getTokenForRequest(req, resource) + if err != nil { + return AuthResult{}, err + } + return authResultFromToken(c.authParams, tokenResponse) +} + +func (c Client) acquireTokenForServiceFabric(ctx context.Context, resource string) (AuthResult, error) { + req, err := createServiceFabricAuthRequest(ctx, resource) + if err != nil { + return AuthResult{}, err + } + tokenResponse, err := c.getTokenForRequest(req, resource) + if err != nil { + return AuthResult{}, err + } + return authResultFromToken(c.authParams,
tokenResponse) +} + +func (c Client) acquireTokenForAzureArc(ctx context.Context, resource string) (AuthResult, error) { + req, err := createAzureArcAuthRequest(ctx, resource, "") + if err != nil { + return AuthResult{}, err + } + + response, err := c.httpClient.Do(req) + if err != nil { + return AuthResult{}, err + } + defer response.Body.Close() + + if response.StatusCode != http.StatusUnauthorized { + return AuthResult{}, fmt.Errorf("expected a 401 response, received %d", response.StatusCode) + } + + secret, err := c.getAzureArcSecretKey(response, runtime.GOOS) + if err != nil { + return AuthResult{}, err + } + + secondRequest, err := createAzureArcAuthRequest(ctx, resource, string(secret)) + if err != nil { + return AuthResult{}, err + } + + tokenResponse, err := c.getTokenForRequest(secondRequest, resource) + if err != nil { + return AuthResult{}, err + } + return authResultFromToken(c.authParams, tokenResponse) +} + +func authResultFromToken(authParams authority.AuthParams, token accesstokens.TokenResponse) (AuthResult, error) { + if cacheManager == nil { + return AuthResult{}, errors.New("cache instance is nil") + } + account, err := cacheManager.Write(authParams, token) + if err != nil { + return AuthResult{}, err + } + // if refreshOn is not set, set it to half of the time until expiry if expiry is more than 2 hours away + if token.RefreshOn.T.IsZero() { + if lifetime := time.Until(token.ExpiresOn); lifetime > 2*time.Hour { + token.RefreshOn.T = time.Now().Add(lifetime / 2) + } + } + ar, err := base.NewAuthResult(token, account) + if err != nil { + return AuthResult{}, err + } + ar.AccessToken, err = authParams.AuthnScheme.FormatAccessToken(ar.AccessToken) + return ar, err +} + +// contains checks if the element is present in the list. +func contains[T comparable](list []T, element T) bool { + for _, v := range list { + if v == element { + return true + } + } + return false +} + +// retry performs an HTTP request with retries based on the provided options. 
+func (c Client) retry(maxRetries int, req *http.Request) (*http.Response, error) { + var resp *http.Response + var err error + for attempt := 0; attempt < maxRetries; attempt++ { + tryCtx, tryCancel := context.WithTimeout(req.Context(), time.Minute) + defer tryCancel() + if resp != nil && resp.Body != nil { + _, _ = io.Copy(io.Discard, resp.Body) + resp.Body.Close() + } + cloneReq := req.Clone(tryCtx) + resp, err = c.httpClient.Do(cloneReq) + retrylist := retryStatusCodes + if c.source == DefaultToIMDS { + retrylist = retryCodesForIMDS + } + if err == nil && !contains(retrylist, resp.StatusCode) { + return resp, nil + } + select { + case <-time.After(time.Second): + case <-req.Context().Done(): + err = req.Context().Err() + return resp, err + } + } + return resp, err +} + +func (c Client) getTokenForRequest(req *http.Request, resource string) (accesstokens.TokenResponse, error) { + r := accesstokens.TokenResponse{} + var resp *http.Response + var err error + + if c.retryPolicyEnabled { + resp, err = c.retry(defaultRetryCount, req) + } else { + resp, err = c.httpClient.Do(req) + } + if err != nil { + return r, err + } + responseBytes, err := io.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return r, err + } + switch resp.StatusCode { + case http.StatusOK, http.StatusAccepted: + default: + sd := strings.TrimSpace(string(responseBytes)) + if sd != "" { + return r, errors.CallErr{ + Req: req, + Resp: resp, + Err: fmt.Errorf("http call(%s)(%s) error: reply status code was %d:\n%s", + req.URL.String(), + req.Method, + resp.StatusCode, + sd), + } + } + return r, errors.CallErr{ + Req: req, + Resp: resp, + Err: fmt.Errorf("http call(%s)(%s) error: reply status code was %d", req.URL.String(), req.Method, resp.StatusCode), + } + } + + err = json.Unmarshal(responseBytes, &r) + if err != nil { + return r, errors.InvalidJsonErr{ + Err: fmt.Errorf("error parsing the json error: %s", err), + } + } + r.GrantedScopes.Slice = append(r.GrantedScopes.Slice, resource) + + return r, err +} + +func createAppServiceAuthRequest(ctx context.Context, id ID, resource string) (*http.Request, error) { + identityEndpoint := os.Getenv(identityEndpointEnvVar) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, identityEndpoint, nil) + if err != nil { + return nil, err + } + req.Header.Set("X-IDENTITY-HEADER", os.Getenv(identityHeaderEnvVar)) + q := req.URL.Query() + q.Set("api-version", appServiceAPIVersion) + q.Set("resource", resource) + switch t := id.(type) { + case UserAssignedClientID: + q.Set(miQueryParameterClientId, string(t)) + case UserAssignedResourceID: + q.Set(miQueryParameterResourceId, string(t)) + case UserAssignedObjectID: + q.Set(miQueryParameterObjectId, string(t)) + case systemAssignedValue: + default: + return nil, fmt.Errorf("unsupported type %T", id) + } + req.URL.RawQuery = q.Encode() + return req, nil +} + +func createIMDSAuthRequest(ctx context.Context, id ID, resource string) (*http.Request, error) { + msiEndpoint, err := url.Parse(imdsDefaultEndpoint) + if err != nil { + return nil, fmt.Errorf("couldn't parse %q: %s", imdsDefaultEndpoint, err) + } + msiParameters := msiEndpoint.Query() + msiParameters.Set(apiVersionQueryParameterName, imdsAPIVersion) + msiParameters.Set(resourceQueryParameterName, resource) + + switch t := id.(type) { + case UserAssignedClientID: + msiParameters.Set(miQueryParameterClientId, string(t)) + case UserAssignedResourceID: + msiParameters.Set(miQueryParameterResourceIdIMDS, string(t)) + case UserAssignedObjectID: + 
msiParameters.Set(miQueryParameterObjectId, string(t)) + case systemAssignedValue: // not adding anything + default: + return nil, fmt.Errorf("unsupported type %T", id) + } + + msiEndpoint.RawQuery = msiParameters.Encode() + req, err := http.NewRequestWithContext(ctx, http.MethodGet, msiEndpoint.String(), nil) + if err != nil { + return nil, fmt.Errorf("error creating http request %s", err) + } + req.Header.Set(metaHTTPHeaderName, "true") + return req, nil +} + +func createAzureArcAuthRequest(ctx context.Context, resource string, key string) (*http.Request, error) { + identityEndpoint := os.Getenv(identityEndpointEnvVar) + if identityEndpoint == "" { + identityEndpoint = azureArcEndpoint + } + msiEndpoint, parseErr := url.Parse(identityEndpoint) + + if parseErr != nil { + return nil, fmt.Errorf("couldn't parse %q: %s", identityEndpoint, parseErr) + } + + msiParameters := msiEndpoint.Query() + msiParameters.Set(apiVersionQueryParameterName, azureArcAPIVersion) + msiParameters.Set(resourceQueryParameterName, resource) + + msiEndpoint.RawQuery = msiParameters.Encode() + req, err := http.NewRequestWithContext(ctx, http.MethodGet, msiEndpoint.String(), nil) + if err != nil { + return nil, fmt.Errorf("error creating http request %s", err) + } + req.Header.Set(metaHTTPHeaderName, "true") + + if key != "" { + req.Header.Set("Authorization", fmt.Sprintf("Basic %s", key)) + } + + return req, nil +} + +func isAzureArcEnvironment(identityEndpoint, imdsEndpoint string) bool { + if identityEndpoint != "" && imdsEndpoint != "" { + return true + } + himdsFilePath := getAzureArcHimdsFilePath(runtime.GOOS) + if himdsFilePath != "" { + if _, err := os.Stat(himdsFilePath); err == nil { + return true + } + } + return false +} + +func (c *Client) getAzureArcSecretKey(response *http.Response, platform string) (string, error) { + wwwAuthenticateHeader := response.Header.Get(wwwAuthenticateHeaderName) + + if len(wwwAuthenticateHeader) == 0 { + return "", errors.New("response has no www-authenticate header") + } + + // check if the platform is supported + expectedSecretFilePath := getAzureArcPlatformPath(platform) + if expectedSecretFilePath == "" { + return "", errors.New("platform not supported, expected linux or windows") + } + + parts := strings.Split(wwwAuthenticateHeader, "Basic realm=") + if len(parts) < 2 { + return "", fmt.Errorf("basic realm= not found in the string, instead found: %s", wwwAuthenticateHeader) + } + + secretFilePath := parts + + // check that the file in the file path is a .key file + fileName := filepath.Base(secretFilePath[1]) + if !strings.HasSuffix(fileName, azureArcFileExtension) { + return "", fmt.Errorf("invalid file extension, expected %s, got %s", azureArcFileExtension, filepath.Ext(fileName)) + } + + // check that file path from header matches the expected file path for the platform + if expectedSecretFilePath != filepath.Dir(secretFilePath[1]) { + return "", fmt.Errorf("invalid file path, expected %s, got %s", expectedSecretFilePath, filepath.Dir(secretFilePath[1])) + } + + fileInfo, err := os.Stat(secretFilePath[1]) + if err != nil { + return "", fmt.Errorf("failed to get metadata for %s due to error: %s", secretFilePath[1], err) + } + + // Throw an error if the secret file's size is greater than 4096 bytes + if s := fileInfo.Size(); s > azureArcMaxFileSizeBytes { + return "", fmt.Errorf("invalid secret file size, expected %d, file size was %d", azureArcMaxFileSizeBytes, s) + } + + // Attempt to read the contents of the secret file + secret, err := 
os.ReadFile(secretFilePath[1]) + if err != nil { + return "", fmt.Errorf("failed to read %q due to error: %s", secretFilePath[1], err) + } + + return string(secret), nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/servicefabric.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/servicefabric.go new file mode 100644 index 0000000000..535065e9d9 --- /dev/null +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity/servicefabric.go @@ -0,0 +1,25 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT license. + +package managedidentity + +import ( + "context" + "net/http" + "os" +) + +func createServiceFabricAuthRequest(ctx context.Context, resource string) (*http.Request, error) { + identityEndpoint := os.Getenv(identityEndpointEnvVar) + req, err := http.NewRequestWithContext(ctx, http.MethodGet, identityEndpoint, nil) + if err != nil { + return nil, err + } + req.Header.Set("Accept", "application/json") + req.Header.Set("Secret", os.Getenv(identityHeaderEnvVar)) + q := req.URL.Query() + q.Set("api-version", serviceFabricAPIVersion) + q.Set("resource", resource) + req.URL.RawQuery = q.Encode() + return req, nil +} diff --git a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go index 392e5e43f7..7beed26174 100644 --- a/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go +++ b/vendor/github.com/AzureAD/microsoft-authentication-library-for-go/apps/public/public.go @@ -51,6 +51,13 @@ type AuthenticationScheme = authority.AuthenticationScheme type Account = shared.Account +type TokenSource = base.TokenSource + +const ( + TokenSourceIdentityProvider = base.TokenSourceIdentityProvider + TokenSourceCache = base.TokenSourceCache +) + var errNoAccount = errors.New("no account was specified with public.WithSilentAccount(), or the specified account is invalid") // clientOptions configures the Client's behavior. 
@@ -387,7 +394,7 @@ func (pca Client) AcquireTokenByUsernamePassword(ctx context.Context, scopes []s if err != nil { return AuthResult{}, err } - return pca.base.AuthResultFromToken(ctx, authParams, token, true) + return pca.base.AuthResultFromToken(ctx, authParams, token) } type DeviceCodeResult = accesstokens.DeviceCodeResult @@ -412,7 +419,7 @@ func (d DeviceCode) AuthenticationResult(ctx context.Context) (AuthResult, error if err != nil { return AuthResult{}, err } - return d.client.base.AuthResultFromToken(ctx, d.authParams, token, true) + return d.client.base.AuthResultFromToken(ctx, d.authParams, token) } // acquireTokenByDeviceCodeOptions contains optional configuration for AcquireTokenByDeviceCode @@ -687,7 +694,7 @@ func (pca Client) AcquireTokenInteractive(ctx context.Context, scopes []string, return AuthResult{}, err } - return pca.base.AuthResultFromToken(ctx, authParams, token, true) + return pca.base.AuthResultFromToken(ctx, authParams, token) } type interactiveAuthResult struct { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go index f3ce8183dd..2945185b0b 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -349,7 +349,7 @@ func (cfg *sharedConfig) setFromIniFiles(profiles map[string]struct{}, profile s if cfg.hasSSOTokenProviderConfiguration() { skippedFiles = 0 for _, f := range files { - section, ok := f.IniData.GetSection(fmt.Sprintf(ssoSectionPrefix + strings.TrimSpace(cfg.SSOSessionName))) + section, ok := f.IniData.GetSection(ssoSectionPrefix + strings.TrimSpace(cfg.SSOSessionName)) if ok { var ssoSession ssoSession ssoSession.setFromIniSection(section) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index d15e3c84c0..e72db4de1e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.55.5" +const SDKVersion = "1.55.7" diff --git a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml index ffc7b992b3..f4e7dbf37b 100644 --- a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml +++ b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml @@ -1,7 +1,7 @@ freebsd_task: name: 'FreeBSD' freebsd_instance: - image_family: freebsd-13-2 + image_family: freebsd-14-1 install_script: - pkg update -f - pkg install -y go @@ -9,5 +9,6 @@ freebsd_task: # run tests as user "cirrus" instead of root - pw useradd cirrus -m - chown -R cirrus:cirrus . - - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... - - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - FSNOTIFY_BUFFER=4096 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race ./... + - FSNOTIFY_DEBUG=1 sudo --preserve-env=FSNOTIFY_BUFFER -u cirrus go test -parallel 1 -race -v ./... 
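For orientation, the vendored managedidentity package added above is used roughly as follows. A minimal sketch, assuming a system-assigned identity is available on the host; the resource URI is an example value and error handling is collapsed to panics:

package main

import (
	"context"
	"fmt"

	mi "github.com/AzureAD/microsoft-authentication-library-for-go/apps/managedidentity"
)

func main() {
	// System-assigned identity; user-assigned identities use
	// mi.UserAssignedClientID / UserAssignedObjectID / UserAssignedResourceID,
	// subject to the per-source restrictions New enforces (e.g. Cloud Shell
	// rejects user-assigned identities).
	client, err := mi.New(mi.SystemAssigned())
	if err != nil {
		panic(err)
	}
	// AcquireToken takes a resource rather than scopes; the client strips a
	// trailing "/.default" and serves cached tokens until RefreshOn/expiry.
	result, err := client.AcquireToken(context.Background(), "https://management.azure.com")
	if err != nil {
		panic(err)
	}
	fmt.Println(result.AccessToken)
}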
diff --git a/vendor/github.com/fsnotify/fsnotify/.editorconfig b/vendor/github.com/fsnotify/fsnotify/.editorconfig deleted file mode 100644 index fad895851e..0000000000 --- a/vendor/github.com/fsnotify/fsnotify/.editorconfig +++ /dev/null @@ -1,12 +0,0 @@ -root = true - -[*.go] -indent_style = tab -indent_size = 4 -insert_final_newline = true - -[*.{yml,yaml}] -indent_style = space -indent_size = 2 -insert_final_newline = true -trim_trailing_whitespace = true diff --git a/vendor/github.com/fsnotify/fsnotify/.gitattributes b/vendor/github.com/fsnotify/fsnotify/.gitattributes deleted file mode 100644 index 32f1001be0..0000000000 --- a/vendor/github.com/fsnotify/fsnotify/.gitattributes +++ /dev/null @@ -1 +0,0 @@ -go.sum linguist-generated diff --git a/vendor/github.com/fsnotify/fsnotify/.gitignore b/vendor/github.com/fsnotify/fsnotify/.gitignore index 391cc076b1..daea9dd6d6 100644 --- a/vendor/github.com/fsnotify/fsnotify/.gitignore +++ b/vendor/github.com/fsnotify/fsnotify/.gitignore @@ -5,3 +5,6 @@ # Output of go build ./cmd/fsnotify /fsnotify /fsnotify.exe + +/test/kqueue +/test/a.out diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md index e0e5757549..fa854785d0 100644 --- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -1,8 +1,36 @@ # Changelog -Unreleased ----------- -Nothing yet. +1.8.0 2024-10-31 +---------------- + +### Additions + +- all: add `FSNOTIFY_DEBUG` to print debug logs to stderr ([#619]) + +### Changes and fixes + +- windows: fix behaviour of `WatchList()` to be consistent with other platforms ([#610]) + +- kqueue: ignore events with Ident=0 ([#590]) + +- kqueue: set O_CLOEXEC to prevent passing file descriptors to children ([#617]) + +- kqueue: emit events as "/path/dir/file" instead of "path/link/file" when watching a symlink ([#625]) + +- inotify: don't send event for IN_DELETE_SELF when also watching the parent ([#620]) + +- inotify: fix panic when calling Remove() in a goroutine ([#650]) + +- fen: allow watching subdirectories of watched directories ([#621]) + +[#590]: https://github.com/fsnotify/fsnotify/pull/590 +[#610]: https://github.com/fsnotify/fsnotify/pull/610 +[#617]: https://github.com/fsnotify/fsnotify/pull/617 +[#619]: https://github.com/fsnotify/fsnotify/pull/619 +[#620]: https://github.com/fsnotify/fsnotify/pull/620 +[#621]: https://github.com/fsnotify/fsnotify/pull/621 +[#625]: https://github.com/fsnotify/fsnotify/pull/625 +[#650]: https://github.com/fsnotify/fsnotify/pull/650 1.7.0 - 2023-10-22 ------------------ diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md index ea379759d5..e4ac2a2fff 100644 --- a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md +++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md @@ -1,7 +1,7 @@ Thank you for your interest in contributing to fsnotify! We try to review and merge PRs in a reasonable timeframe, but please be aware that: -- To avoid "wasted" work, please discus changes on the issue tracker first. You +- To avoid "wasted" work, please discuss changes on the issue tracker first. You can just send PRs, but they may end up being rejected for one reason or the other. @@ -20,6 +20,124 @@ platforms. Testing different platforms locally can be done with something like Use the `-short` flag to make the "stress test" run faster.
+Writing new tests +----------------- +Scripts in the testdata directory allow creating test cases in a "shell-like" +syntax. The basic format is: + + script + + Output: + desired output + +For example: + + # Create a new empty file with some data. + watch / + echo data >/file + + Output: + create /file + write /file + +Just create a new file to add a new test; select which tests to run with +`-run TestScript/[path]`. + +script +------ +The script is a "shell-like" script: + + cmd arg arg + +Comments are supported with `#`: + + # Comment + cmd arg arg # Comment + +All operations are done in a temp directory; a path like "/foo" is rewritten to +"/tmp/TestFoo/foo". + +Arguments can be quoted with `"` or `'`; there are no escapes and they're +functionally identical right now, but this may change in the future, so best to +assume shell-like rules. + + touch "/file with spaces" + +End-of-line escapes with `\` are not supported. + +### Supported commands + + watch path [ops] # Watch the path, reporting events for it. Nothing is + # watched by default. Optionally a list of ops can be + # given, as with AddWith(path, WithOps(...)). + unwatch path # Stop watching the path. + watchlist n # Assert watchlist length. + + stop # Stop running the script; for debugging. + debug [yes/no] # Enable/disable FSNOTIFY_DEBUG (tests are run in + parallel by default, so -parallel=1 is probably a good + idea). + + touch path + mkdir [-p] dir + ln -s target link # Only ln -s supported. + mkfifo path + mknod dev path + mv src dst + rm [-r] path + chmod mode path # Octal only + sleep time-in-ms + + cat path # Read path (does nothing with the data; just reads it). + echo str >>path # Append "str" to "path". + echo str >path # Truncate "path" and write "str". + + require reason # Skip the test if "reason" is true; "skip" and + skip reason # "require" behave identical; it supports both for + # readability. Possible reasons are: + # + # always Always skip this test. + # symlink Symlinks are supported (requires admin + # permissions on Windows). + # mkfifo Platform doesn't support FIFO named sockets. + # mknod Platform doesn't support device nodes. + + +output +------ +After `Output:` the desired output is given; this is indented by convention, but +that's not required. + +The format of that is: + + # Comment + event path # Comment + + system: + event path + system2: + event path + +Every event is one line, and any whitespace between the event and path are +ignored. The path can optionally be surrounded in ". Anything after a "#" is +ignored. + +Platform-specific tests can be added after GOOS; for example: + + watch / + touch /file + + Output: + # Tested if nothing else matches + create /file + + # Windows-specific test. + windows: + write /file + +You can specify multiple platforms with a comma (e.g. "windows, linux:"). +"kqueue" is a shortcut for all kqueue systems (BSD, macOS). + [goon]: https://github.com/arp242/goon [Vagrant]: https://www.vagrantup.com/ diff --git a/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/vendor/github.com/fsnotify/fsnotify/backend_fen.go index 28497f1dd8..c349c326c7 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_fen.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_fen.go @@ -1,8 +1,8 @@ //go:build solaris -// +build solaris -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh +// FEN backend for illumos (supported) and Solaris (untested, but should work). +// +// See port_create(3c) etc. for docs. 
https://www.illumos.org/man/3C/port_create package fsnotify @@ -12,150 +12,33 @@ import ( "os" "path/filepath" "sync" + "time" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. 
moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type fen struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error mu sync.Mutex port *unix.EventPort - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - dirs map[string]struct{} // Explicitly watched directories - watches map[string]struct{} // Explicitly watched non-directories + done chan struct{} // Channel for sending a "quit message" to the reader goroutine + dirs map[string]Op // Explicitly watched directories + watches map[string]Op // Explicitly watched non-directories } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { - w := &Watcher{ - Events: make(chan Event, sz), - Errors: make(chan error), - dirs: make(map[string]struct{}), - watches: make(map[string]struct{}), +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { + w := &fen{ + Events: ev, + Errors: errs, + dirs: make(map[string]Op), + watches: make(map[string]Op), done: make(chan struct{}), } @@ -171,27 +54,30 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { // sendEvent attempts to send an event to the user, returning true if the event // was put in the channel successfully and false if the watcher has been closed. 
-func (w *Watcher) sendEvent(name string, op Op) (sent bool) { +func (w *fen) sendEvent(name string, op Op) (sent bool) { select { - case w.Events <- Event{Name: name, Op: op}: - return true case <-w.done: return false + case w.Events <- Event{Name: name, Op: op}: + return true } } // sendError attempts to send an error to the user, returning true if the error // was put in the channel successfully and false if the watcher has been closed. -func (w *Watcher) sendError(err error) (sent bool) { - select { - case w.Errors <- err: +func (w *fen) sendError(err error) (sent bool) { + if err == nil { return true + } + select { case <-w.done: return false + case w.Errors <- err: + return true } } -func (w *Watcher) isClosed() bool { +func (w *fen) isClosed() bool { select { case <-w.done: return true @@ -200,8 +86,7 @@ func (w *Watcher) isClosed() bool { } } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { +func (w *fen) Close() error { // Take the lock used by associateFile to prevent lingering events from // being processed after the close w.mu.Lock() @@ -213,60 +98,21 @@ func (w *Watcher) Close() error { return w.port.Close() } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *fen) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *fen) AddWith(name string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } - if w.port.PathIsWatched(name) { - return nil + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), name) } - _ = getOptions(opts...) + with := getOptions(opts...) 
+ if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } // Currently we resolve symlinks that were explicitly requested to be // watched. Otherwise we would use LStat here. @@ -283,7 +129,7 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { } w.mu.Lock() - w.dirs[name] = struct{}{} + w.dirs[name] = with.op w.mu.Unlock() return nil } @@ -294,26 +140,22 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { } w.mu.Lock() - w.watches[name] = struct{}{} + w.watches[name] = with.op w.mu.Unlock() return nil } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *fen) Remove(name string) error { if w.isClosed() { return nil } if !w.port.PathIsWatched(name) { return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } // The user has expressed an intent. Immediately remove this name from // whichever watch list it might be in. If it's not in there the delete @@ -346,7 +188,7 @@ func (w *Watcher) Remove(name string) error { } // readEvents contains the main loop that runs in a goroutine watching for events. -func (w *Watcher) readEvents() { +func (w *fen) readEvents() { // If this function returns, the watcher has been closed and we can close // these channels defer func() { @@ -382,17 +224,19 @@ func (w *Watcher) readEvents() { continue } + if debug { + internal.Debug(pevent.Path, pevent.Events) + } + err = w.handleEvent(&pevent) - if err != nil { - if !w.sendError(err) { - return - } + if !w.sendError(err) { + return } } } } -func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error { +func (w *fen) handleDirectory(path string, stat os.FileInfo, follow bool, handler func(string, os.FileInfo, bool) error) error { files, err := os.ReadDir(path) if err != nil { return err @@ -418,7 +262,7 @@ func (w *Watcher) handleDirectory(path string, stat os.FileInfo, follow bool, ha // bitmap matches more than one event type (e.g. the file was both modified and // had the attributes changed between when the association was created and the // when event was returned) -func (w *Watcher) handleEvent(event *unix.PortEvent) error { +func (w *fen) handleEvent(event *unix.PortEvent) error { var ( events = event.Events path = event.Path @@ -510,15 +354,9 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error { } if events&unix.FILE_MODIFIED != 0 { - if fmode.IsDir() { - if watchedDir { - if err := w.updateDirectory(path); err != nil { - return err - } - } else { - if !w.sendEvent(path, Write) { - return nil - } + if fmode.IsDir() && watchedDir { + if err := w.updateDirectory(path); err != nil { + return err } } else { if !w.sendEvent(path, Write) { @@ -543,7 +381,7 @@ func (w *Watcher) handleEvent(event *unix.PortEvent) error { return nil } -func (w *Watcher) updateDirectory(path string) error { +func (w *fen) updateDirectory(path string) error { // The directory was modified, so we must find unwatched entities and watch // them. 
If something was removed from the directory, nothing will happen, // as everything else should still be watched. @@ -563,10 +401,8 @@ func (w *Watcher) updateDirectory(path string) error { return err } err = w.associateFile(path, finfo, false) - if err != nil { - if !w.sendError(err) { - return nil - } + if !w.sendError(err) { + return nil } if !w.sendEvent(path, Create) { return nil @@ -575,7 +411,7 @@ func (w *Watcher) updateDirectory(path string) error { return nil } -func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) error { +func (w *fen) associateFile(path string, stat os.FileInfo, follow bool) error { if w.isClosed() { return ErrClosed } @@ -593,34 +429,34 @@ func (w *Watcher) associateFile(path string, stat os.FileInfo, follow bool) erro // cleared up that discrepancy. The most likely cause is that the event // has fired but we haven't processed it yet. err := w.port.DissociatePath(path) - if err != nil && err != unix.ENOENT { + if err != nil && !errors.Is(err, unix.ENOENT) { return err } } - // FILE_NOFOLLOW means we watch symlinks themselves rather than their - // targets. - events := unix.FILE_MODIFIED | unix.FILE_ATTRIB | unix.FILE_NOFOLLOW - if follow { - // We *DO* follow symlinks for explicitly watched entries. - events = unix.FILE_MODIFIED | unix.FILE_ATTRIB + + var events int + if !follow { + // Watch symlinks themselves rather than their targets unless this entry + // is explicitly watched. + events |= unix.FILE_NOFOLLOW + } + if true { // TODO: implement withOps() + events |= unix.FILE_MODIFIED } - return w.port.AssociatePath(path, stat, - events, - stat.Mode()) + if true { + events |= unix.FILE_ATTRIB + } + return w.port.AssociatePath(path, stat, events, stat.Mode()) } -func (w *Watcher) dissociateFile(path string, stat os.FileInfo, unused bool) error { +func (w *fen) dissociateFile(path string, stat os.FileInfo, unused bool) error { if !w.port.PathIsWatched(path) { return nil } return w.port.DissociatePath(path) } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { +func (w *fen) WatchList() []string { if w.isClosed() { return nil } @@ -638,3 +474,11 @@ func (w *Watcher) WatchList() []string { return entries } + +func (w *fen) xSupports(op Op) bool { + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go index 921c1c1e40..36c311694c 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go @@ -1,8 +1,4 @@ //go:build linux && !appengine -// +build linux,!appengine - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -10,127 +6,20 @@ import ( "errors" "fmt" "io" + "io/fs" "os" "path/filepath" "strings" "sync" + "time" "unsafe" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. 
For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. 
For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type inotify struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error // Store fd here as os.File.Read() will no longer return on close after @@ -139,8 +28,26 @@ type Watcher struct { inotifyFile *os.File watches *watches done chan struct{} // Channel for sending a "quit message" to the reader goroutine - closeMu sync.Mutex + doneMu sync.Mutex doneResp chan struct{} // Channel to respond to Close + + // Store rename cookies in an array, with the index wrapping to 0. Almost + // all of the time what we get is a MOVED_FROM to set the cookie and the + // next event inotify sends will be MOVED_TO to read it. However, this is + // not guaranteed – as described in inotify(7) – and we may get other events + // between the two MOVED_* events (including other MOVED_* ones). + // + // A second issue is that moving a file outside the watched directory will + // trigger a MOVED_FROM to set the cookie, but we never see the MOVED_TO to + // read and delete it. So just storing it in a map would slowly leak memory. + // + // Doing it like this gives us a simple fast LRU-cache that won't allocate. + // Ten items should be more than enough for our purpose, and a loop over + // such a short array is faster than a map access anyway (not that it hugely + // matters since we're talking about hundreds of ns at the most, but still). + cookies [10]koekje + cookieIndex uint8 + cookiesMu sync.Mutex } type ( @@ -150,9 +57,14 @@ type ( path map[string]uint32 // pathname → wd } watch struct { - wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) - flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) - path string // Watch path. + wd uint32 // Watch descriptor (as returned by the inotify_add_watch() syscall) + flags uint32 // inotify flags of this watch (see inotify(7) for the list of valid flags) + path string // Watch path. + recurse bool // Recursion with ./...? + } + koekje struct { + cookie uint32 + path string } ) @@ -179,23 +91,45 @@ func (w *watches) add(ww *watch) { func (w *watches) remove(wd uint32) { w.mu.Lock() defer w.mu.Unlock() - delete(w.path, w.wd[wd].path) + watch := w.wd[wd] // Could have had Remove() called. See #616. 
+ if watch == nil { + return + } + delete(w.path, watch.path) delete(w.wd, wd) } -func (w *watches) removePath(path string) (uint32, bool) { +func (w *watches) removePath(path string) ([]uint32, error) { w.mu.Lock() defer w.mu.Unlock() + path, recurse := recursivePath(path) wd, ok := w.path[path] if !ok { - return 0, false + return nil, fmt.Errorf("%w: %s", ErrNonExistentWatch, path) + } + + watch := w.wd[wd] + if recurse && !watch.recurse { + return nil, fmt.Errorf("can't use /... with non-recursive watch %q", path) } delete(w.path, path) delete(w.wd, wd) + if !watch.recurse { + return []uint32{wd}, nil + } - return wd, true + wds := make([]uint32, 0, 8) + wds = append(wds, wd) + for p, rwd := range w.path { + if filepath.HasPrefix(p, path) { + delete(w.path, p) + delete(w.wd, rwd) + wds = append(wds, rwd) + } + } + return wds, nil } func (w *watches) byPath(path string) *watch { @@ -236,20 +170,11 @@ func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error return nil } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { // Need to set nonblocking mode for SetDeadline to work, otherwise blocking // I/O operations won't terminate on close. fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK) @@ -257,12 +182,12 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { return nil, errno } - w := &Watcher{ + w := &inotify{ + Events: ev, + Errors: errs, fd: fd, inotifyFile: os.NewFile(uintptr(fd), ""), watches: newWatches(), - Events: make(chan Event, sz), - Errors: make(chan error), done: make(chan struct{}), doneResp: make(chan struct{}), } @@ -272,26 +197,29 @@ func NewBufferedWatcher(sz uint) (*Watcher, error) { } // Returns true if the event was sent, or false if watcher is closed. -func (w *Watcher) sendEvent(e Event) bool { +func (w *inotify) sendEvent(e Event) bool { select { - case w.Events <- e: - return true case <-w.done: return false + case w.Events <- e: + return true } } // Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { - select { - case w.Errors <- err: +func (w *inotify) sendError(err error) bool { + if err == nil { return true + } + select { case <-w.done: return false + case w.Errors <- err: + return true } } -func (w *Watcher) isClosed() bool { +func (w *inotify) isClosed() bool { select { case <-w.done: return true @@ -300,15 +228,14 @@ func (w *Watcher) isClosed() bool { } } -// Close removes all watches and closes the Events channel. 
-func (w *Watcher) Close() error { - w.closeMu.Lock() +func (w *inotify) Close() error { + w.doneMu.Lock() if w.isClosed() { - w.closeMu.Unlock() + w.doneMu.Unlock() return nil } close(w.done) - w.closeMu.Unlock() + w.doneMu.Unlock() // Causes any blocking reads to return with an error, provided the file // still supports deadline operations. @@ -323,78 +250,104 @@ func (w *Watcher) Close() error { return nil } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *inotify) Add(name string) error { return w.AddWith(name) } + +func (w *inotify) AddWith(path string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), path) + } + + with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } - name = filepath.Clean(name) - _ = getOptions(opts...) + path, recurse := recursivePath(path) + if recurse { + return filepath.WalkDir(path, func(root string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if !d.IsDir() { + if root == path { + return fmt.Errorf("fsnotify: not a directory: %q", path) + } + return nil + } - var flags uint32 = unix.IN_MOVED_TO | unix.IN_MOVED_FROM | - unix.IN_CREATE | unix.IN_ATTRIB | unix.IN_MODIFY | - unix.IN_MOVE_SELF | unix.IN_DELETE | unix.IN_DELETE_SELF + // Send a Create event when adding new directory from a recursive + // watch; this is for "mkdir -p one/two/three". 
Usually all those + // directories will be created before we can set up watchers on the + // subdirectories, so only "one" would be sent as a Create event and + // not "one/two" and "one/two/three" (inotifywait -r has the same + // problem). + if with.sendCreate && root != path { + w.sendEvent(Event{Name: root, Op: Create}) + } + + return w.add(root, with, true) + }) + } - return w.watches.updatePath(name, func(existing *watch) (*watch, error) { + return w.add(path, with, false) +} + +func (w *inotify) add(path string, with withOpts, recurse bool) error { + var flags uint32 + if with.noFollow { + flags |= unix.IN_DONT_FOLLOW + } + if with.op.Has(Create) { + flags |= unix.IN_CREATE + } + if with.op.Has(Write) { + flags |= unix.IN_MODIFY + } + if with.op.Has(Remove) { + flags |= unix.IN_DELETE | unix.IN_DELETE_SELF + } + if with.op.Has(Rename) { + flags |= unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_MOVE_SELF + } + if with.op.Has(Chmod) { + flags |= unix.IN_ATTRIB + } + if with.op.Has(xUnportableOpen) { + flags |= unix.IN_OPEN + } + if with.op.Has(xUnportableRead) { + flags |= unix.IN_ACCESS + } + if with.op.Has(xUnportableCloseWrite) { + flags |= unix.IN_CLOSE_WRITE + } + if with.op.Has(xUnportableCloseRead) { + flags |= unix.IN_CLOSE_NOWRITE + } + return w.register(path, flags, recurse) +} + +func (w *inotify) register(path string, flags uint32, recurse bool) error { + return w.watches.updatePath(path, func(existing *watch) (*watch, error) { if existing != nil { flags |= existing.flags | unix.IN_MASK_ADD } - wd, err := unix.InotifyAddWatch(w.fd, name, flags) + wd, err := unix.InotifyAddWatch(w.fd, path, flags) if wd == -1 { return nil, err } if existing == nil { return &watch{ - wd: uint32(wd), - path: name, - flags: flags, + wd: uint32(wd), + path: path, + flags: flags, + recurse: recurse, }, nil } @@ -404,49 +357,44 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { }) } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *inotify) Remove(name string) error { if w.isClosed() { return nil } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } return w.remove(filepath.Clean(name)) } -func (w *Watcher) remove(name string) error { - wd, ok := w.watches.removePath(name) - if !ok { - return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) - } - - success, errno := unix.InotifyRmWatch(w.fd, wd) - if success == -1 { - // TODO: Perhaps it's not helpful to return an error here in every case; - // The only two possible errors are: - // - // - EBADF, which happens when w.fd is not a valid file descriptor - // of any kind. - // - EINVAL, which is when fd is not an inotify descriptor or wd - // is not a valid watch descriptor. Watch descriptors are - // invalidated when they are removed explicitly or implicitly; - // explicitly by inotify_rm_watch, implicitly when the file they - // are watching is deleted. 
- return errno +func (w *inotify) remove(name string) error { + wds, err := w.watches.removePath(name) + if err != nil { + return err + } + + for _, wd := range wds { + _, err := unix.InotifyRmWatch(w.fd, wd) + if err != nil { + // TODO: Perhaps it's not helpful to return an error here in every + // case; the only two possible errors are: + // + // EBADF, which happens when w.fd is not a valid file descriptor of + // any kind. + // + // EINVAL, which is when fd is not an inotify descriptor or wd is + // not a valid watch descriptor. Watch descriptors are invalidated + // when they are removed explicitly or implicitly; explicitly by + // inotify_rm_watch, implicitly when the file they are watching is + // deleted. + return err + } } return nil } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { +func (w *inotify) WatchList() []string { if w.isClosed() { return nil } @@ -463,7 +411,7 @@ func (w *Watcher) WatchList() []string { // readEvents reads from the inotify file descriptor, converts the // received events into Event objects and sends them via the Events channel -func (w *Watcher) readEvents() { +func (w *inotify) readEvents() { defer func() { close(w.doneResp) close(w.Errors) @@ -506,15 +454,17 @@ func (w *Watcher) readEvents() { continue } - var offset uint32 // We don't know how many events we just read into the buffer // While the offset points to at least one whole event... + var offset uint32 for offset <= uint32(n-unix.SizeofInotifyEvent) { var ( // Point "raw" to the event in the buffer raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) mask = uint32(raw.Mask) nameLen = uint32(raw.Len) + // Move to the next event in the buffer + next = func() { offset += unix.SizeofInotifyEvent + nameLen } ) if mask&unix.IN_Q_OVERFLOW != 0 { @@ -523,21 +473,53 @@ func (w *Watcher) readEvents() { } } - // If the event happened to the watched directory or the watched file, the kernel - // doesn't append the filename to the event, but we would like to always fill the - // the "Name" field with a valid filename. We retrieve the path of the watch from - // the "paths" map. + /// If the event happened to the watched directory or the watched + /// file, the kernel doesn't append the filename to the event, but + /// we would like to always fill the the "Name" field with a valid + /// filename. We retrieve the path of the watch from the "paths" + /// map. watch := w.watches.byWd(uint32(raw.Wd)) + /// Can be nil if Remove() was called in another goroutine for this + /// path inbetween reading the events from the kernel and reading + /// the internal state. Not much we can do about it, so just skip. + /// See #616. + if watch == nil { + next() + continue + } + + name := watch.path + if nameLen > 0 { + /// Point "bytes" at the first byte of the filename + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] + /// The filename is padded with NULL bytes. TrimRight() gets rid of those. + name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + } + + if debug { + internal.Debug(name, raw.Mask, raw.Cookie) + } + + if mask&unix.IN_IGNORED != 0 { //&& event.Op != 0 + next() + continue + } // inotify will automatically remove the watch on deletes; just need // to clean our state here. 
- if watch != nil && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { + if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { w.watches.remove(watch.wd) } + // We can't really update the state when a watched path is moved; // only IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove // the watch. - if watch != nil && mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { + if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { + if watch.recurse { + next() // Do nothing + continue + } + err := w.remove(watch.path) if err != nil && !errors.Is(err, ErrNonExistentWatch) { if !w.sendError(err) { @@ -546,34 +528,69 @@ func (w *Watcher) readEvents() { } } - var name string - if watch != nil { - name = watch.path - } - if nameLen > 0 { - // Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] - // The filename is padded with NULL bytes. TrimRight() gets rid of those. - name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") + /// Skip if we're watching both this path and the parent; the parent + /// will already send a delete so no need to do it twice. + if mask&unix.IN_DELETE_SELF != 0 { + if _, ok := w.watches.path[filepath.Dir(watch.path)]; ok { + next() + continue + } } - event := w.newEvent(name, mask) + ev := w.newEvent(name, mask, raw.Cookie) + // Need to update watch path for recurse. + if watch.recurse { + isDir := mask&unix.IN_ISDIR == unix.IN_ISDIR + /// New directory created: set up watch on it. + if isDir && ev.Has(Create) { + err := w.register(ev.Name, watch.flags, true) + if !w.sendError(err) { + return + } - // Send the events that are not ignored on the events channel - if mask&unix.IN_IGNORED == 0 { - if !w.sendEvent(event) { - return + // This was a directory rename, so we need to update all + // the children. + // + // TODO: this is of course pretty slow; we should use a + // better data structure for storing all of this, e.g. store + // children in the watch. I have some code for this in my + // kqueue refactor we can use in the future. For now I'm + // okay with this as it's not publicly available. + // Correctness first, performance second. + if ev.renamedFrom != "" { + w.watches.mu.Lock() + for k, ww := range w.watches.wd { + if k == watch.wd || ww.path == ev.Name { + continue + } + if strings.HasPrefix(ww.path, ev.renamedFrom) { + ww.path = strings.Replace(ww.path, ev.renamedFrom, ev.Name, 1) + w.watches.wd[k] = ww + } + } + w.watches.mu.Unlock() + } } } - // Move to the next event in the buffer - offset += unix.SizeofInotifyEvent + nameLen + /// Send the events that are not ignored on the events channel + if !w.sendEvent(ev) { + return + } + next() } } } -// newEvent returns an platform-independent Event based on an inotify mask. -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *inotify) isRecursive(path string) bool { + ww := w.watches.byPath(path) + if ww == nil { // path could be a file, so also check the Dir. 
+ ww = w.watches.byPath(filepath.Dir(path)) + } + return ww != nil && ww.recurse +} + +func (w *inotify) newEvent(name string, mask, cookie uint32) Event { e := Event{Name: name} if mask&unix.IN_CREATE == unix.IN_CREATE || mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { e.Op |= Create @@ -584,11 +601,58 @@ func (w *Watcher) newEvent(name string, mask uint32) Event { if mask&unix.IN_MODIFY == unix.IN_MODIFY { e.Op |= Write } + if mask&unix.IN_OPEN == unix.IN_OPEN { + e.Op |= xUnportableOpen + } + if mask&unix.IN_ACCESS == unix.IN_ACCESS { + e.Op |= xUnportableRead + } + if mask&unix.IN_CLOSE_WRITE == unix.IN_CLOSE_WRITE { + e.Op |= xUnportableCloseWrite + } + if mask&unix.IN_CLOSE_NOWRITE == unix.IN_CLOSE_NOWRITE { + e.Op |= xUnportableCloseRead + } if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF || mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { e.Op |= Rename } if mask&unix.IN_ATTRIB == unix.IN_ATTRIB { e.Op |= Chmod } + + if cookie != 0 { + if mask&unix.IN_MOVED_FROM == unix.IN_MOVED_FROM { + w.cookiesMu.Lock() + w.cookies[w.cookieIndex] = koekje{cookie: cookie, path: e.Name} + w.cookieIndex++ + if w.cookieIndex > 9 { + w.cookieIndex = 0 + } + w.cookiesMu.Unlock() + } else if mask&unix.IN_MOVED_TO == unix.IN_MOVED_TO { + w.cookiesMu.Lock() + var prev string + for _, c := range w.cookies { + if c.cookie == cookie { + prev = c.path + break + } + } + w.cookiesMu.Unlock() + e.renamedFrom = prev + } + } return e } + +func (w *inotify) xSupports(op Op) bool { + return true // Supports everything. +} + +func (w *inotify) state() { + w.watches.mu.Lock() + defer w.watches.mu.Unlock() + for wd, ww := range w.watches.wd { + fmt.Fprintf(os.Stderr, "%4d: recurse=%t %q\n", wd, ww.recurse, ww.path) + } +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go index 063a0915a0..d8de5ab76f 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go @@ -1,8 +1,4 @@ //go:build freebsd || openbsd || netbsd || dragonfly || darwin -// +build freebsd openbsd netbsd dragonfly darwin - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -11,174 +7,195 @@ import ( "fmt" "os" "path/filepath" + "runtime" "sync" + "time" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/unix" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". 
-// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. 
+type kqueue struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error - done chan struct{} - kq int // File descriptor (as returned by the kqueue() syscall). - closepipe [2]int // Pipe used for closing. - mu sync.Mutex // Protects access to watcher data - watches map[string]int // Watched file descriptors (key: path). - watchesByDir map[string]map[int]struct{} // Watched file descriptors indexed by the parent directory (key: dirname(path)). - userWatches map[string]struct{} // Watches added with Watcher.Add() - dirFlags map[string]uint32 // Watched directories to fflags used in kqueue. - paths map[int]pathInfo // File descriptors to path names for processing kqueue events. - fileExists map[string]struct{} // Keep track of if we know this file exists (to stop duplicate create events). - isClosed bool // Set to true when Close() is first called + kq int // File descriptor (as returned by the kqueue() syscall). + closepipe [2]int // Pipe used for closing kq. + watches *watches + done chan struct{} + doneMu sync.Mutex } -type pathInfo struct { - name string - isDir bool +type ( + watches struct { + mu sync.RWMutex + wd map[int]watch // wd → watch + path map[string]int // pathname → wd + byDir map[string]map[int]struct{} // dirname(path) → wd + seen map[string]struct{} // Keep track of if we know this file exists. + byUser map[string]struct{} // Watches added with Watcher.Add() + } + watch struct { + wd int + name string + linkName string // In case of links; name is the target, and this is the link. + isDir bool + dirFlags uint32 + } +) + +func newWatches() *watches { + return &watches{ + wd: make(map[int]watch), + path: make(map[string]int), + byDir: make(map[string]map[int]struct{}), + seen: make(map[string]struct{}), + byUser: make(map[string]struct{}), + } } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(0) +func (w *watches) listPaths(userOnly bool) []string { + w.mu.RLock() + defer w.mu.RUnlock() + + if userOnly { + l := make([]string, 0, len(w.byUser)) + for p := range w.byUser { + l = append(l, p) + } + return l + } + + l := make([]string, 0, len(w.path)) + for p := range w.path { + l = append(l, p) + } + return l } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func (w *watches) watchesInDir(path string) []string { + w.mu.RLock() + defer w.mu.RUnlock() + + l := make([]string, 0, 4) + for fd := range w.byDir[path] { + info := w.wd[fd] + if _, ok := w.byUser[info.name]; !ok { + l = append(l, info.name) + } + } + return l +} + +// Mark path as added by the user. 
+func (w *watches) addUserWatch(path string) { + w.mu.Lock() + defer w.mu.Unlock() + w.byUser[path] = struct{}{} +} + +func (w *watches) addLink(path string, fd int) { + w.mu.Lock() + defer w.mu.Unlock() + + w.path[path] = fd + w.seen[path] = struct{}{} +} + +func (w *watches) add(path, linkPath string, fd int, isDir bool) { + w.mu.Lock() + defer w.mu.Unlock() + + w.path[path] = fd + w.wd[fd] = watch{wd: fd, name: path, linkName: linkPath, isDir: isDir} + + parent := filepath.Dir(path) + byDir, ok := w.byDir[parent] + if !ok { + byDir = make(map[int]struct{}, 1) + w.byDir[parent] = byDir + } + byDir[fd] = struct{}{} +} + +func (w *watches) byWd(fd int) (watch, bool) { + w.mu.RLock() + defer w.mu.RUnlock() + info, ok := w.wd[fd] + return info, ok +} + +func (w *watches) byPath(path string) (watch, bool) { + w.mu.RLock() + defer w.mu.RUnlock() + info, ok := w.wd[w.path[path]] + return info, ok +} + +func (w *watches) updateDirFlags(path string, flags uint32) { + w.mu.Lock() + defer w.mu.Unlock() + + fd := w.path[path] + info := w.wd[fd] + info.dirFlags = flags + w.wd[fd] = info +} + +func (w *watches) remove(fd int, path string) bool { + w.mu.Lock() + defer w.mu.Unlock() + + isDir := w.wd[fd].isDir + delete(w.path, path) + delete(w.byUser, path) + + parent := filepath.Dir(path) + delete(w.byDir[parent], fd) + + if len(w.byDir[parent]) == 0 { + delete(w.byDir, parent) + } + + delete(w.wd, fd) + delete(w.seen, path) + return isDir +} + +func (w *watches) markSeen(path string, exists bool) { + w.mu.Lock() + defer w.mu.Unlock() + if exists { + w.seen[path] = struct{}{} + } else { + delete(w.seen, path) + } +} + +func (w *watches) seenBefore(path string) bool { + w.mu.RLock() + defer w.mu.RUnlock() + _, ok := w.seen[path] + return ok +} + +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(0, ev, errs) +} + +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { kq, closepipe, err := newKqueue() if err != nil { return nil, err } - w := &Watcher{ - kq: kq, - closepipe: closepipe, - watches: make(map[string]int), - watchesByDir: make(map[string]map[int]struct{}), - dirFlags: make(map[string]uint32), - paths: make(map[int]pathInfo), - fileExists: make(map[string]struct{}), - userWatches: make(map[string]struct{}), - Events: make(chan Event, sz), - Errors: make(chan error), - done: make(chan struct{}), + w := &kqueue{ + Events: ev, + Errors: errs, + kq: kq, + closepipe: closepipe, + done: make(chan struct{}), + watches: newWatches(), } go w.readEvents() @@ -203,6 +220,8 @@ func newKqueue() (kq int, closepipe [2]int, err error) { unix.Close(kq) return kq, closepipe, err } + unix.CloseOnExec(closepipe[0]) + unix.CloseOnExec(closepipe[1]) // Register changes to listen on the closepipe. changes := make([]unix.Kevent_t, 1) @@ -221,166 +240,108 @@ func newKqueue() (kq int, closepipe [2]int, err error) { } // Returns true if the event was sent, or false if watcher is closed. -func (w *Watcher) sendEvent(e Event) bool { +func (w *kqueue) sendEvent(e Event) bool { select { - case w.Events <- e: - return true case <-w.done: return false + case w.Events <- e: + return true } } // Returns true if the error was sent, or false if watcher is closed. 
-func (w *Watcher) sendError(err error) bool { +func (w *kqueue) sendError(err error) bool { + if err == nil { + return true + } select { + case <-w.done: + return false case w.Errors <- err: return true + } +} + +func (w *kqueue) isClosed() bool { + select { case <-w.done: + return true + default: return false } } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) Close() error { + w.doneMu.Lock() + if w.isClosed() { + w.doneMu.Unlock() return nil } - w.isClosed = true + close(w.done) + w.doneMu.Unlock() - // copy paths to remove while locked - pathsToRemove := make([]string, 0, len(w.watches)) - for name := range w.watches { - pathsToRemove = append(pathsToRemove, name) - } - w.mu.Unlock() // Unlock before calling Remove, which also locks + pathsToRemove := w.watches.listPaths(false) for _, name := range pathsToRemove { w.Remove(name) } // Send "quit" message to the reader goroutine. unix.Close(w.closepipe[1]) - close(w.done) - return nil } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *kqueue) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { - _ = getOptions(opts...) +func (w *kqueue) AddWith(name string, opts ...addOpt) error { + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } + + with := getOptions(opts...) 
+ if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } - w.mu.Lock() - w.userWatches[name] = struct{}{} - w.mu.Unlock() _, err := w.addWatch(name, noteAllEvents) - return err + if err != nil { + return err + } + w.watches.addUserWatch(name) + return nil } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *kqueue) Remove(name string) error { + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), name) + } return w.remove(name, true) } -func (w *Watcher) remove(name string, unwatchFiles bool) error { - name = filepath.Clean(name) - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) remove(name string, unwatchFiles bool) error { + if w.isClosed() { return nil } - watchfd, ok := w.watches[name] - w.mu.Unlock() + + name = filepath.Clean(name) + info, ok := w.watches.byPath(name) if !ok { return fmt.Errorf("%w: %s", ErrNonExistentWatch, name) } - err := w.register([]int{watchfd}, unix.EV_DELETE, 0) + err := w.register([]int{info.wd}, unix.EV_DELETE, 0) if err != nil { return err } - unix.Close(watchfd) - - w.mu.Lock() - isDir := w.paths[watchfd].isDir - delete(w.watches, name) - delete(w.userWatches, name) - - parentName := filepath.Dir(name) - delete(w.watchesByDir[parentName], watchfd) - - if len(w.watchesByDir[parentName]) == 0 { - delete(w.watchesByDir, parentName) - } + unix.Close(info.wd) - delete(w.paths, watchfd) - delete(w.dirFlags, name) - delete(w.fileExists, name) - w.mu.Unlock() + isDir := w.watches.remove(info.wd, name) // Find all watched paths that are in this directory that are not external. if unwatchFiles && isDir { - var pathsToRemove []string - w.mu.Lock() - for fd := range w.watchesByDir[name] { - path := w.paths[fd] - if _, ok := w.userWatches[path.name]; !ok { - pathsToRemove = append(pathsToRemove, path.name) - } - } - w.mu.Unlock() + pathsToRemove := w.watches.watchesInDir(name) for _, name := range pathsToRemove { // Since these are internal, not much sense in propagating error to // the user, as that will just confuse them with an error about a @@ -391,23 +352,11 @@ func (w *Watcher) remove(name string, unwatchFiles bool) error { return nil } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { - w.mu.Lock() - defer w.mu.Unlock() - if w.isClosed { +func (w *kqueue) WatchList() []string { + if w.isClosed() { return nil } - - entries := make([]string, 0, len(w.userWatches)) - for pathname := range w.userWatches { - entries = append(entries, pathname) - } - - return entries + return w.watches.listPaths(true) } // Watch all events (except NOTE_EXTEND, NOTE_LINK, NOTE_REVOKE) @@ -417,34 +366,26 @@ const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | un // described in kevent(2). // // Returns the real path to the file which was added, with symlinks resolved. 
-func (w *Watcher) addWatch(name string, flags uint32) (string, error) { - var isDir bool - name = filepath.Clean(name) - - w.mu.Lock() - if w.isClosed { - w.mu.Unlock() +func (w *kqueue) addWatch(name string, flags uint32) (string, error) { + if w.isClosed() { return "", ErrClosed } - watchfd, alreadyWatching := w.watches[name] - // We already have a watch, but we can still override flags. - if alreadyWatching { - isDir = w.paths[watchfd].isDir - } - w.mu.Unlock() + name = filepath.Clean(name) + + info, alreadyWatching := w.watches.byPath(name) if !alreadyWatching { fi, err := os.Lstat(name) if err != nil { return "", err } - // Don't watch sockets or named pipes + // Don't watch sockets or named pipes. if (fi.Mode()&os.ModeSocket == os.ModeSocket) || (fi.Mode()&os.ModeNamedPipe == os.ModeNamedPipe) { return "", nil } - // Follow Symlinks. + // Follow symlinks. if fi.Mode()&os.ModeSymlink == os.ModeSymlink { link, err := os.Readlink(name) if err != nil { @@ -455,18 +396,15 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { return "", nil } - w.mu.Lock() - _, alreadyWatching = w.watches[link] - w.mu.Unlock() - + _, alreadyWatching = w.watches.byPath(link) if alreadyWatching { // Add to watches so we don't get spurious Create events later // on when we diff the directories. - w.watches[name] = 0 - w.fileExists[name] = struct{}{} + w.watches.addLink(name, 0) return link, nil } + info.linkName = name name = link fi, err = os.Lstat(name) if err != nil { @@ -477,7 +415,7 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { // Retry on EINTR; open() can return EINTR in practice on macOS. // See #354, and Go issues 11180 and 39237. for { - watchfd, err = unix.Open(name, openMode, 0) + info.wd, err = unix.Open(name, openMode, 0) if err == nil { break } @@ -488,40 +426,25 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { return "", err } - isDir = fi.IsDir() + info.isDir = fi.IsDir() } - err := w.register([]int{watchfd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) + err := w.register([]int{info.wd}, unix.EV_ADD|unix.EV_CLEAR|unix.EV_ENABLE, flags) if err != nil { - unix.Close(watchfd) + unix.Close(info.wd) return "", err } if !alreadyWatching { - w.mu.Lock() - parentName := filepath.Dir(name) - w.watches[name] = watchfd - - watchesByDir, ok := w.watchesByDir[parentName] - if !ok { - watchesByDir = make(map[int]struct{}, 1) - w.watchesByDir[parentName] = watchesByDir - } - watchesByDir[watchfd] = struct{}{} - w.paths[watchfd] = pathInfo{name: name, isDir: isDir} - w.mu.Unlock() + w.watches.add(name, info.linkName, info.wd, info.isDir) } - if isDir { - // Watch the directory if it has not been watched before, or if it was - // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) - w.mu.Lock() - + // Watch the directory if it has not been watched before, or if it was + // watched before, but perhaps only a NOTE_DELETE (watchDirectoryFiles) + if info.isDir { watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && - (!alreadyWatching || (w.dirFlags[name]&unix.NOTE_WRITE) != unix.NOTE_WRITE) - // Store flags so this watch can be updated later - w.dirFlags[name] = flags - w.mu.Unlock() + (!alreadyWatching || (info.dirFlags&unix.NOTE_WRITE) != unix.NOTE_WRITE) + w.watches.updateDirFlags(name, flags) if watchDir { if err := w.watchDirectoryFiles(name); err != nil { @@ -534,7 +457,7 @@ func (w *Watcher) addWatch(name string, flags uint32) (string, error) { // readEvents reads from kqueue and converts the received 
kevents into // Event values that it sends down the Events channel. -func (w *Watcher) readEvents() { +func (w *kqueue) readEvents() { defer func() { close(w.Events) close(w.Errors) @@ -543,50 +466,65 @@ func (w *Watcher) readEvents() { }() eventBuffer := make([]unix.Kevent_t, 10) - for closed := false; !closed; { + for { kevents, err := w.read(eventBuffer) // EINTR is okay, the syscall was interrupted before timeout expired. if err != nil && err != unix.EINTR { if !w.sendError(fmt.Errorf("fsnotify.readEvents: %w", err)) { - closed = true + return } - continue } - // Flush the events we received to the Events channel for _, kevent := range kevents { var ( - watchfd = int(kevent.Ident) - mask = uint32(kevent.Fflags) + wd = int(kevent.Ident) + mask = uint32(kevent.Fflags) ) // Shut down the loop when the pipe is closed, but only after all // other events have been processed. - if watchfd == w.closepipe[0] { - closed = true - continue + if wd == w.closepipe[0] { + return } - w.mu.Lock() - path := w.paths[watchfd] - w.mu.Unlock() + path, ok := w.watches.byWd(wd) + if debug { + internal.Debug(path.name, &kevent) + } - event := w.newEvent(path.name, mask) + // On macOS it seems that sometimes an event with Ident=0 is + // delivered, and no other flags/information beyond that, even + // though we never saw such a file descriptor. For example in + // TestWatchSymlink/277 (usually at the end, but sometimes sooner): + // + // fmt.Printf("READ: %2d %#v\n", kevent.Ident, kevent) + // unix.Kevent_t{Ident:0x2a, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)} + // unix.Kevent_t{Ident:0x0, Filter:-4, Flags:0x25, Fflags:0x2, Data:0, Udata:(*uint8)(nil)} + // + // The first is a normal event, the second with Ident 0. No error + // flag, no data, no ... nothing. + // + // I read a bit through bsd/kern_event.c from the xnu source, but I + // don't really see an obvious location where this is triggered – + // this doesn't seem intentional, but idk... + // + // Technically fd 0 is a valid descriptor, so only skip it if + // there's no path, and if we're on macOS. + if !ok && kevent.Ident == 0 && runtime.GOOS == "darwin" { + continue + } + + event := w.newEvent(path.name, path.linkName, mask) if event.Has(Rename) || event.Has(Remove) { w.remove(event.Name, false) - w.mu.Lock() - delete(w.fileExists, event.Name) - w.mu.Unlock() + w.watches.markSeen(event.Name, false) } if path.isDir && event.Has(Write) && !event.Has(Remove) { - w.sendDirectoryChangeEvents(event.Name) - } else { - if !w.sendEvent(event) { - closed = true - continue - } + w.dirChange(event.Name) + } else if !w.sendEvent(event) { + return } if event.Has(Remove) { @@ -594,25 +532,34 @@ func (w *Watcher) readEvents() { // mv f1 f2 will delete f2, then create f2. if path.isDir { fileDir := filepath.Clean(event.Name) - w.mu.Lock() - _, found := w.watches[fileDir] - w.mu.Unlock() + _, found := w.watches.byPath(fileDir) if found { - err := w.sendDirectoryChangeEvents(fileDir) - if err != nil { - if !w.sendError(err) { - closed = true - } + // TODO: this branch is never triggered in any test. + // Added in d6220df (2012). + // isDir check added in 8611c35 (2016): https://github.com/fsnotify/fsnotify/pull/111 + // + // I don't really get how this can be triggered either. + // And it wasn't triggered in the patch that added it, + // either. + // + // Original also had a comment: + // make sure the directory exists before we watch for + // changes. 
When we do a recursive watch and perform + // rm -rf, the parent directory might have gone + // missing, ignore the missing directory and let the + // upcoming delete event remove the watch from the + // parent directory. + err := w.dirChange(fileDir) + if !w.sendError(err) { + return } } } else { - filePath := filepath.Clean(event.Name) - if fi, err := os.Lstat(filePath); err == nil { - err := w.sendFileCreatedEventIfNew(filePath, fi) - if err != nil { - if !w.sendError(err) { - closed = true - } + path := filepath.Clean(event.Name) + if fi, err := os.Lstat(path); err == nil { + err := w.sendCreateIfNew(path, fi) + if !w.sendError(err) { + return } } } @@ -622,8 +569,14 @@ func (w *Watcher) readEvents() { } // newEvent returns an platform-independent Event based on kqueue Fflags. -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *kqueue) newEvent(name, linkName string, mask uint32) Event { e := Event{Name: name} + if linkName != "" { + // If the user watched "/path/link" then emit events as "/path/link" + // rather than "/path/target". + e.Name = linkName + } + if mask&unix.NOTE_DELETE == unix.NOTE_DELETE { e.Op |= Remove } @@ -645,8 +598,7 @@ func (w *Watcher) newEvent(name string, mask uint32) Event { } // watchDirectoryFiles to mimic inotify when adding a watch on a directory -func (w *Watcher) watchDirectoryFiles(dirPath string) error { - // Get all files +func (w *kqueue) watchDirectoryFiles(dirPath string) error { files, err := os.ReadDir(dirPath) if err != nil { return err @@ -674,9 +626,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error { } } - w.mu.Lock() - w.fileExists[cleanPath] = struct{}{} - w.mu.Unlock() + w.watches.markSeen(cleanPath, true) } return nil @@ -686,7 +636,7 @@ func (w *Watcher) watchDirectoryFiles(dirPath string) error { // // This functionality is to have the BSD watcher match the inotify, which sends // a create event for files created in a watched directory. -func (w *Watcher) sendDirectoryChangeEvents(dir string) error { +func (w *kqueue) dirChange(dir string) error { files, err := os.ReadDir(dir) if err != nil { // Directory no longer exists: we can ignore this safely. kqueue will @@ -694,61 +644,51 @@ func (w *Watcher) sendDirectoryChangeEvents(dir string) error { if errors.Is(err, os.ErrNotExist) { return nil } - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } for _, f := range files { fi, err := f.Info() if err != nil { - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } - err = w.sendFileCreatedEventIfNew(filepath.Join(dir, fi.Name()), fi) + err = w.sendCreateIfNew(filepath.Join(dir, fi.Name()), fi) if err != nil { // Don't need to send an error if this file isn't readable. if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) { return nil } - return fmt.Errorf("fsnotify.sendDirectoryChangeEvents: %w", err) + return fmt.Errorf("fsnotify.dirChange: %w", err) } } return nil } -// sendFileCreatedEvent sends a create event if the file isn't already being tracked. -func (w *Watcher) sendFileCreatedEventIfNew(filePath string, fi os.FileInfo) (err error) { - w.mu.Lock() - _, doesExist := w.fileExists[filePath] - w.mu.Unlock() - if !doesExist { - if !w.sendEvent(Event{Name: filePath, Op: Create}) { - return +// Send a create event if the file isn't already being tracked, and start +// watching this file. 
+func (w *kqueue) sendCreateIfNew(path string, fi os.FileInfo) error { + if !w.watches.seenBefore(path) { + if !w.sendEvent(Event{Name: path, Op: Create}) { + return nil } } - // like watchDirectoryFiles (but without doing another ReadDir) - filePath, err = w.internalWatch(filePath, fi) + // Like watchDirectoryFiles, but without doing another ReadDir. + path, err := w.internalWatch(path, fi) if err != nil { return err } - - w.mu.Lock() - w.fileExists[filePath] = struct{}{} - w.mu.Unlock() - + w.watches.markSeen(path, true) return nil } -func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) { +func (w *kqueue) internalWatch(name string, fi os.FileInfo) (string, error) { if fi.IsDir() { // mimic Linux providing delete events for subdirectories, but preserve // the flags used if currently watching subdirectory - w.mu.Lock() - flags := w.dirFlags[name] - w.mu.Unlock() - - flags |= unix.NOTE_DELETE | unix.NOTE_RENAME - return w.addWatch(name, flags) + info, _ := w.watches.byPath(name) + return w.addWatch(name, info.dirFlags|unix.NOTE_DELETE|unix.NOTE_RENAME) } // watch file to mimic Linux inotify @@ -756,7 +696,7 @@ func (w *Watcher) internalWatch(name string, fi os.FileInfo) (string, error) { } // Register events with the queue. -func (w *Watcher) register(fds []int, flags int, fflags uint32) error { +func (w *kqueue) register(fds []int, flags int, fflags uint32) error { changes := make([]unix.Kevent_t, len(fds)) for i, fd := range fds { // SetKevent converts int to the platform-specific types. @@ -773,10 +713,21 @@ func (w *Watcher) register(fds []int, flags int, fflags uint32) error { } // read retrieves pending events, or waits until an event occurs. -func (w *Watcher) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { +func (w *kqueue) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { n, err := unix.Kevent(w.kq, nil, events, nil) if err != nil { return nil, err } return events[0:n], nil } + +func (w *kqueue) xSupports(op Op) bool { + if runtime.GOOS == "freebsd" { + //return true // Supports everything. + } + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/backend_other.go b/vendor/github.com/fsnotify/fsnotify/backend_other.go index d34a23c015..5eb5dbc66f 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_other.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_other.go @@ -1,205 +1,23 @@ //go:build appengine || (!darwin && !dragonfly && !freebsd && !openbsd && !linux && !netbsd && !solaris && !windows) -// +build appengine !darwin,!dragonfly,!freebsd,!openbsd,!linux,!netbsd,!solaris,!windows - -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify import "errors" -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. 
-// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. -// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). 
- // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type other struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error } -// NewWatcher creates a new Watcher. -func NewWatcher() (*Watcher, error) { +func newBackend(ev chan Event, errs chan error) (backend, error) { return nil, errors.New("fsnotify not supported on the current platform") } - -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { return NewWatcher() } - -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { return nil } - -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) WatchList() []string { return nil } - -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. -// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return nil } - -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. 
-// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { return nil } - -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { return nil } +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { + return newBackend(ev, errs) +} +func (w *other) Close() error { return nil } +func (w *other) WatchList() []string { return nil } +func (w *other) Add(name string) error { return nil } +func (w *other) AddWith(name string, opts ...addOpt) error { return nil } +func (w *other) Remove(name string) error { return nil } +func (w *other) xSupports(op Op) bool { return false } diff --git a/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/vendor/github.com/fsnotify/fsnotify/backend_windows.go index 9bc91e5d61..c54a630838 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_windows.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_windows.go @@ -1,12 +1,8 @@ //go:build windows -// +build windows // Windows backend based on ReadDirectoryChangesW() // // https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw -// -// Note: the documentation on the Watcher type and methods is generated from -// mkdoc.zsh package fsnotify @@ -19,123 +15,15 @@ import ( "runtime" "strings" "sync" + "time" "unsafe" + "github.com/fsnotify/fsnotify/internal" "golang.org/x/sys/windows" ) -// Watcher watches a set of paths, delivering events on a channel. -// -// A watcher should not be copied (e.g. pass it by pointer, rather than by -// value). -// -// # Linux notes -// -// When a file is removed a Remove event won't be emitted until all file -// descriptors are closed, and deletes will always emit a Chmod. For example: -// -// fp := os.Open("file") -// os.Remove("file") // Triggers Chmod -// fp.Close() // Triggers Remove -// -// This is the event that inotify sends, so not much can be changed about this. -// -// The fs.inotify.max_user_watches sysctl variable specifies the upper limit -// for the number of watches per user, and fs.inotify.max_user_instances -// specifies the maximum number of inotify instances per user. Every Watcher you -// create is an "instance", and every path you add is a "watch". -// -// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and -// /proc/sys/fs/inotify/max_user_instances -// -// To increase them you can use sysctl or write the value to the /proc file: -// -// # Default values on Linux 5.18 -// sysctl fs.inotify.max_user_watches=124983 -// sysctl fs.inotify.max_user_instances=128 -// -// To make the changes persist on reboot edit /etc/sysctl.conf or -// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check -// your distro's documentation): -// -// fs.inotify.max_user_watches=124983 -// fs.inotify.max_user_instances=128 -// -// Reaching the limit will result in a "no space left on device" or "too many open -// files" error. 
-// -// # kqueue notes (macOS, BSD) -// -// kqueue requires opening a file descriptor for every file that's being watched; -// so if you're watching a directory with five files then that's six file -// descriptors. You will run in to your system's "max open files" limit faster on -// these platforms. -// -// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to -// control the maximum number of open files, as well as /etc/login.conf on BSD -// systems. -// -// # Windows notes -// -// Paths can be added as "C:\path\to\dir", but forward slashes -// ("C:/path/to/dir") will also work. -// -// When a watched directory is removed it will always send an event for the -// directory itself, but may not send events for all files in that directory. -// Sometimes it will send events for all times, sometimes it will send no -// events, and often only for some files. -// -// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest -// value that is guaranteed to work with SMB filesystems. If you have many -// events in quick succession this may not be enough, and you will have to use -// [WithBufferSize] to increase the value. -type Watcher struct { - // Events sends the filesystem change events. - // - // fsnotify can send the following events; a "path" here can refer to a - // file, directory, symbolic link, or special file like a FIFO. - // - // fsnotify.Create A new path was created; this may be followed by one - // or more Write events if data also gets written to a - // file. - // - // fsnotify.Remove A path was removed. - // - // fsnotify.Rename A path was renamed. A rename is always sent with the - // old path as Event.Name, and a Create event will be - // sent with the new name. Renames are only sent for - // paths that are currently watched; e.g. moving an - // unmonitored file into a monitored directory will - // show up as just a Create. Similarly, renaming a file - // to outside a monitored directory will show up as - // only a Rename. - // - // fsnotify.Write A file or named pipe was written to. A Truncate will - // also trigger a Write. A single "write action" - // initiated by the user may show up as one or multiple - // writes, depending on when the system syncs things to - // disk. For example when compiling a large Go program - // you may get hundreds of Write events, and you may - // want to wait until you've stopped receiving them - // (see the dedup example in cmd/fsnotify). - // - // Some systems may send Write event for directories - // when the directory content changes. - // - // fsnotify.Chmod Attributes were changed. On Linux this is also sent - // when a file is removed (or more accurately, when a - // link to an inode is removed). On kqueue it's sent - // when a file is truncated. On Windows it's never - // sent. +type readDirChangesW struct { Events chan Event - - // Errors sends any errors. - // - // ErrEventOverflow is used to indicate there are too many events: - // - // - inotify: There are too many queued events (fs.inotify.max_queued_events sysctl) - // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. - // - kqueue, fen: Not used. Errors chan error port windows.Handle // Handle to completion port @@ -147,48 +35,40 @@ type Watcher struct { closed bool // Set to true when Close() is first called } -// NewWatcher creates a new Watcher. 
-func NewWatcher() (*Watcher, error) { - return NewBufferedWatcher(50) +func newBackend(ev chan Event, errs chan error) (backend, error) { + return newBufferedBackend(50, ev, errs) } -// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events -// channel. -// -// The main use case for this is situations with a very large number of events -// where the kernel buffer size can't be increased (e.g. due to lack of -// permissions). An unbuffered Watcher will perform better for almost all use -// cases, and whenever possible you will be better off increasing the kernel -// buffers instead of adding a large userspace buffer. -func NewBufferedWatcher(sz uint) (*Watcher, error) { +func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0) if err != nil { return nil, os.NewSyscallError("CreateIoCompletionPort", err) } - w := &Watcher{ + w := &readDirChangesW{ + Events: ev, + Errors: errs, port: port, watches: make(watchMap), input: make(chan *input, 1), - Events: make(chan Event, sz), - Errors: make(chan error), quit: make(chan chan<- error, 1), } go w.readEvents() return w, nil } -func (w *Watcher) isClosed() bool { +func (w *readDirChangesW) isClosed() bool { w.mu.Lock() defer w.mu.Unlock() return w.closed } -func (w *Watcher) sendEvent(name string, mask uint64) bool { +func (w *readDirChangesW) sendEvent(name, renamedFrom string, mask uint64) bool { if mask == 0 { return false } event := w.newEvent(name, uint32(mask)) + event.renamedFrom = renamedFrom select { case ch := <-w.quit: w.quit <- ch @@ -198,17 +78,19 @@ func (w *Watcher) sendEvent(name string, mask uint64) bool { } // Returns true if the error was sent, or false if watcher is closed. -func (w *Watcher) sendError(err error) bool { +func (w *readDirChangesW) sendError(err error) bool { + if err == nil { + return true + } select { case w.Errors <- err: return true case <-w.quit: + return false } - return false } -// Close removes all watches and closes the Events channel. -func (w *Watcher) Close() error { +func (w *readDirChangesW) Close() error { if w.isClosed() { return nil } @@ -226,57 +108,21 @@ func (w *Watcher) Close() error { return <-ch } -// Add starts monitoring the path for changes. -// -// A path can only be watched once; watching it more than once is a no-op and will -// not return an error. Paths that do not yet exist on the filesystem cannot be -// watched. -// -// A watch will be automatically removed if the watched path is deleted or -// renamed. The exception is the Windows backend, which doesn't remove the -// watcher on renames. -// -// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special -// filesystems (/proc, /sys, etc.) generally don't work. -// -// Returns [ErrClosed] if [Watcher.Close] was called. -// -// See [Watcher.AddWith] for a version that allows adding options. -// -// # Watching directories -// -// All files in a directory are monitored, including new files that are created -// after the watcher is started. Subdirectories are not watched (i.e. it's -// non-recursive). -// -// # Watching files -// -// Watching individual files (rather than directories) is generally not -// recommended as many programs (especially editors) update files atomically: it -// will write to a temporary file which is then moved to to destination, -// overwriting the original (or some variant thereof). The watcher on the -// original file is now lost, as that no longer exists. 
-// -// The upshot of this is that a power failure or crash won't leave a -// half-written file. -// -// Watch the parent directory and use Event.Name to filter out files you're not -// interested in. There is an example of this in cmd/fsnotify/file.go. -func (w *Watcher) Add(name string) error { return w.AddWith(name) } +func (w *readDirChangesW) Add(name string) error { return w.AddWith(name) } -// AddWith is like [Watcher.Add], but allows adding options. When using Add() -// the defaults described below are used. -// -// Possible options are: -// -// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on -// other platforms. The default is 64K (65536 bytes). -func (w *Watcher) AddWith(name string, opts ...addOpt) error { +func (w *readDirChangesW) AddWith(name string, opts ...addOpt) error { if w.isClosed() { return ErrClosed } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s AddWith(%q)\n", + time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name)) + } with := getOptions(opts...) + if !w.xSupports(with.op) { + return fmt.Errorf("%w: %s", xErrUnsupported, with.op) + } if with.bufsize < 4096 { return fmt.Errorf("fsnotify.WithBufferSize: buffer size cannot be smaller than 4096 bytes") } @@ -295,18 +141,14 @@ func (w *Watcher) AddWith(name string, opts ...addOpt) error { return <-in.reply } -// Remove stops monitoring the path for changes. -// -// Directories are always removed non-recursively. For example, if you added -// /tmp/dir and /tmp/dir/subdir then you will need to remove both. -// -// Removing a path that has not yet been added returns [ErrNonExistentWatch]. -// -// Returns nil if [Watcher.Close] was called. -func (w *Watcher) Remove(name string) error { +func (w *readDirChangesW) Remove(name string) error { if w.isClosed() { return nil } + if debug { + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", + time.Now().Format("15:04:05.000000000"), filepath.ToSlash(name)) + } in := &input{ op: opRemoveWatch, @@ -320,11 +162,7 @@ func (w *Watcher) Remove(name string) error { return <-in.reply } -// WatchList returns all paths explicitly added with [Watcher.Add] (and are not -// yet removed). -// -// Returns nil if [Watcher.Close] was called. 
-func (w *Watcher) WatchList() []string { +func (w *readDirChangesW) WatchList() []string { if w.isClosed() { return nil } @@ -335,7 +173,13 @@ func (w *Watcher) WatchList() []string { entries := make([]string, 0, len(w.watches)) for _, entry := range w.watches { for _, watchEntry := range entry { - entries = append(entries, watchEntry.path) + for name := range watchEntry.names { + entries = append(entries, filepath.Join(watchEntry.path, name)) + } + // the directory itself is being watched + if watchEntry.mask != 0 { + entries = append(entries, watchEntry.path) + } } } @@ -361,7 +205,7 @@ const ( sysFSIGNORED = 0x8000 ) -func (w *Watcher) newEvent(name string, mask uint32) Event { +func (w *readDirChangesW) newEvent(name string, mask uint32) Event { e := Event{Name: name} if mask&sysFSCREATE == sysFSCREATE || mask&sysFSMOVEDTO == sysFSMOVEDTO { e.Op |= Create @@ -417,7 +261,7 @@ type ( watchMap map[uint32]indexMap ) -func (w *Watcher) wakeupReader() error { +func (w *readDirChangesW) wakeupReader() error { err := windows.PostQueuedCompletionStatus(w.port, 0, 0, nil) if err != nil { return os.NewSyscallError("PostQueuedCompletionStatus", err) @@ -425,7 +269,7 @@ func (w *Watcher) wakeupReader() error { return nil } -func (w *Watcher) getDir(pathname string) (dir string, err error) { +func (w *readDirChangesW) getDir(pathname string) (dir string, err error) { attr, err := windows.GetFileAttributes(windows.StringToUTF16Ptr(pathname)) if err != nil { return "", os.NewSyscallError("GetFileAttributes", err) @@ -439,7 +283,7 @@ func (w *Watcher) getDir(pathname string) (dir string, err error) { return } -func (w *Watcher) getIno(path string) (ino *inode, err error) { +func (w *readDirChangesW) getIno(path string) (ino *inode, err error) { h, err := windows.CreateFile(windows.StringToUTF16Ptr(path), windows.FILE_LIST_DIRECTORY, windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE, @@ -482,9 +326,8 @@ func (m watchMap) set(ino *inode, watch *watch) { } // Must run within the I/O thread. -func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error { - //pathname, recurse := recursivePath(pathname) - recurse := false +func (w *readDirChangesW) addWatch(pathname string, flags uint64, bufsize int) error { + pathname, recurse := recursivePath(pathname) dir, err := w.getDir(pathname) if err != nil { @@ -538,7 +381,7 @@ func (w *Watcher) addWatch(pathname string, flags uint64, bufsize int) error { } // Must run within the I/O thread. -func (w *Watcher) remWatch(pathname string) error { +func (w *readDirChangesW) remWatch(pathname string) error { pathname, recurse := recursivePath(pathname) dir, err := w.getDir(pathname) @@ -566,11 +409,11 @@ func (w *Watcher) remWatch(pathname string) error { return fmt.Errorf("%w: %s", ErrNonExistentWatch, pathname) } if pathname == dir { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED) watch.mask = 0 } else { name := filepath.Base(pathname) - w.sendEvent(filepath.Join(watch.path, name), watch.names[name]&sysFSIGNORED) + w.sendEvent(filepath.Join(watch.path, name), "", watch.names[name]&sysFSIGNORED) delete(watch.names, name) } @@ -578,23 +421,23 @@ func (w *Watcher) remWatch(pathname string) error { } // Must run within the I/O thread. 
-func (w *Watcher) deleteWatch(watch *watch) { +func (w *readDirChangesW) deleteWatch(watch *watch) { for name, mask := range watch.names { if mask&provisional == 0 { - w.sendEvent(filepath.Join(watch.path, name), mask&sysFSIGNORED) + w.sendEvent(filepath.Join(watch.path, name), "", mask&sysFSIGNORED) } delete(watch.names, name) } if watch.mask != 0 { if watch.mask&provisional == 0 { - w.sendEvent(watch.path, watch.mask&sysFSIGNORED) + w.sendEvent(watch.path, "", watch.mask&sysFSIGNORED) } watch.mask = 0 } } // Must run within the I/O thread. -func (w *Watcher) startRead(watch *watch) error { +func (w *readDirChangesW) startRead(watch *watch) error { err := windows.CancelIo(watch.ino.handle) if err != nil { w.sendError(os.NewSyscallError("CancelIo", err)) @@ -624,7 +467,7 @@ func (w *Watcher) startRead(watch *watch) error { err := os.NewSyscallError("ReadDirectoryChanges", rdErr) if rdErr == windows.ERROR_ACCESS_DENIED && watch.mask&provisional == 0 { // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF) err = nil } w.deleteWatch(watch) @@ -637,7 +480,7 @@ func (w *Watcher) startRead(watch *watch) error { // readEvents reads from the I/O completion port, converts the // received events into Event objects and sends them via the Events channel. // Entry point to the I/O thread. -func (w *Watcher) readEvents() { +func (w *readDirChangesW) readEvents() { var ( n uint32 key uintptr @@ -700,7 +543,7 @@ func (w *Watcher) readEvents() { } case windows.ERROR_ACCESS_DENIED: // Watched directory was probably removed - w.sendEvent(watch.path, watch.mask&sysFSDELETESELF) + w.sendEvent(watch.path, "", watch.mask&sysFSDELETESELF) w.deleteWatch(watch) w.startRead(watch) continue @@ -733,6 +576,10 @@ func (w *Watcher) readEvents() { name := windows.UTF16ToString(buf) fullname := filepath.Join(watch.path, name) + if debug { + internal.Debug(fullname, raw.Action) + } + var mask uint64 switch raw.Action { case windows.FILE_ACTION_REMOVED: @@ -761,21 +608,22 @@ func (w *Watcher) readEvents() { } } - sendNameEvent := func() { - w.sendEvent(fullname, watch.names[name]&mask) - } if raw.Action != windows.FILE_ACTION_RENAMED_NEW_NAME { - sendNameEvent() + w.sendEvent(fullname, "", watch.names[name]&mask) } if raw.Action == windows.FILE_ACTION_REMOVED { - w.sendEvent(fullname, watch.names[name]&sysFSIGNORED) + w.sendEvent(fullname, "", watch.names[name]&sysFSIGNORED) delete(watch.names, name) } - w.sendEvent(fullname, watch.mask&w.toFSnotifyFlags(raw.Action)) + if watch.rename != "" && raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { + w.sendEvent(fullname, filepath.Join(watch.path, watch.rename), watch.mask&w.toFSnotifyFlags(raw.Action)) + } else { + w.sendEvent(fullname, "", watch.mask&w.toFSnotifyFlags(raw.Action)) + } + if raw.Action == windows.FILE_ACTION_RENAMED_NEW_NAME { - fullname = filepath.Join(watch.path, watch.rename) - sendNameEvent() + w.sendEvent(filepath.Join(watch.path, watch.rename), "", watch.names[name]&mask) } // Move to the next event in the buffer @@ -787,8 +635,7 @@ func (w *Watcher) readEvents() { // Error! 
if offset >= n { //lint:ignore ST1005 Windows should be capitalized - w.sendError(errors.New( - "Windows system assumed buffer larger than it is, events have likely been missed")) + w.sendError(errors.New("Windows system assumed buffer larger than it is, events have likely been missed")) break } } @@ -799,7 +646,7 @@ func (w *Watcher) readEvents() { } } -func (w *Watcher) toWindowsFlags(mask uint64) uint32 { +func (w *readDirChangesW) toWindowsFlags(mask uint64) uint32 { var m uint32 if mask&sysFSMODIFY != 0 { m |= windows.FILE_NOTIFY_CHANGE_LAST_WRITE @@ -810,7 +657,7 @@ func (w *Watcher) toWindowsFlags(mask uint64) uint32 { return m } -func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { +func (w *readDirChangesW) toFSnotifyFlags(action uint32) uint64 { switch action { case windows.FILE_ACTION_ADDED: return sysFSCREATE @@ -825,3 +672,11 @@ func (w *Watcher) toFSnotifyFlags(action uint32) uint64 { } return 0 } + +func (w *readDirChangesW) xSupports(op Op) bool { + if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || + op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { + return false + } + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go index 24c99cc499..0760efe916 100644 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -3,19 +3,146 @@ // // Currently supported systems: // -// Linux 2.6.32+ via inotify -// BSD, macOS via kqueue -// Windows via ReadDirectoryChangesW -// illumos via FEN +// - Linux via inotify +// - BSD, macOS via kqueue +// - Windows via ReadDirectoryChangesW +// - illumos via FEN +// +// # FSNOTIFY_DEBUG +// +// Set the FSNOTIFY_DEBUG environment variable to "1" to print debug messages to +// stderr. This can be useful to track down some problems, especially in cases +// where fsnotify is used as an indirect dependency. +// +// Every event will be printed as soon as there's something useful to print, +// with as little processing from fsnotify. +// +// Example output: +// +// FSNOTIFY_DEBUG: 11:34:23.633087586 256:IN_CREATE → "/tmp/file-1" +// FSNOTIFY_DEBUG: 11:34:23.633202319 4:IN_ATTRIB → "/tmp/file-1" +// FSNOTIFY_DEBUG: 11:34:28.989728764 512:IN_DELETE → "/tmp/file-1" package fsnotify import ( "errors" "fmt" + "os" "path/filepath" "strings" ) +// Watcher watches a set of paths, delivering events on a channel. +// +// A watcher should not be copied (e.g. pass it by pointer, rather than by +// value). +// +// # Linux notes +// +// When a file is removed a Remove event won't be emitted until all file +// descriptors are closed, and deletes will always emit a Chmod. For example: +// +// fp := os.Open("file") +// os.Remove("file") // Triggers Chmod +// fp.Close() // Triggers Remove +// +// This is the event that inotify sends, so not much can be changed about this. +// +// The fs.inotify.max_user_watches sysctl variable specifies the upper limit +// for the number of watches per user, and fs.inotify.max_user_instances +// specifies the maximum number of inotify instances per user. Every Watcher you +// create is an "instance", and every path you add is a "watch". 
+// +// These are also exposed in /proc as /proc/sys/fs/inotify/max_user_watches and +// /proc/sys/fs/inotify/max_user_instances +// +// To increase them you can use sysctl or write the value to the /proc file: +// +// # Default values on Linux 5.18 +// sysctl fs.inotify.max_user_watches=124983 +// sysctl fs.inotify.max_user_instances=128 +// +// To make the changes persist on reboot edit /etc/sysctl.conf or +// /usr/lib/sysctl.d/50-default.conf (details differ per Linux distro; check +// your distro's documentation): +// +// fs.inotify.max_user_watches=124983 +// fs.inotify.max_user_instances=128 +// +// Reaching the limit will result in a "no space left on device" or "too many open +// files" error. +// +// # kqueue notes (macOS, BSD) +// +// kqueue requires opening a file descriptor for every file that's being watched; +// so if you're watching a directory with five files then that's six file +// descriptors. You will run in to your system's "max open files" limit faster on +// these platforms. +// +// The sysctl variables kern.maxfiles and kern.maxfilesperproc can be used to +// control the maximum number of open files, as well as /etc/login.conf on BSD +// systems. +// +// # Windows notes +// +// Paths can be added as "C:\\path\\to\\dir", but forward slashes +// ("C:/path/to/dir") will also work. +// +// When a watched directory is removed it will always send an event for the +// directory itself, but may not send events for all files in that directory. +// Sometimes it will send events for all files, sometimes it will send no +// events, and often only for some files. +// +// The default ReadDirectoryChangesW() buffer size is 64K, which is the largest +// value that is guaranteed to work with SMB filesystems. If you have many +// events in quick succession this may not be enough, and you will have to use +// [WithBufferSize] to increase the value. +type Watcher struct { + b backend + + // Events sends the filesystem change events. + // + // fsnotify can send the following events; a "path" here can refer to a + // file, directory, symbolic link, or special file like a FIFO. + // + // fsnotify.Create A new path was created; this may be followed by one + // or more Write events if data also gets written to a + // file. + // + // fsnotify.Remove A path was removed. + // + // fsnotify.Rename A path was renamed. A rename is always sent with the + // old path as Event.Name, and a Create event will be + // sent with the new name. Renames are only sent for + // paths that are currently watched; e.g. moving an + // unmonitored file into a monitored directory will + // show up as just a Create. Similarly, renaming a file + // to outside a monitored directory will show up as + // only a Rename. + // + // fsnotify.Write A file or named pipe was written to. A Truncate will + // also trigger a Write. A single "write action" + // initiated by the user may show up as one or multiple + // writes, depending on when the system syncs things to + // disk. For example when compiling a large Go program + // you may get hundreds of Write events, and you may + // want to wait until you've stopped receiving them + // (see the dedup example in cmd/fsnotify). + // + // Some systems may send Write event for directories + // when the directory content changes. + // + // fsnotify.Chmod Attributes were changed. On Linux this is also sent + // when a file is removed (or more accurately, when a + // link to an inode is removed). On kqueue it's sent + // when a file is truncated. On Windows it's never + // sent. 
+ Events chan Event + + // Errors sends any errors. + Errors chan error +} + // Event represents a file system notification. type Event struct { // Path to the file or directory. @@ -30,6 +157,16 @@ type Event struct { // This is a bitmask and some systems may send multiple operations at once. // Use the Event.Has() method instead of comparing with ==. Op Op + + // Create events will have this set to the old path if it's a rename. This + // only works when both the source and destination are watched. It's not + // reliable when watching individual files, only directories. + // + // For example "mv /tmp/file /tmp/rename" will emit: + // + // Event{Op: Rename, Name: "/tmp/file"} + // Event{Op: Create, Name: "/tmp/rename", RenamedFrom: "/tmp/file"} + renamedFrom string } // Op describes a set of file operations. @@ -50,7 +187,7 @@ const ( // example "remove to trash" is often a rename). Remove - // The path was renamed to something else; any watched on it will be + // The path was renamed to something else; any watches on it will be // removed. Rename @@ -60,15 +197,155 @@ const ( // get triggered very frequently by some software. For example, Spotlight // indexing on macOS, anti-virus software, backup software, etc. Chmod + + // File descriptor was opened. + // + // Only works on Linux and FreeBSD. + xUnportableOpen + + // File was read from. + // + // Only works on Linux and FreeBSD. + xUnportableRead + + // File opened for writing was closed. + // + // Only works on Linux and FreeBSD. + // + // The advantage of using this over Write is that it's more reliable than + // waiting for Write events to stop. It's also faster (if you're not + // listening to Write events): copying a file of a few GB can easily + // generate tens of thousands of Write events in a short span of time. + xUnportableCloseWrite + + // File opened for reading was closed. + // + // Only works on Linux and FreeBSD. + xUnportableCloseRead ) -// Common errors that can be reported. var ( + // ErrNonExistentWatch is used when Remove() is called on a path that's not + // added. ErrNonExistentWatch = errors.New("fsnotify: can't remove non-existent watch") - ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow") - ErrClosed = errors.New("fsnotify: watcher already closed") + + // ErrClosed is used when trying to operate on a closed Watcher. + ErrClosed = errors.New("fsnotify: watcher already closed") + + // ErrEventOverflow is reported from the Errors channel when there are too + // many events: + // + // - inotify: inotify returns IN_Q_OVERFLOW – because there are too + // many queued events (the fs.inotify.max_queued_events + // sysctl can be used to increase this). + // - windows: The buffer size is too small; WithBufferSize() can be used to increase it. + // - kqueue, fen: Not used. + ErrEventOverflow = errors.New("fsnotify: queue or buffer overflow") + + // ErrUnsupported is returned by AddWith() when WithOps() specified an + // Unportable event that's not supported on this platform. + xErrUnsupported = errors.New("fsnotify: not supported with this backend") ) +// NewWatcher creates a new Watcher. +func NewWatcher() (*Watcher, error) { + ev, errs := make(chan Event), make(chan error) + b, err := newBackend(ev, errs) + if err != nil { + return nil, err + } + return &Watcher{b: b, Events: ev, Errors: errs}, nil +} + +// NewBufferedWatcher creates a new Watcher with a buffered Watcher.Events +// channel. 
+// +// The main use case for this is situations with a very large number of events +// where the kernel buffer size can't be increased (e.g. due to lack of +// permissions). An unbuffered Watcher will perform better for almost all use +// cases, and whenever possible you will be better off increasing the kernel +// buffers instead of adding a large userspace buffer. +func NewBufferedWatcher(sz uint) (*Watcher, error) { + ev, errs := make(chan Event), make(chan error) + b, err := newBufferedBackend(sz, ev, errs) + if err != nil { + return nil, err + } + return &Watcher{b: b, Events: ev, Errors: errs}, nil +} + +// Add starts monitoring the path for changes. +// +// A path can only be watched once; watching it more than once is a no-op and will +// not return an error. Paths that do not yet exist on the filesystem cannot be +// watched. +// +// A watch will be automatically removed if the watched path is deleted or +// renamed. The exception is the Windows backend, which doesn't remove the +// watcher on renames. +// +// Notifications on network filesystems (NFS, SMB, FUSE, etc.) or special +// filesystems (/proc, /sys, etc.) generally don't work. +// +// Returns [ErrClosed] if [Watcher.Close] was called. +// +// See [Watcher.AddWith] for a version that allows adding options. +// +// # Watching directories +// +// All files in a directory are monitored, including new files that are created +// after the watcher is started. Subdirectories are not watched (i.e. it's +// non-recursive). +// +// # Watching files +// +// Watching individual files (rather than directories) is generally not +// recommended as many programs (especially editors) update files atomically: it +// will write to a temporary file which is then moved to destination, +// overwriting the original (or some variant thereof). The watcher on the +// original file is now lost, as that no longer exists. +// +// The upshot of this is that a power failure or crash won't leave a +// half-written file. +// +// Watch the parent directory and use Event.Name to filter out files you're not +// interested in. There is an example of this in cmd/fsnotify/file.go. +func (w *Watcher) Add(path string) error { return w.b.Add(path) } + +// AddWith is like [Watcher.Add], but allows adding options. When using Add() +// the defaults described below are used. +// +// Possible options are: +// +// - [WithBufferSize] sets the buffer size for the Windows backend; no-op on +// other platforms. The default is 64K (65536 bytes). +func (w *Watcher) AddWith(path string, opts ...addOpt) error { return w.b.AddWith(path, opts...) } + +// Remove stops monitoring the path for changes. +// +// Directories are always removed non-recursively. For example, if you added +// /tmp/dir and /tmp/dir/subdir then you will need to remove both. +// +// Removing a path that has not yet been added returns [ErrNonExistentWatch]. +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) Remove(path string) error { return w.b.Remove(path) } + +// Close removes all watches and closes the Events channel. +func (w *Watcher) Close() error { return w.b.Close() } + +// WatchList returns all paths explicitly added with [Watcher.Add] (and are not +// yet removed). +// +// Returns nil if [Watcher.Close] was called. +func (w *Watcher) WatchList() []string { return w.b.WatchList() } + +// Supports reports if all the listed operations are supported by this platform. +// +// Create, Write, Remove, Rename, and Chmod are always supported. 
It can only +// return false for an Op starting with Unportable. +func (w *Watcher) xSupports(op Op) bool { return w.b.xSupports(op) } + func (o Op) String() string { var b strings.Builder if o.Has(Create) { @@ -80,6 +357,18 @@ func (o Op) String() string { if o.Has(Write) { b.WriteString("|WRITE") } + if o.Has(xUnportableOpen) { + b.WriteString("|OPEN") + } + if o.Has(xUnportableRead) { + b.WriteString("|READ") + } + if o.Has(xUnportableCloseWrite) { + b.WriteString("|CLOSE_WRITE") + } + if o.Has(xUnportableCloseRead) { + b.WriteString("|CLOSE_READ") + } if o.Has(Rename) { b.WriteString("|RENAME") } @@ -100,24 +389,48 @@ func (e Event) Has(op Op) bool { return e.Op.Has(op) } // String returns a string representation of the event with their path. func (e Event) String() string { + if e.renamedFrom != "" { + return fmt.Sprintf("%-13s %q ← %q", e.Op.String(), e.Name, e.renamedFrom) + } return fmt.Sprintf("%-13s %q", e.Op.String(), e.Name) } type ( + backend interface { + Add(string) error + AddWith(string, ...addOpt) error + Remove(string) error + WatchList() []string + Close() error + xSupports(Op) bool + } addOpt func(opt *withOpts) withOpts struct { - bufsize int + bufsize int + op Op + noFollow bool + sendCreate bool } ) +var debug = func() bool { + // Check for exactly "1" (rather than mere existence) so we can add + // options/flags in the future. I don't know if we ever want that, but it's + // nice to leave the option open. + return os.Getenv("FSNOTIFY_DEBUG") == "1" +}() + var defaultOpts = withOpts{ bufsize: 65536, // 64K + op: Create | Write | Remove | Rename | Chmod, } func getOptions(opts ...addOpt) withOpts { with := defaultOpts for _, o := range opts { - o(&with) + if o != nil { + o(&with) + } } return with } @@ -136,9 +449,44 @@ func WithBufferSize(bytes int) addOpt { return func(opt *withOpts) { opt.bufsize = bytes } } +// WithOps sets which operations to listen for. The default is [Create], +// [Write], [Remove], [Rename], and [Chmod]. +// +// Excluding operations you're not interested in can save quite a bit of CPU +// time; in some use cases there may be hundreds of thousands of useless Write +// or Chmod operations per second. +// +// This can also be used to add unportable operations not supported by all +// platforms; unportable operations all start with "Unportable": +// [UnportableOpen], [UnportableRead], [UnportableCloseWrite], and +// [UnportableCloseRead]. +// +// AddWith returns an error when using an unportable operation that's not +// supported. Use [Watcher.Support] to check for support. +func withOps(op Op) addOpt { + return func(opt *withOpts) { opt.op = op } +} + +// WithNoFollow disables following symlinks, so the symlinks themselves are +// watched. +func withNoFollow() addOpt { + return func(opt *withOpts) { opt.noFollow = true } +} + +// "Internal" option for recursive watches on inotify. +func withCreate() addOpt { + return func(opt *withOpts) { opt.sendCreate = true } +} + +var enableRecurse = false + // Check if this path is recursive (ends with "/..." or "\..."), and return the // path with the /... stripped. func recursivePath(path string) (string, bool) { + path = filepath.Clean(path) + if !enableRecurse { // Only enabled in tests for now. + return path, false + } if filepath.Base(path) == "..." 
{ return filepath.Dir(path), true } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/darwin.go b/vendor/github.com/fsnotify/fsnotify/internal/darwin.go new file mode 100644 index 0000000000..b0eab10090 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/darwin.go @@ -0,0 +1,39 @@ +//go:build darwin + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ +func SetRlimit() { + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = l.Cur + + if n, err := syscall.SysctlUint32("kern.maxfiles"); err == nil && uint64(n) < maxfiles { + maxfiles = uint64(n) + } + + if n, err := syscall.SysctlUint32("kern.maxfilesperproc"); err == nil && uint64(n) < maxfiles { + maxfiles = uint64(n) + } +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go new file mode 100644 index 0000000000..928319fb09 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_darwin.go @@ -0,0 +1,57 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ABSOLUTE", unix.NOTE_ABSOLUTE}, + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_BACKGROUND", unix.NOTE_BACKGROUND}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_CRITICAL", unix.NOTE_CRITICAL}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXITSTATUS", unix.NOTE_EXITSTATUS}, + {"NOTE_EXIT_CSERROR", unix.NOTE_EXIT_CSERROR}, + {"NOTE_EXIT_DECRYPTFAIL", unix.NOTE_EXIT_DECRYPTFAIL}, + {"NOTE_EXIT_DETAIL", unix.NOTE_EXIT_DETAIL}, + {"NOTE_EXIT_DETAIL_MASK", unix.NOTE_EXIT_DETAIL_MASK}, + {"NOTE_EXIT_MEMORY", unix.NOTE_EXIT_MEMORY}, + {"NOTE_EXIT_REPARENTED", unix.NOTE_EXIT_REPARENTED}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_FUNLOCK", unix.NOTE_FUNLOCK}, + {"NOTE_LEEWAY", unix.NOTE_LEEWAY}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_MACHTIME", unix.NOTE_MACHTIME}, + {"NOTE_MACH_CONTINUOUS_TIME", unix.NOTE_MACH_CONTINUOUS_TIME}, + {"NOTE_NONE", unix.NOTE_NONE}, + {"NOTE_NSECONDS", unix.NOTE_NSECONDS}, + {"NOTE_OOB", unix.NOTE_OOB}, + //{"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, -0x100000 (?!) 
+ {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_REAP", unix.NOTE_REAP}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_SECONDS", unix.NOTE_SECONDS}, + {"NOTE_SIGNAL", unix.NOTE_SIGNAL}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_USECONDS", unix.NOTE_USECONDS}, + {"NOTE_VM_ERROR", unix.NOTE_VM_ERROR}, + {"NOTE_VM_PRESSURE", unix.NOTE_VM_PRESSURE}, + {"NOTE_VM_PRESSURE_SUDDEN_TERMINATE", unix.NOTE_VM_PRESSURE_SUDDEN_TERMINATE}, + {"NOTE_VM_PRESSURE_TERMINATE", unix.NOTE_VM_PRESSURE_TERMINATE}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go new file mode 100644 index 0000000000..3186b0c349 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_dragonfly.go @@ -0,0 +1,33 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_OOB", unix.NOTE_OOB}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go new file mode 100644 index 0000000000..f69fdb930f --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_freebsd.go @@ -0,0 +1,42 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ABSTIME", unix.NOTE_ABSTIME}, + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_CLOSE", unix.NOTE_CLOSE}, + {"NOTE_CLOSE_WRITE", unix.NOTE_CLOSE_WRITE}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FFAND", unix.NOTE_FFAND}, + {"NOTE_FFCOPY", unix.NOTE_FFCOPY}, + {"NOTE_FFCTRLMASK", unix.NOTE_FFCTRLMASK}, + {"NOTE_FFLAGSMASK", unix.NOTE_FFLAGSMASK}, + {"NOTE_FFNOP", unix.NOTE_FFNOP}, + {"NOTE_FFOR", unix.NOTE_FFOR}, + {"NOTE_FILE_POLL", unix.NOTE_FILE_POLL}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_MSECONDS", unix.NOTE_MSECONDS}, + {"NOTE_NSECONDS", unix.NOTE_NSECONDS}, + {"NOTE_OPEN", unix.NOTE_OPEN}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_READ", unix.NOTE_READ}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_SECONDS", unix.NOTE_SECONDS}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRIGGER", unix.NOTE_TRIGGER}, + {"NOTE_USECONDS", unix.NOTE_USECONDS}, + 
{"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go new file mode 100644 index 0000000000..607e683bd7 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_kqueue.go @@ -0,0 +1,32 @@ +//go:build freebsd || openbsd || netbsd || dragonfly || darwin + +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, kevent *unix.Kevent_t) { + mask := uint32(kevent.Fflags) + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-60s → %q\n", + time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go new file mode 100644 index 0000000000..35c734be43 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_linux.go @@ -0,0 +1,56 @@ +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, mask, cookie uint32) { + names := []struct { + n string + m uint32 + }{ + {"IN_ACCESS", unix.IN_ACCESS}, + {"IN_ATTRIB", unix.IN_ATTRIB}, + {"IN_CLOSE", unix.IN_CLOSE}, + {"IN_CLOSE_NOWRITE", unix.IN_CLOSE_NOWRITE}, + {"IN_CLOSE_WRITE", unix.IN_CLOSE_WRITE}, + {"IN_CREATE", unix.IN_CREATE}, + {"IN_DELETE", unix.IN_DELETE}, + {"IN_DELETE_SELF", unix.IN_DELETE_SELF}, + {"IN_IGNORED", unix.IN_IGNORED}, + {"IN_ISDIR", unix.IN_ISDIR}, + {"IN_MODIFY", unix.IN_MODIFY}, + {"IN_MOVE", unix.IN_MOVE}, + {"IN_MOVED_FROM", unix.IN_MOVED_FROM}, + {"IN_MOVED_TO", unix.IN_MOVED_TO}, + {"IN_MOVE_SELF", unix.IN_MOVE_SELF}, + {"IN_OPEN", unix.IN_OPEN}, + {"IN_Q_OVERFLOW", unix.IN_Q_OVERFLOW}, + {"IN_UNMOUNT", unix.IN_UNMOUNT}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + var c string + if cookie > 0 { + c = fmt.Sprintf("(cookie: %d) ", cookie) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-30s → %s%q\n", + time.Now().Format("15:04:05.000000000"), strings.Join(l, "|"), c, name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go new file mode 100644 index 0000000000..e5b3b6f694 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_netbsd.go @@ -0,0 +1,25 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go 
b/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go new file mode 100644 index 0000000000..1dd455bc5a --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_openbsd.go @@ -0,0 +1,28 @@ +package internal + +import "golang.org/x/sys/unix" + +var names = []struct { + n string + m uint32 +}{ + {"NOTE_ATTRIB", unix.NOTE_ATTRIB}, + // {"NOTE_CHANGE", unix.NOTE_CHANGE}, // Not on 386? + {"NOTE_CHILD", unix.NOTE_CHILD}, + {"NOTE_DELETE", unix.NOTE_DELETE}, + {"NOTE_EOF", unix.NOTE_EOF}, + {"NOTE_EXEC", unix.NOTE_EXEC}, + {"NOTE_EXIT", unix.NOTE_EXIT}, + {"NOTE_EXTEND", unix.NOTE_EXTEND}, + {"NOTE_FORK", unix.NOTE_FORK}, + {"NOTE_LINK", unix.NOTE_LINK}, + {"NOTE_LOWAT", unix.NOTE_LOWAT}, + {"NOTE_PCTRLMASK", unix.NOTE_PCTRLMASK}, + {"NOTE_PDATAMASK", unix.NOTE_PDATAMASK}, + {"NOTE_RENAME", unix.NOTE_RENAME}, + {"NOTE_REVOKE", unix.NOTE_REVOKE}, + {"NOTE_TRACK", unix.NOTE_TRACK}, + {"NOTE_TRACKERR", unix.NOTE_TRACKERR}, + {"NOTE_TRUNCATE", unix.NOTE_TRUNCATE}, + {"NOTE_WRITE", unix.NOTE_WRITE}, +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go new file mode 100644 index 0000000000..f1b2e73bd5 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_solaris.go @@ -0,0 +1,45 @@ +package internal + +import ( + "fmt" + "os" + "strings" + "time" + + "golang.org/x/sys/unix" +) + +func Debug(name string, mask int32) { + names := []struct { + n string + m int32 + }{ + {"FILE_ACCESS", unix.FILE_ACCESS}, + {"FILE_MODIFIED", unix.FILE_MODIFIED}, + {"FILE_ATTRIB", unix.FILE_ATTRIB}, + {"FILE_TRUNC", unix.FILE_TRUNC}, + {"FILE_NOFOLLOW", unix.FILE_NOFOLLOW}, + {"FILE_DELETE", unix.FILE_DELETE}, + {"FILE_RENAME_TO", unix.FILE_RENAME_TO}, + {"FILE_RENAME_FROM", unix.FILE_RENAME_FROM}, + {"UNMOUNTED", unix.UNMOUNTED}, + {"MOUNTEDOVER", unix.MOUNTEDOVER}, + {"FILE_EXCEPTION", unix.FILE_EXCEPTION}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %10d:%-30s → %q\n", + time.Now().Format("15:04:05.000000000"), mask, strings.Join(l, " | "), name) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go b/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go new file mode 100644 index 0000000000..52bf4ce53b --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/debug_windows.go @@ -0,0 +1,40 @@ +package internal + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "time" + + "golang.org/x/sys/windows" +) + +func Debug(name string, mask uint32) { + names := []struct { + n string + m uint32 + }{ + {"FILE_ACTION_ADDED", windows.FILE_ACTION_ADDED}, + {"FILE_ACTION_REMOVED", windows.FILE_ACTION_REMOVED}, + {"FILE_ACTION_MODIFIED", windows.FILE_ACTION_MODIFIED}, + {"FILE_ACTION_RENAMED_OLD_NAME", windows.FILE_ACTION_RENAMED_OLD_NAME}, + {"FILE_ACTION_RENAMED_NEW_NAME", windows.FILE_ACTION_RENAMED_NEW_NAME}, + } + + var ( + l []string + unknown = mask + ) + for _, n := range names { + if mask&n.m == n.m { + l = append(l, n.n) + unknown ^= n.m + } + } + if unknown > 0 { + l = append(l, fmt.Sprintf("0x%x", unknown)) + } + fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s %-65s → %q\n", + time.Now().Format("15:04:05.000000000"), strings.Join(l, " | "), filepath.ToSlash(name)) +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go 
b/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go new file mode 100644 index 0000000000..547df1df84 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go @@ -0,0 +1,31 @@ +//go:build freebsd + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +func SetRlimit() { + // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = uint64(l.Cur) +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, uint64(dev)) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/internal.go b/vendor/github.com/fsnotify/fsnotify/internal/internal.go new file mode 100644 index 0000000000..7daa45e19e --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/internal.go @@ -0,0 +1,2 @@ +// Package internal contains some helpers. +package internal diff --git a/vendor/github.com/fsnotify/fsnotify/internal/unix.go b/vendor/github.com/fsnotify/fsnotify/internal/unix.go new file mode 100644 index 0000000000..30976ce973 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/unix.go @@ -0,0 +1,31 @@ +//go:build !windows && !darwin && !freebsd + +package internal + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +var ( + SyscallEACCES = syscall.EACCES + UnixEACCES = unix.EACCES +) + +var maxfiles uint64 + +func SetRlimit() { + // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ + var l syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) + if err == nil && l.Cur != l.Max { + l.Cur = l.Max + syscall.Setrlimit(syscall.RLIMIT_NOFILE, &l) + } + maxfiles = uint64(l.Cur) +} + +func Maxfiles() uint64 { return maxfiles } +func Mkfifo(path string, mode uint32) error { return unix.Mkfifo(path, mode) } +func Mknod(path string, mode uint32, dev int) error { return unix.Mknod(path, mode, dev) } diff --git a/vendor/github.com/fsnotify/fsnotify/internal/unix2.go b/vendor/github.com/fsnotify/fsnotify/internal/unix2.go new file mode 100644 index 0000000000..37dfeddc28 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/unix2.go @@ -0,0 +1,7 @@ +//go:build !windows + +package internal + +func HasPrivilegesForSymlink() bool { + return true +} diff --git a/vendor/github.com/fsnotify/fsnotify/internal/windows.go b/vendor/github.com/fsnotify/fsnotify/internal/windows.go new file mode 100644 index 0000000000..a72c649549 --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/internal/windows.go @@ -0,0 +1,41 @@ +//go:build windows + +package internal + +import ( + "errors" + + "golang.org/x/sys/windows" +) + +// Just a dummy. 
+var ( + SyscallEACCES = errors.New("dummy") + UnixEACCES = errors.New("dummy") +) + +func SetRlimit() {} +func Maxfiles() uint64 { return 1<<64 - 1 } +func Mkfifo(path string, mode uint32) error { return errors.New("no FIFOs on Windows") } +func Mknod(path string, mode uint32, dev int) error { return errors.New("no device nodes on Windows") } + +func HasPrivilegesForSymlink() bool { + var sid *windows.SID + err := windows.AllocateAndInitializeSid( + &windows.SECURITY_NT_AUTHORITY, + 2, + windows.SECURITY_BUILTIN_DOMAIN_RID, + windows.DOMAIN_ALIAS_RID_ADMINS, + 0, 0, 0, 0, 0, 0, + &sid) + if err != nil { + return false + } + defer windows.FreeSid(sid) + token := windows.Token(0) + member, err := token.IsMember(sid) + if err != nil { + return false + } + return member || token.IsElevated() +} diff --git a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh b/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh deleted file mode 100644 index 99012ae653..0000000000 --- a/vendor/github.com/fsnotify/fsnotify/mkdoc.zsh +++ /dev/null @@ -1,259 +0,0 @@ -#!/usr/bin/env zsh -[ "${ZSH_VERSION:-}" = "" ] && echo >&2 "Only works with zsh" && exit 1 -setopt err_exit no_unset pipefail extended_glob - -# Simple script to update the godoc comments on all watchers so you don't need -# to update the same comment 5 times. - -watcher=$(</tmp/x - print -r -- $cmt >>/tmp/x - tail -n+$(( end + 1 )) $file >>/tmp/x - mv /tmp/x $file - done -} - -set-cmt '^type Watcher struct ' $watcher -set-cmt '^func NewWatcher(' $new -set-cmt '^func NewBufferedWatcher(' $newbuffered -set-cmt '^func (w \*Watcher) Add(' $add -set-cmt '^func (w \*Watcher) AddWith(' $addwith -set-cmt '^func (w \*Watcher) Remove(' $remove -set-cmt '^func (w \*Watcher) Close(' $close -set-cmt '^func (w \*Watcher) WatchList(' $watchlist -set-cmt '^[[:space:]]*Events *chan Event$' $events -set-cmt '^[[:space:]]*Errors *chan error$' $errors diff --git a/vendor/github.com/fsnotify/fsnotify/system_bsd.go b/vendor/github.com/fsnotify/fsnotify/system_bsd.go index 4322b0b885..f65e8fe3ed 100644 --- a/vendor/github.com/fsnotify/fsnotify/system_bsd.go +++ b/vendor/github.com/fsnotify/fsnotify/system_bsd.go @@ -1,5 +1,4 @@ //go:build freebsd || openbsd || netbsd || dragonfly -// +build freebsd openbsd netbsd dragonfly package fsnotify diff --git a/vendor/github.com/fsnotify/fsnotify/system_darwin.go b/vendor/github.com/fsnotify/fsnotify/system_darwin.go index 5da5ffa78f..a29fc7aab6 100644 --- a/vendor/github.com/fsnotify/fsnotify/system_darwin.go +++ b/vendor/github.com/fsnotify/fsnotify/system_darwin.go @@ -1,5 +1,4 @@ //go:build darwin -// +build darwin package fsnotify diff --git a/vendor/github.com/go-kit/log/.gitignore b/vendor/github.com/go-kit/log/.gitignore deleted file mode 100644 index 66fd13c903..0000000000 --- a/vendor/github.com/go-kit/log/.gitignore +++ /dev/null @@ -1,15 +0,0 @@ -# Binaries for programs and plugins -*.exe -*.exe~ -*.dll -*.so -*.dylib - -# Test binary, built with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -# Dependency directories (remove the comment below to include it) -# vendor/ diff --git a/vendor/github.com/go-kit/log/LICENSE b/vendor/github.com/go-kit/log/LICENSE deleted file mode 100644 index bb5bdb9cb8..0000000000 --- a/vendor/github.com/go-kit/log/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2021 Go kit - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to 
deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/go-kit/log/README.md b/vendor/github.com/go-kit/log/README.md deleted file mode 100644 index 8067794657..0000000000 --- a/vendor/github.com/go-kit/log/README.md +++ /dev/null @@ -1,156 +0,0 @@ -# package log - -[![Go Reference](https://pkg.go.dev/badge/github.com/go-kit/log.svg)](https://pkg.go.dev/github.com/go-kit/log) -[![Go Report Card](https://goreportcard.com/badge/go-kit/log)](https://goreportcard.com/report/go-kit/log) -[![GitHub Actions](https://github.com/go-kit/log/actions/workflows/test.yml/badge.svg)](https://github.com/go-kit/log/actions/workflows/test.yml) -[![Coverage Status](https://coveralls.io/repos/github/go-kit/log/badge.svg?branch=main)](https://coveralls.io/github/go-kit/log?branch=main) - -`package log` provides a minimal interface for structured logging in services. -It may be wrapped to encode conventions, enforce type-safety, provide leveled -logging, and so on. It can be used for both typical application log events, -and log-structured data streams. - -## Structured logging - -Structured logging is, basically, conceding to the reality that logs are -_data_, and warrant some level of schematic rigor. Using a stricter, -key/value-oriented message format for our logs, containing contextual and -semantic information, makes it much easier to get insight into the -operational activity of the systems we build. Consequently, `package log` is -of the strong belief that "[the benefits of structured logging outweigh the -minimal effort involved](https://www.thoughtworks.com/radar/techniques/structured-logging)". - -Migrating from unstructured to structured logging is probably a lot easier -than you'd expect. - -```go -// Unstructured -log.Printf("HTTP server listening on %s", addr) - -// Structured -logger.Log("transport", "HTTP", "addr", addr, "msg", "listening") -``` - -## Usage - -### Typical application logging - -```go -w := log.NewSyncWriter(os.Stderr) -logger := log.NewLogfmtLogger(w) -logger.Log("question", "what is the meaning of life?", "answer", 42) - -// Output: -// question="what is the meaning of life?" 
answer=42 -``` - -### Contextual Loggers - -```go -func main() { - var logger log.Logger - logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) - logger = log.With(logger, "instance_id", 123) - - logger.Log("msg", "starting") - NewWorker(log.With(logger, "component", "worker")).Run() - NewSlacker(log.With(logger, "component", "slacker")).Run() -} - -// Output: -// instance_id=123 msg=starting -// instance_id=123 component=worker msg=running -// instance_id=123 component=slacker msg=running -``` - -### Interact with stdlib logger - -Redirect stdlib logger to Go kit logger. - -```go -import ( - "os" - stdlog "log" - kitlog "github.com/go-kit/log" -) - -func main() { - logger := kitlog.NewJSONLogger(kitlog.NewSyncWriter(os.Stdout)) - stdlog.SetOutput(kitlog.NewStdlibAdapter(logger)) - stdlog.Print("I sure like pie") -} - -// Output: -// {"msg":"I sure like pie","ts":"2016/01/01 12:34:56"} -``` - -Or, if, for legacy reasons, you need to pipe all of your logging through the -stdlib log package, you can redirect Go kit logger to the stdlib logger. - -```go -logger := kitlog.NewLogfmtLogger(kitlog.StdlibWriter{}) -logger.Log("legacy", true, "msg", "at least it's something") - -// Output: -// 2016/01/01 12:34:56 legacy=true msg="at least it's something" -``` - -### Timestamps and callers - -```go -var logger log.Logger -logger = log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr)) -logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) - -logger.Log("msg", "hello") - -// Output: -// ts=2016-01-01T12:34:56Z caller=main.go:15 msg=hello -``` - -## Levels - -Log levels are supported via the [level package](https://godoc.org/github.com/go-kit/log/level). - -## Supported output formats - -- [Logfmt](https://brandur.org/logfmt) ([see also](https://blog.codeship.com/logfmt-a-log-format-thats-easy-to-read-and-write)) -- JSON - -## Enhancements - -`package log` is centered on the one-method Logger interface. - -```go -type Logger interface { - Log(keyvals ...interface{}) error -} -``` - -This interface, and its supporting code like is the product of much iteration -and evaluation. For more details on the evolution of the Logger interface, -see [The Hunt for a Logger Interface](http://go-talks.appspot.com/github.com/ChrisHines/talks/structured-logging/structured-logging.slide#1), -a talk by [Chris Hines](https://github.com/ChrisHines). -Also, please see -[#63](https://github.com/go-kit/kit/issues/63), -[#76](https://github.com/go-kit/kit/pull/76), -[#131](https://github.com/go-kit/kit/issues/131), -[#157](https://github.com/go-kit/kit/pull/157), -[#164](https://github.com/go-kit/kit/issues/164), and -[#252](https://github.com/go-kit/kit/pull/252) -to review historical conversations about package log and the Logger interface. - -Value-add packages and suggestions, -like improvements to [the leveled logger](https://godoc.org/github.com/go-kit/log/level), -are of course welcome. Good proposals should - -- Be composable with [contextual loggers](https://godoc.org/github.com/go-kit/log#With), -- Not break the behavior of [log.Caller](https://godoc.org/github.com/go-kit/log#Caller) in any wrapped contextual loggers, and -- Be friendly to packages that accept only an unadorned log.Logger. - -## Benchmarks & comparisons - -There are a few Go logging benchmarks and comparisons that include Go kit's package log. 
- -- [imkira/go-loggers-bench](https://github.com/imkira/go-loggers-bench) includes kit/log -- [uber-common/zap](https://github.com/uber-common/zap), a zero-alloc logging library, includes a comparison with kit/log diff --git a/vendor/github.com/go-kit/log/doc.go b/vendor/github.com/go-kit/log/doc.go deleted file mode 100644 index f744382fe4..0000000000 --- a/vendor/github.com/go-kit/log/doc.go +++ /dev/null @@ -1,116 +0,0 @@ -// Package log provides a structured logger. -// -// Structured logging produces logs easily consumed later by humans or -// machines. Humans might be interested in debugging errors, or tracing -// specific requests. Machines might be interested in counting interesting -// events, or aggregating information for off-line processing. In both cases, -// it is important that the log messages are structured and actionable. -// Package log is designed to encourage both of these best practices. -// -// Basic Usage -// -// The fundamental interface is Logger. Loggers create log events from -// key/value data. The Logger interface has a single method, Log, which -// accepts a sequence of alternating key/value pairs, which this package names -// keyvals. -// -// type Logger interface { -// Log(keyvals ...interface{}) error -// } -// -// Here is an example of a function using a Logger to create log events. -// -// func RunTask(task Task, logger log.Logger) string { -// logger.Log("taskID", task.ID, "event", "starting task") -// ... -// logger.Log("taskID", task.ID, "event", "task complete") -// } -// -// The keys in the above example are "taskID" and "event". The values are -// task.ID, "starting task", and "task complete". Every key is followed -// immediately by its value. -// -// Keys are usually plain strings. Values may be any type that has a sensible -// encoding in the chosen log format. With structured logging it is a good -// idea to log simple values without formatting them. This practice allows -// the chosen logger to encode values in the most appropriate way. -// -// Contextual Loggers -// -// A contextual logger stores keyvals that it includes in all log events. -// Building appropriate contextual loggers reduces repetition and aids -// consistency in the resulting log output. With, WithPrefix, and WithSuffix -// add context to a logger. We can use With to improve the RunTask example. -// -// func RunTask(task Task, logger log.Logger) string { -// logger = log.With(logger, "taskID", task.ID) -// logger.Log("event", "starting task") -// ... -// taskHelper(task.Cmd, logger) -// ... -// logger.Log("event", "task complete") -// } -// -// The improved version emits the same log events as the original for the -// first and last calls to Log. Passing the contextual logger to taskHelper -// enables each log event created by taskHelper to include the task.ID even -// though taskHelper does not have access to that value. Using contextual -// loggers this way simplifies producing log output that enables tracing the -// life cycle of individual tasks. (See the Contextual example for the full -// code of the above snippet.) -// -// Dynamic Contextual Values -// -// A Valuer function stored in a contextual logger generates a new value each -// time an event is logged. The Valuer example demonstrates how this feature -// works. -// -// Valuers provide the basis for consistently logging timestamps and source -// code location. The log package defines several valuers for that purpose. -// See Timestamp, DefaultTimestamp, DefaultTimestampUTC, Caller, and -// DefaultCaller. 
A common logger initialization sequence that ensures all log -// entries contain a timestamp and source location looks like this: -// -// logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout)) -// logger = log.With(logger, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller) -// -// Concurrent Safety -// -// Applications with multiple goroutines want each log event written to the -// same logger to remain separate from other log events. Package log provides -// two simple solutions for concurrent safe logging. -// -// NewSyncWriter wraps an io.Writer and serializes each call to its Write -// method. Using a SyncWriter has the benefit that the smallest practical -// portion of the logging logic is performed within a mutex, but it requires -// the formatting Logger to make only one call to Write per log event. -// -// NewSyncLogger wraps any Logger and serializes each call to its Log method. -// Using a SyncLogger has the benefit that it guarantees each log event is -// handled atomically within the wrapped logger, but it typically serializes -// both the formatting and output logic. Use a SyncLogger if the formatting -// logger may perform multiple writes per log event. -// -// Error Handling -// -// This package relies on the practice of wrapping or decorating loggers with -// other loggers to provide composable pieces of functionality. It also means -// that Logger.Log must return an error because some -// implementations—especially those that output log data to an io.Writer—may -// encounter errors that cannot be handled locally. This in turn means that -// Loggers that wrap other loggers should return errors from the wrapped -// logger up the stack. -// -// Fortunately, the decorator pattern also provides a way to avoid the -// necessity to check for errors every time an application calls Logger.Log. -// An application required to panic whenever its Logger encounters -// an error could initialize its logger as follows. -// -// fmtlogger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stdout)) -// logger := log.LoggerFunc(func(keyvals ...interface{}) error { -// if err := fmtlogger.Log(keyvals...); err != nil { -// panic(err) -// } -// return nil -// }) -package log diff --git a/vendor/github.com/go-kit/log/json_logger.go b/vendor/github.com/go-kit/log/json_logger.go deleted file mode 100644 index d0faed4f09..0000000000 --- a/vendor/github.com/go-kit/log/json_logger.go +++ /dev/null @@ -1,91 +0,0 @@ -package log - -import ( - "encoding" - "encoding/json" - "fmt" - "io" - "reflect" -) - -type jsonLogger struct { - io.Writer -} - -// NewJSONLogger returns a Logger that encodes keyvals to the Writer as a -// single JSON object. Each log event produces no more than one call to -// w.Write. The passed Writer must be safe for concurrent use by multiple -// goroutines if the returned Logger will be used concurrently. 
-func NewJSONLogger(w io.Writer) Logger { - return &jsonLogger{w} -} - -func (l *jsonLogger) Log(keyvals ...interface{}) error { - n := (len(keyvals) + 1) / 2 // +1 to handle case when len is odd - m := make(map[string]interface{}, n) - for i := 0; i < len(keyvals); i += 2 { - k := keyvals[i] - var v interface{} = ErrMissingValue - if i+1 < len(keyvals) { - v = keyvals[i+1] - } - merge(m, k, v) - } - enc := json.NewEncoder(l.Writer) - enc.SetEscapeHTML(false) - return enc.Encode(m) -} - -func merge(dst map[string]interface{}, k, v interface{}) { - var key string - switch x := k.(type) { - case string: - key = x - case fmt.Stringer: - key = safeString(x) - default: - key = fmt.Sprint(x) - } - - // We want json.Marshaler and encoding.TextMarshaller to take priority over - // err.Error() and v.String(). But json.Marshall (called later) does that by - // default so we force a no-op if it's one of those 2 case. - switch x := v.(type) { - case json.Marshaler: - case encoding.TextMarshaler: - case error: - v = safeError(x) - case fmt.Stringer: - v = safeString(x) - } - - dst[key] = v -} - -func safeString(str fmt.Stringer) (s string) { - defer func() { - if panicVal := recover(); panicVal != nil { - if v := reflect.ValueOf(str); v.Kind() == reflect.Ptr && v.IsNil() { - s = "NULL" - } else { - s = fmt.Sprintf("PANIC in String method: %v", panicVal) - } - } - }() - s = str.String() - return -} - -func safeError(err error) (s interface{}) { - defer func() { - if panicVal := recover(); panicVal != nil { - if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() { - s = nil - } else { - s = fmt.Sprintf("PANIC in Error method: %v", panicVal) - } - } - }() - s = err.Error() - return -} diff --git a/vendor/github.com/go-kit/log/level/doc.go b/vendor/github.com/go-kit/log/level/doc.go deleted file mode 100644 index fd681dcf92..0000000000 --- a/vendor/github.com/go-kit/log/level/doc.go +++ /dev/null @@ -1,33 +0,0 @@ -// Package level implements leveled logging on top of Go kit's log package. To -// use the level package, create a logger as per normal in your func main, and -// wrap it with level.NewFilter. -// -// var logger log.Logger -// logger = log.NewLogfmtLogger(os.Stderr) -// logger = level.NewFilter(logger, level.AllowInfo()) // <-- -// logger = log.With(logger, "ts", log.DefaultTimestampUTC) -// -// It's also possible to configure log level from a string. For instance from -// a flag, environment variable or configuration file. -// -// fs := flag.NewFlagSet("myprogram") -// lvl := fs.String("log", "info", "debug, info, warn, error") -// -// var logger log.Logger -// logger = log.NewLogfmtLogger(os.Stderr) -// logger = level.NewFilter(logger, level.Allow(level.ParseDefault(*lvl, level.InfoValue()))) // <-- -// logger = log.With(logger, "ts", log.DefaultTimestampUTC) -// -// Then, at the callsites, use one of the level.Debug, Info, Warn, or Error -// helper methods to emit leveled log events. -// -// logger.Log("foo", "bar") // as normal, no level -// level.Debug(logger).Log("request_id", reqID, "trace_data", trace.Get()) -// if value > 100 { -// level.Error(logger).Log("value", value) -// } -// -// NewFilter allows precise control over what happens when a log event is -// emitted without a level key, or if a squelched level is used. Check the -// Option functions for details. 
-package level diff --git a/vendor/github.com/go-kit/log/level/level.go b/vendor/github.com/go-kit/log/level/level.go deleted file mode 100644 index c641d98552..0000000000 --- a/vendor/github.com/go-kit/log/level/level.go +++ /dev/null @@ -1,256 +0,0 @@ -package level - -import ( - "errors" - "strings" - - "github.com/go-kit/log" -) - -// ErrInvalidLevelString is returned whenever an invalid string is passed to Parse. -var ErrInvalidLevelString = errors.New("invalid level string") - -// Error returns a logger that includes a Key/ErrorValue pair. -func Error(logger log.Logger) log.Logger { - return log.WithPrefix(logger, Key(), ErrorValue()) -} - -// Warn returns a logger that includes a Key/WarnValue pair. -func Warn(logger log.Logger) log.Logger { - return log.WithPrefix(logger, Key(), WarnValue()) -} - -// Info returns a logger that includes a Key/InfoValue pair. -func Info(logger log.Logger) log.Logger { - return log.WithPrefix(logger, Key(), InfoValue()) -} - -// Debug returns a logger that includes a Key/DebugValue pair. -func Debug(logger log.Logger) log.Logger { - return log.WithPrefix(logger, Key(), DebugValue()) -} - -// NewFilter wraps next and implements level filtering. See the commentary on -// the Option functions for a detailed description of how to configure levels. -// If no options are provided, all leveled log events created with Debug, -// Info, Warn or Error helper methods are squelched and non-leveled log -// events are passed to next unmodified. -func NewFilter(next log.Logger, options ...Option) log.Logger { - l := &logger{ - next: next, - } - for _, option := range options { - option(l) - } - return l -} - -type logger struct { - next log.Logger - allowed level - squelchNoLevel bool - errNotAllowed error - errNoLevel error -} - -func (l *logger) Log(keyvals ...interface{}) error { - var hasLevel, levelAllowed bool - for i := 1; i < len(keyvals); i += 2 { - if v, ok := keyvals[i].(*levelValue); ok { - hasLevel = true - levelAllowed = l.allowed&v.level != 0 - break - } - } - if !hasLevel && l.squelchNoLevel { - return l.errNoLevel - } - if hasLevel && !levelAllowed { - return l.errNotAllowed - } - return l.next.Log(keyvals...) -} - -// Option sets a parameter for the leveled logger. -type Option func(*logger) - -// Allow the provided log level to pass. -func Allow(v Value) Option { - switch v { - case debugValue: - return AllowDebug() - case infoValue: - return AllowInfo() - case warnValue: - return AllowWarn() - case errorValue: - return AllowError() - default: - return AllowNone() - } -} - -// AllowAll is an alias for AllowDebug. -func AllowAll() Option { - return AllowDebug() -} - -// AllowDebug allows error, warn, info and debug level log events to pass. -func AllowDebug() Option { - return allowed(levelError | levelWarn | levelInfo | levelDebug) -} - -// AllowInfo allows error, warn and info level log events to pass. -func AllowInfo() Option { - return allowed(levelError | levelWarn | levelInfo) -} - -// AllowWarn allows error and warn level log events to pass. -func AllowWarn() Option { - return allowed(levelError | levelWarn) -} - -// AllowError allows only error level log events to pass. -func AllowError() Option { - return allowed(levelError) -} - -// AllowNone allows no leveled log events to pass. -func AllowNone() Option { - return allowed(0) -} - -func allowed(allowed level) Option { - return func(l *logger) { l.allowed = allowed } -} - -// Parse a string to its corresponding level value. Valid strings are "debug", -// "info", "warn", and "error". 
Strings are normalized via strings.TrimSpace and -// strings.ToLower. -func Parse(level string) (Value, error) { - switch strings.TrimSpace(strings.ToLower(level)) { - case debugValue.name: - return debugValue, nil - case infoValue.name: - return infoValue, nil - case warnValue.name: - return warnValue, nil - case errorValue.name: - return errorValue, nil - default: - return nil, ErrInvalidLevelString - } -} - -// ParseDefault calls Parse and returns the default Value on error. -func ParseDefault(level string, def Value) Value { - v, err := Parse(level) - if err != nil { - return def - } - return v -} - -// ErrNotAllowed sets the error to return from Log when it squelches a log -// event disallowed by the configured Allow[Level] option. By default, -// ErrNotAllowed is nil; in this case the log event is squelched with no -// error. -func ErrNotAllowed(err error) Option { - return func(l *logger) { l.errNotAllowed = err } -} - -// SquelchNoLevel instructs Log to squelch log events with no level, so that -// they don't proceed through to the wrapped logger. If SquelchNoLevel is set -// to true and a log event is squelched in this way, the error value -// configured with ErrNoLevel is returned to the caller. -func SquelchNoLevel(squelch bool) Option { - return func(l *logger) { l.squelchNoLevel = squelch } -} - -// ErrNoLevel sets the error to return from Log when it squelches a log event -// with no level. By default, ErrNoLevel is nil; in this case the log event is -// squelched with no error. -func ErrNoLevel(err error) Option { - return func(l *logger) { l.errNoLevel = err } -} - -// NewInjector wraps next and returns a logger that adds a Key/level pair to -// the beginning of log events that don't already contain a level. In effect, -// this gives a default level to logs without a level. -func NewInjector(next log.Logger, level Value) log.Logger { - return &injector{ - next: next, - level: level, - } -} - -type injector struct { - next log.Logger - level interface{} -} - -func (l *injector) Log(keyvals ...interface{}) error { - for i := 1; i < len(keyvals); i += 2 { - if _, ok := keyvals[i].(*levelValue); ok { - return l.next.Log(keyvals...) - } - } - kvs := make([]interface{}, len(keyvals)+2) - kvs[0], kvs[1] = key, l.level - copy(kvs[2:], keyvals) - return l.next.Log(kvs...) -} - -// Value is the interface that each of the canonical level values implement. -// It contains unexported methods that prevent types from other packages from -// implementing it and guaranteeing that NewFilter can distinguish the levels -// defined in this package from all other values. -type Value interface { - String() string - levelVal() -} - -// Key returns the unique key added to log events by the loggers in this -// package. -func Key() interface{} { return key } - -// ErrorValue returns the unique value added to log events by Error. -func ErrorValue() Value { return errorValue } - -// WarnValue returns the unique value added to log events by Warn. -func WarnValue() Value { return warnValue } - -// InfoValue returns the unique value added to log events by Info. -func InfoValue() Value { return infoValue } - -// DebugValue returns the unique value added to log events by Debug. -func DebugValue() Value { return debugValue } - -var ( - // key is of type interface{} so that it allocates once during package - // initialization and avoids allocating every time the value is added to a - // []interface{} later. 
- key interface{} = "level" - - errorValue = &levelValue{level: levelError, name: "error"} - warnValue = &levelValue{level: levelWarn, name: "warn"} - infoValue = &levelValue{level: levelInfo, name: "info"} - debugValue = &levelValue{level: levelDebug, name: "debug"} -) - -type level byte - -const ( - levelDebug level = 1 << iota - levelInfo - levelWarn - levelError -) - -type levelValue struct { - name string - level -} - -func (v *levelValue) String() string { return v.name } -func (v *levelValue) levelVal() {} diff --git a/vendor/github.com/go-kit/log/log.go b/vendor/github.com/go-kit/log/log.go deleted file mode 100644 index 62e11adace..0000000000 --- a/vendor/github.com/go-kit/log/log.go +++ /dev/null @@ -1,179 +0,0 @@ -package log - -import "errors" - -// Logger is the fundamental interface for all log operations. Log creates a -// log event from keyvals, a variadic sequence of alternating keys and values. -// Implementations must be safe for concurrent use by multiple goroutines. In -// particular, any implementation of Logger that appends to keyvals or -// modifies or retains any of its elements must make a copy first. -type Logger interface { - Log(keyvals ...interface{}) error -} - -// ErrMissingValue is appended to keyvals slices with odd length to substitute -// the missing value. -var ErrMissingValue = errors.New("(MISSING)") - -// With returns a new contextual logger with keyvals prepended to those passed -// to calls to Log. If logger is also a contextual logger created by With, -// WithPrefix, or WithSuffix, keyvals is appended to the existing context. -// -// The returned Logger replaces all value elements (odd indexes) containing a -// Valuer with their generated value for each call to its Log method. -func With(logger Logger, keyvals ...interface{}) Logger { - if len(keyvals) == 0 { - return logger - } - l := newContext(logger) - kvs := append(l.keyvals, keyvals...) - if len(kvs)%2 != 0 { - kvs = append(kvs, ErrMissingValue) - } - return &context{ - logger: l.logger, - // Limiting the capacity of the stored keyvals ensures that a new - // backing array is created if the slice must grow in Log or With. - // Using the extra capacity without copying risks a data race that - // would violate the Logger interface contract. - keyvals: kvs[:len(kvs):len(kvs)], - hasValuer: l.hasValuer || containsValuer(keyvals), - sKeyvals: l.sKeyvals, - sHasValuer: l.sHasValuer, - } -} - -// WithPrefix returns a new contextual logger with keyvals prepended to those -// passed to calls to Log. If logger is also a contextual logger created by -// With, WithPrefix, or WithSuffix, keyvals is prepended to the existing context. -// -// The returned Logger replaces all value elements (odd indexes) containing a -// Valuer with their generated value for each call to its Log method. -func WithPrefix(logger Logger, keyvals ...interface{}) Logger { - if len(keyvals) == 0 { - return logger - } - l := newContext(logger) - // Limiting the capacity of the stored keyvals ensures that a new - // backing array is created if the slice must grow in Log or With. - // Using the extra capacity without copying risks a data race that - // would violate the Logger interface contract. - n := len(l.keyvals) + len(keyvals) - if len(keyvals)%2 != 0 { - n++ - } - kvs := make([]interface{}, 0, n) - kvs = append(kvs, keyvals...) - if len(kvs)%2 != 0 { - kvs = append(kvs, ErrMissingValue) - } - kvs = append(kvs, l.keyvals...) 
- return &context{ - logger: l.logger, - keyvals: kvs, - hasValuer: l.hasValuer || containsValuer(keyvals), - sKeyvals: l.sKeyvals, - sHasValuer: l.sHasValuer, - } -} - -// WithSuffix returns a new contextual logger with keyvals appended to those -// passed to calls to Log. If logger is also a contextual logger created by -// With, WithPrefix, or WithSuffix, keyvals is appended to the existing context. -// -// The returned Logger replaces all value elements (odd indexes) containing a -// Valuer with their generated value for each call to its Log method. -func WithSuffix(logger Logger, keyvals ...interface{}) Logger { - if len(keyvals) == 0 { - return logger - } - l := newContext(logger) - // Limiting the capacity of the stored keyvals ensures that a new - // backing array is created if the slice must grow in Log or With. - // Using the extra capacity without copying risks a data race that - // would violate the Logger interface contract. - n := len(l.sKeyvals) + len(keyvals) - if len(keyvals)%2 != 0 { - n++ - } - kvs := make([]interface{}, 0, n) - kvs = append(kvs, keyvals...) - if len(kvs)%2 != 0 { - kvs = append(kvs, ErrMissingValue) - } - kvs = append(l.sKeyvals, kvs...) - return &context{ - logger: l.logger, - keyvals: l.keyvals, - hasValuer: l.hasValuer, - sKeyvals: kvs, - sHasValuer: l.sHasValuer || containsValuer(keyvals), - } -} - -// context is the Logger implementation returned by With, WithPrefix, and -// WithSuffix. It wraps a Logger and holds keyvals that it includes in all -// log events. Its Log method calls bindValues to generate values for each -// Valuer in the context keyvals. -// -// A context must always have the same number of stack frames between calls to -// its Log method and the eventual binding of Valuers to their value. This -// requirement comes from the functional requirement to allow a context to -// resolve application call site information for a Caller stored in the -// context. To do this we must be able to predict the number of logging -// functions on the stack when bindValues is called. -// -// Two implementation details provide the needed stack depth consistency. -// -// 1. newContext avoids introducing an additional layer when asked to -// wrap another context. -// 2. With, WithPrefix, and WithSuffix avoid introducing an additional -// layer by returning a newly constructed context with a merged keyvals -// rather than simply wrapping the existing context. -type context struct { - logger Logger - keyvals []interface{} - sKeyvals []interface{} // suffixes - hasValuer bool - sHasValuer bool -} - -func newContext(logger Logger) *context { - if c, ok := logger.(*context); ok { - return c - } - return &context{logger: logger} -} - -// Log replaces all value elements (odd indexes) containing a Valuer in the -// stored context with their generated value, appends keyvals, and passes the -// result to the wrapped Logger. -func (l *context) Log(keyvals ...interface{}) error { - kvs := append(l.keyvals, keyvals...) - if len(kvs)%2 != 0 { - kvs = append(kvs, ErrMissingValue) - } - if l.hasValuer { - // If no keyvals were appended above then we must copy l.keyvals so - // that future log events will reevaluate the stored Valuers. - if len(keyvals) == 0 { - kvs = append([]interface{}{}, l.keyvals...) - } - bindValues(kvs[:(len(l.keyvals))]) - } - kvs = append(kvs, l.sKeyvals...) - if l.sHasValuer { - bindValues(kvs[len(kvs)-len(l.sKeyvals):]) - } - return l.logger.Log(kvs...) -} - -// LoggerFunc is an adapter to allow use of ordinary functions as Loggers. 
If -// f is a function with the appropriate signature, LoggerFunc(f) is a Logger -// object that calls f. -type LoggerFunc func(...interface{}) error - -// Log implements Logger by calling f(keyvals...). -func (f LoggerFunc) Log(keyvals ...interface{}) error { - return f(keyvals...) -} diff --git a/vendor/github.com/go-kit/log/logfmt_logger.go b/vendor/github.com/go-kit/log/logfmt_logger.go deleted file mode 100644 index a00305298b..0000000000 --- a/vendor/github.com/go-kit/log/logfmt_logger.go +++ /dev/null @@ -1,62 +0,0 @@ -package log - -import ( - "bytes" - "io" - "sync" - - "github.com/go-logfmt/logfmt" -) - -type logfmtEncoder struct { - *logfmt.Encoder - buf bytes.Buffer -} - -func (l *logfmtEncoder) Reset() { - l.Encoder.Reset() - l.buf.Reset() -} - -var logfmtEncoderPool = sync.Pool{ - New: func() interface{} { - var enc logfmtEncoder - enc.Encoder = logfmt.NewEncoder(&enc.buf) - return &enc - }, -} - -type logfmtLogger struct { - w io.Writer -} - -// NewLogfmtLogger returns a logger that encodes keyvals to the Writer in -// logfmt format. Each log event produces no more than one call to w.Write. -// The passed Writer must be safe for concurrent use by multiple goroutines if -// the returned Logger will be used concurrently. -func NewLogfmtLogger(w io.Writer) Logger { - return &logfmtLogger{w} -} - -func (l logfmtLogger) Log(keyvals ...interface{}) error { - enc := logfmtEncoderPool.Get().(*logfmtEncoder) - enc.Reset() - defer logfmtEncoderPool.Put(enc) - - if err := enc.EncodeKeyvals(keyvals...); err != nil { - return err - } - - // Add newline to the end of the buffer - if err := enc.EndRecord(); err != nil { - return err - } - - // The Logger interface requires implementations to be safe for concurrent - // use by multiple goroutines. For this implementation that means making - // only one call to l.w.Write() for each call to Log. - if _, err := l.w.Write(enc.buf.Bytes()); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/go-kit/log/nop_logger.go b/vendor/github.com/go-kit/log/nop_logger.go deleted file mode 100644 index 1047d626c4..0000000000 --- a/vendor/github.com/go-kit/log/nop_logger.go +++ /dev/null @@ -1,8 +0,0 @@ -package log - -type nopLogger struct{} - -// NewNopLogger returns a logger that doesn't do anything. -func NewNopLogger() Logger { return nopLogger{} } - -func (nopLogger) Log(...interface{}) error { return nil } diff --git a/vendor/github.com/go-kit/log/staticcheck.conf b/vendor/github.com/go-kit/log/staticcheck.conf deleted file mode 100644 index 528438b97d..0000000000 --- a/vendor/github.com/go-kit/log/staticcheck.conf +++ /dev/null @@ -1 +0,0 @@ -checks = ["all"] diff --git a/vendor/github.com/go-kit/log/stdlib.go b/vendor/github.com/go-kit/log/stdlib.go deleted file mode 100644 index 0338edbe2b..0000000000 --- a/vendor/github.com/go-kit/log/stdlib.go +++ /dev/null @@ -1,151 +0,0 @@ -package log - -import ( - "bytes" - "io" - "log" - "regexp" - "strings" -) - -// StdlibWriter implements io.Writer by invoking the stdlib log.Print. It's -// designed to be passed to a Go kit logger as the writer, for cases where -// it's necessary to redirect all Go kit log output to the stdlib logger. -// -// If you have any choice in the matter, you shouldn't use this. Prefer to -// redirect the stdlib log to the Go kit logger via NewStdlibAdapter. -type StdlibWriter struct{} - -// Write implements io.Writer. 
-func (w StdlibWriter) Write(p []byte) (int, error) {
-	log.Print(strings.TrimSpace(string(p)))
-	return len(p), nil
-}
-
-// StdlibAdapter wraps a Logger and allows it to be passed to the stdlib
-// logger's SetOutput. It will extract date/timestamps, filenames, and
-// messages, and place them under relevant keys.
-type StdlibAdapter struct {
-	Logger
-	timestampKey    string
-	fileKey         string
-	messageKey      string
-	prefix          string
-	joinPrefixToMsg bool
-}
-
-// StdlibAdapterOption sets a parameter for the StdlibAdapter.
-type StdlibAdapterOption func(*StdlibAdapter)
-
-// TimestampKey sets the key for the timestamp field. By default, it's "ts".
-func TimestampKey(key string) StdlibAdapterOption {
-	return func(a *StdlibAdapter) { a.timestampKey = key }
-}
-
-// FileKey sets the key for the file and line field. By default, it's "caller".
-func FileKey(key string) StdlibAdapterOption {
-	return func(a *StdlibAdapter) { a.fileKey = key }
-}
-
-// MessageKey sets the key for the actual log message. By default, it's "msg".
-func MessageKey(key string) StdlibAdapterOption {
-	return func(a *StdlibAdapter) { a.messageKey = key }
-}
-
-// Prefix configures the adapter to parse a prefix from stdlib log events. If
-// you provide a non-empty prefix to the stdlib logger, then you should provide
-// that same prefix to the adapter via this option.
-//
-// By default, the prefix isn't included in the msg key. Set joinPrefixToMsg to
-// true if you want to include the parsed prefix in the msg.
-func Prefix(prefix string, joinPrefixToMsg bool) StdlibAdapterOption {
-	return func(a *StdlibAdapter) { a.prefix = prefix; a.joinPrefixToMsg = joinPrefixToMsg }
-}
-
-// NewStdlibAdapter returns a new StdlibAdapter wrapper around the passed
-// logger. It's designed to be passed to log.SetOutput.
-func NewStdlibAdapter(logger Logger, options ...StdlibAdapterOption) io.Writer {
-	a := StdlibAdapter{
-		Logger:       logger,
-		timestampKey: "ts",
-		fileKey:      "caller",
-		messageKey:   "msg",
-	}
-	for _, option := range options {
-		option(&a)
-	}
-	return a
-}
-
-func (a StdlibAdapter) Write(p []byte) (int, error) {
-	p = a.handlePrefix(p)
-
-	result := subexps(p)
-	keyvals := []interface{}{}
-	var timestamp string
-	if date, ok := result["date"]; ok && date != "" {
-		timestamp = date
-	}
-	if time, ok := result["time"]; ok && time != "" {
-		if timestamp != "" {
-			timestamp += " "
-		}
-		timestamp += time
-	}
-	if timestamp != "" {
-		keyvals = append(keyvals, a.timestampKey, timestamp)
-	}
-	if file, ok := result["file"]; ok && file != "" {
-		keyvals = append(keyvals, a.fileKey, file)
-	}
-	if msg, ok := result["msg"]; ok {
-		msg = a.handleMessagePrefix(msg)
-		keyvals = append(keyvals, a.messageKey, msg)
-	}
-	if err := a.Logger.Log(keyvals...); err != nil {
-		return 0, err
-	}
-	return len(p), nil
-}
-
-func (a StdlibAdapter) handlePrefix(p []byte) []byte {
-	if a.prefix != "" {
-		p = bytes.TrimPrefix(p, []byte(a.prefix))
-	}
-	return p
-}
-
-func (a StdlibAdapter) handleMessagePrefix(msg string) string {
-	if a.prefix == "" {
-		return msg
-	}
-
-	msg = strings.TrimPrefix(msg, a.prefix)
-	if a.joinPrefixToMsg {
-		msg = a.prefix + msg
-	}
-	return msg
-}
-
-const (
-	logRegexpDate = `(?P<date>[0-9]{4}/[0-9]{2}/[0-9]{2})?[ ]?`
-	logRegexpTime = `(?P<time>

- See changes to v1.16.x - - -* July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7) - * zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829 - * s2: add GetBufferCapacity() method by @GiedriusS in https://github.com/klauspost/compress/pull/832 - -* June 13, 2023 - [v1.16.6](https://github.com/klauspost/compress/releases/tag/v1.16.6) - * zstd: correctly ignore WithEncoderPadding(1) by @ianlancetaylor in https://github.com/klauspost/compress/pull/806 - * zstd: Add amd64 match length assembly https://github.com/klauspost/compress/pull/824 - * gzhttp: Handle informational headers by @rtribotte in https://github.com/klauspost/compress/pull/815 - * s2: Improve Better compression slightly https://github.com/klauspost/compress/pull/663 - -* Apr 16, 2023 - [v1.16.5](https://github.com/klauspost/compress/releases/tag/v1.16.5) - * zstd: readByte needs to use io.ReadFull by @jnoxon in https://github.com/klauspost/compress/pull/802 - * gzip: Fix WriterTo after initial read https://github.com/klauspost/compress/pull/804 - -* Apr 5, 2023 - [v1.16.4](https://github.com/klauspost/compress/releases/tag/v1.16.4) - * zstd: Improve zstd best efficiency by @greatroar and @klauspost in https://github.com/klauspost/compress/pull/784 - * zstd: Respect WithAllLitEntropyCompression https://github.com/klauspost/compress/pull/792 - * zstd: Fix amd64 not always detecting corrupt data https://github.com/klauspost/compress/pull/785 - * zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795 - * s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779 - * s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780 - * gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 - -* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1) - * zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776 - * gzhttp: Add optional [BREACH mitigation](https://github.com/klauspost/compress/tree/master/gzhttp#breach-mitigation). https://github.com/klauspost/compress/pull/762 https://github.com/klauspost/compress/pull/768 https://github.com/klauspost/compress/pull/769 https://github.com/klauspost/compress/pull/770 https://github.com/klauspost/compress/pull/767 - * s2: Add Intel LZ4s converter https://github.com/klauspost/compress/pull/766 - * zstd: Minor bug fixes https://github.com/klauspost/compress/pull/771 https://github.com/klauspost/compress/pull/772 https://github.com/klauspost/compress/pull/773 - * huff0: Speed up compress1xDo by @greatroar in https://github.com/klauspost/compress/pull/774 - -* Feb 26, 2023 - [v1.16.0](https://github.com/klauspost/compress/releases/tag/v1.16.0) - * s2: Add [Dictionary](https://github.com/klauspost/compress/tree/master/s2#dictionaries) support. https://github.com/klauspost/compress/pull/685 - * s2: Add Compression Size Estimate. https://github.com/klauspost/compress/pull/752 - * s2: Add support for custom stream encoder. https://github.com/klauspost/compress/pull/755 - * s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748 - * s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747 - * s2c/s2sx: Use concurrent decoding. 
https://github.com/klauspost/compress/pull/746 -
-</details>
-
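The v1.16.x entries above center on the zstd encoder and decoder paths. For orientation, here is a minimal round-trip sketch of the stateless `EncodeAll`/`DecodeAll` API from `github.com/klauspost/compress/zstd`; the payload is illustrative and error handling is kept deliberately blunt:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	src := []byte("hello, hello, hello compression") // illustrative payload

	// An Encoder created with a nil writer can still be used for
	// stateless, in-memory compression via EncodeAll.
	enc, err := zstd.NewWriter(nil)
	if err != nil {
		panic(err)
	}
	defer enc.Close()
	compressed := enc.EncodeAll(src, nil)

	// Likewise, a Decoder with a nil reader supports DecodeAll.
	dec, err := zstd.NewReader(nil)
	if err != nil {
		panic(err)
	}
	defer dec.Close()
	out, err := dec.DecodeAll(compressed, nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d -> %d bytes, round-trip ok: %v\n",
		len(src), len(compressed), string(out) == string(src))
}
```

Both calls accept an optional destination slice as the second argument, so buffers can be reused across calls to avoid allocations.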
- See changes to v1.15.x - -* Jan 21st, 2023 (v1.15.15) - * deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739 - * zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728 - * zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745 - * gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740 - -* Jan 3rd, 2023 (v1.15.14) - - * flate: Improve speed in big stateless blocks https://github.com/klauspost/compress/pull/718 - * zstd: Minor speed tweaks by @greatroar in https://github.com/klauspost/compress/pull/716 https://github.com/klauspost/compress/pull/720 - * export NoGzipResponseWriter for custom ResponseWriter wrappers by @harshavardhana in https://github.com/klauspost/compress/pull/722 - * s2: Add example for indexing and existing stream https://github.com/klauspost/compress/pull/723 - -* Dec 11, 2022 (v1.15.13) - * zstd: Add [MaxEncodedSize](https://pkg.go.dev/github.com/klauspost/compress@v1.15.13/zstd#Encoder.MaxEncodedSize) to encoder https://github.com/klauspost/compress/pull/691 - * zstd: Various tweaks and improvements https://github.com/klauspost/compress/pull/693 https://github.com/klauspost/compress/pull/695 https://github.com/klauspost/compress/pull/696 https://github.com/klauspost/compress/pull/701 https://github.com/klauspost/compress/pull/702 https://github.com/klauspost/compress/pull/703 https://github.com/klauspost/compress/pull/704 https://github.com/klauspost/compress/pull/705 https://github.com/klauspost/compress/pull/706 https://github.com/klauspost/compress/pull/707 https://github.com/klauspost/compress/pull/708 - -* Oct 26, 2022 (v1.15.12) - - * zstd: Tweak decoder allocs. https://github.com/klauspost/compress/pull/680 - * gzhttp: Always delete `HeaderNoCompression` https://github.com/klauspost/compress/pull/683 - -* Sept 26, 2022 (v1.15.11) - - * flate: Improve level 1-3 compression https://github.com/klauspost/compress/pull/678 - * zstd: Improve "best" compression by @nightwolfz in https://github.com/klauspost/compress/pull/677 - * zstd: Fix+reduce decompression allocations https://github.com/klauspost/compress/pull/668 - * zstd: Fix non-effective noescape tag https://github.com/klauspost/compress/pull/667 - -* Sept 16, 2022 (v1.15.10) - - * zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649 - * Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651 - * flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656 - * zstd: Improve "better" compression https://github.com/klauspost/compress/pull/657 - * s2: Improve "best" compression https://github.com/klauspost/compress/pull/658 - * s2: Improve "better" compression. 
https://github.com/klauspost/compress/pull/635 - * s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646 - * Use arrays for constant size copies https://github.com/klauspost/compress/pull/659 - -* July 21, 2022 (v1.15.9) - - * zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645 - * zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644 - * zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643 - -* July 13, 2022 (v1.15.8) - - * gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641 - * s2: Add Index header trim/restore https://github.com/klauspost/compress/pull/638 - * zstd: Optimize seqdeq amd64 asm by @greatroar in https://github.com/klauspost/compress/pull/636 - * zstd: Improve decoder memcopy https://github.com/klauspost/compress/pull/637 - * huff0: Pass a single bitReader pointer to asm by @greatroar in https://github.com/klauspost/compress/pull/634 - * zstd: Branchless getBits for amd64 w/o BMI2 by @greatroar in https://github.com/klauspost/compress/pull/640 - * gzhttp: Remove header before writing https://github.com/klauspost/compress/pull/639 - -* June 29, 2022 (v1.15.7) - - * s2: Fix absolute forward seeks https://github.com/klauspost/compress/pull/633 - * zip: Merge upstream https://github.com/klauspost/compress/pull/631 - * zip: Re-add zip64 fix https://github.com/klauspost/compress/pull/624 - * zstd: translate fseDecoder.buildDtable into asm by @WojciechMula in https://github.com/klauspost/compress/pull/598 - * flate: Faster histograms https://github.com/klauspost/compress/pull/620 - * deflate: Use compound hcode https://github.com/klauspost/compress/pull/622 - -* June 3, 2022 (v1.15.6) - * s2: Improve coding for long, close matches https://github.com/klauspost/compress/pull/613 - * s2c: Add Snappy/S2 stream recompression https://github.com/klauspost/compress/pull/611 - * zstd: Always use configured block size https://github.com/klauspost/compress/pull/605 - * zstd: Fix incorrect hash table placement for dict encoding in default https://github.com/klauspost/compress/pull/606 - * zstd: Apply default config to ZipDecompressor without options https://github.com/klauspost/compress/pull/608 - * gzhttp: Exclude more common archive formats https://github.com/klauspost/compress/pull/612 - * s2: Add ReaderIgnoreCRC https://github.com/klauspost/compress/pull/609 - * s2: Remove sanity load on index creation https://github.com/klauspost/compress/pull/607 - * snappy: Use dedicated function for scoring https://github.com/klauspost/compress/pull/614 - * s2c+s2d: Use official snappy framed extension https://github.com/klauspost/compress/pull/610 - -* May 25, 2022 (v1.15.5) - * s2: Add concurrent stream decompression https://github.com/klauspost/compress/pull/602 - * s2: Fix final emit oob read crash on amd64 https://github.com/klauspost/compress/pull/601 - * huff0: asm implementation of Decompress1X by @WojciechMula https://github.com/klauspost/compress/pull/596 - * zstd: Use 1 less goroutine for stream decoding https://github.com/klauspost/compress/pull/588 - * zstd: Copy literal in 16 byte blocks when possible https://github.com/klauspost/compress/pull/592 - * zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599 - * zstd: faster next state update in BMI2 version of decode by @WojciechMula in 
https://github.com/klauspost/compress/pull/593 - * huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586 - * flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590 - - -* May 11, 2022 (v1.15.4) - * huff0: decompress directly into output by @WojciechMula in [#577](https://github.com/klauspost/compress/pull/577) - * inflate: Keep dict on stack [#581](https://github.com/klauspost/compress/pull/581) - * zstd: Faster decoding memcopy in asm [#583](https://github.com/klauspost/compress/pull/583) - * zstd: Fix ignored crc [#580](https://github.com/klauspost/compress/pull/580) - -* May 5, 2022 (v1.15.3) - * zstd: Allow ignoring checksum checking by @WojciechMula [#572](https://github.com/klauspost/compress/pull/572) - * s2: Fix incorrect seek for io.SeekEnd in [#575](https://github.com/klauspost/compress/pull/575) - -* Apr 26, 2022 (v1.15.2) - * zstd: Add x86-64 assembly for decompression on streams and blocks. Contributed by [@WojciechMula](https://github.com/WojciechMula). Typically 2x faster. [#528](https://github.com/klauspost/compress/pull/528) [#531](https://github.com/klauspost/compress/pull/531) [#545](https://github.com/klauspost/compress/pull/545) [#537](https://github.com/klauspost/compress/pull/537) - * zstd: Add options to ZipDecompressor and fixes [#539](https://github.com/klauspost/compress/pull/539) - * s2: Use sorted search for index [#555](https://github.com/klauspost/compress/pull/555) - * Minimum version is Go 1.16, added CI test on 1.18. - -* Mar 11, 2022 (v1.15.1) - * huff0: Add x86 assembly of Decode4X by @WojciechMula in [#512](https://github.com/klauspost/compress/pull/512) - * zstd: Reuse zip decoders in [#514](https://github.com/klauspost/compress/pull/514) - * zstd: Detect extra block data and report as corrupted in [#520](https://github.com/klauspost/compress/pull/520) - * zstd: Handle zero sized frame content size stricter in [#521](https://github.com/klauspost/compress/pull/521) - * zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523) - -* Mar 3, 2022 (v1.15.0) - * zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498) - * zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505) - * huff0: Prevent single blocks exceeding 16 bits by @klauspost in [#507](https://github.com/klauspost/compress/pull/507) - * flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509) - * gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400) - * gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510) - -Both compression and decompression now support "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines. - -Stream decompression is now faster in asynchronous mode, since goroutine allocation splits the workload much more effectively. On typical streams this will use 2 cores fully for decompression. When a stream has finished decoding, no goroutines will be left over, so decoders can now safely be pooled and still be garbage collected. - -While the release has been extensively tested, it is recommended to test when upgrading. - -
- See changes to v1.14.x - -* Feb 22, 2022 (v1.14.4) - * flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503) - * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502) - * zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501) - * huff0: Use static decompression buffer, up to 30% faster, by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500) - -* Feb 17, 2022 (v1.14.3) - * flate: Improve fastest levels' compression speed, ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478) - * flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483) - * s2: Faster compression with Go v1.18 and amd64 microarch level 3+. [#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486) - -* Jan 25, 2022 (v1.14.2) - * zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476) - * zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469) - * zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470) - * zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472) - * flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473) - * zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475) - -* Jan 11, 2022 (v1.14.1) - * s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462) - * flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458) - * zstd: Performance improvement in [#420](https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468) - * zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464) - * Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445) -
- See changes to v1.13.x - -* Aug 30, 2021 (v1.13.5) - * gz/zlib/flate: Alias stdlib errors [#425](https://github.com/klauspost/compress/pull/425) - * s2: Add block support to commandline tools [#413](https://github.com/klauspost/compress/pull/413) - * zstd: pooledZipWriter should return Writers to the same pool [#426](https://github.com/klauspost/compress/pull/426) - * Removed golang/snappy as external dependency for tests [#421](https://github.com/klauspost/compress/pull/421) - -* Aug 12, 2021 (v1.13.4) - * Add [snappy replacement package](https://github.com/klauspost/compress/tree/master/snappy). - * zstd: Fix incorrect encoding in "best" mode [#415](https://github.com/klauspost/compress/pull/415) - -* Aug 3, 2021 (v1.13.3) - * zstd: Improve Best compression [#404](https://github.com/klauspost/compress/pull/404) - * zstd: Fix WriteTo error forwarding [#411](https://github.com/klauspost/compress/pull/411) - * gzhttp: Return http.HandlerFunc instead of http.Handler. Unlikely breaking change. [#406](https://github.com/klauspost/compress/pull/406) - * s2sx: Fix max size error [#399](https://github.com/klauspost/compress/pull/399) - * zstd: Add optional stream content size on reset [#401](https://github.com/klauspost/compress/pull/401) - * zstd: use SpeedBestCompression for level >= 10 [#410](https://github.com/klauspost/compress/pull/410) - -* Jun 14, 2021 (v1.13.1) - * s2: Add full Snappy output support [#396](https://github.com/klauspost/compress/pull/396) - * zstd: Add configurable [Decoder window](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithDecoderMaxWindow) size [#394](https://github.com/klauspost/compress/pull/394) - * gzhttp: Add header to skip compression [#389](https://github.com/klauspost/compress/pull/389) - * s2: Improve speed with bigger output margin [#395](https://github.com/klauspost/compress/pull/395) - -* Jun 3, 2021 (v1.13.0) - * Added [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp#gzip-handler) which allows wrapping HTTP servers and clients with GZIP compressors. - * zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382) - * zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380) -
- See changes to v1.12.x - -* May 25, 2021 (v1.12.3) - * deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374) - * deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375) - * zstd: Forward read errors [#373](https://github.com/klauspost/compress/pull/373) - -* Apr 27, 2021 (v1.12.2) - * zstd: Improve better/best compression [#360](https://github.com/klauspost/compress/pull/360) [#364](https://github.com/klauspost/compress/pull/364) [#365](https://github.com/klauspost/compress/pull/365) - * zstd: Add helpers to compress/decompress zstd inside zip files [#363](https://github.com/klauspost/compress/pull/363) - * deflate: Improve level 5+6 compression [#367](https://github.com/klauspost/compress/pull/367) - * s2: Improve better/best compression [#358](https://github.com/klauspost/compress/pull/358) [#359](https://github.com/klauspost/compress/pull/359) - * s2: Load after checking src limit on amd64. [#362](https://github.com/klauspost/compress/pull/362) - * s2sx: Limit max executable size [#368](https://github.com/klauspost/compress/pull/368) - -* Apr 14, 2021 (v1.12.1) - * snappy package removed. Upstream added as dependency. - * s2: Better compression in "best" mode [#353](https://github.com/klauspost/compress/pull/353) - * s2sx: Add stdin input and detect pre-compressed from signature [#352](https://github.com/klauspost/compress/pull/352) - * s2c/s2d: Add http as possible input [#348](https://github.com/klauspost/compress/pull/348) - * s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352) - * zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346) - * s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349) -
- See changes to v1.11.x - -* Mar 26, 2021 (v1.11.13) - * zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345) - * zstd: Add [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) encoder option [#336](https://github.com/klauspost/compress/pull/336) - * deflate: Improve entropy compression [#338](https://github.com/klauspost/compress/pull/338) - * s2: Clean up and minor performance improvement in best [#341](https://github.com/klauspost/compress/pull/341) - -* Mar 5, 2021 (v1.11.12) - * s2: Add `s2sx` binary that creates [self extracting archives](https://github.com/klauspost/compress/tree/master/s2#s2sx-self-extracting-archives). - * s2: Speed up decompression on non-assembly platforms [#328](https://github.com/klauspost/compress/pull/328) - -* Mar 1, 2021 (v1.11.9) - * s2: Add ARM64 decompression assembly. Around 2x output speed. [#324](https://github.com/klauspost/compress/pull/324) - * s2: Improve "better" speed and efficiency. [#325](https://github.com/klauspost/compress/pull/325) - * s2: Fix binaries. - -* Feb 25, 2021 (v1.11.8) - * s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended. - * s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315) - * s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322) - * zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314) - * zip: Fix zip64 headers. [#313](https://github.com/klauspost/compress/pull/313) - -* Jan 14, 2021 (v1.11.7) - * Use Bytes() interface to get bytes across packages. [#309](https://github.com/klauspost/compress/pull/309) - * s2: Add 'best' compression option. [#310](https://github.com/klauspost/compress/pull/310) - * s2: Add ReaderMaxBlockSize, changes `s2.NewReader` signature to include varargs. [#311](https://github.com/klauspost/compress/pull/311) - * s2: Fix crash on small better buffers. [#308](https://github.com/klauspost/compress/pull/308) - * s2: Clean up decoder. [#312](https://github.com/klauspost/compress/pull/312) - -* Jan 7, 2021 (v1.11.6) - * zstd: Make decoder allocations smaller [#306](https://github.com/klauspost/compress/pull/306) - * zstd: Free Decoder resources when Reset is called with a nil io.Reader [#305](https://github.com/klauspost/compress/pull/305) - -* Dec 20, 2020 (v1.11.4) - * zstd: Add Best compression mode [#304](https://github.com/klauspost/compress/pull/304) - * Add header decoder [#299](https://github.com/klauspost/compress/pull/299) - * s2: Add uncompressed stream option [#297](https://github.com/klauspost/compress/pull/297) - * Simplify/speed up small blocks with known max size. 
[#300](https://github.com/klauspost/compress/pull/300) - * zstd: Always reset literal dict encoder [#303](https://github.com/klauspost/compress/pull/303) - -* Nov 15, 2020 (v1.11.3) - * inflate: 10-15% faster decompression [#293](https://github.com/klauspost/compress/pull/293) - * zstd: Tweak DecodeAll default allocation [#295](https://github.com/klauspost/compress/pull/295) - -* Oct 11, 2020 (v1.11.2) - * s2: Fix out of bounds read in "better" block compression [#291](https://github.com/klauspost/compress/pull/291) - -* Oct 1, 2020 (v1.11.1) - * zstd: Set allLitEntropy true in default configuration [#286](https://github.com/klauspost/compress/pull/286) - -* Sept 8, 2020 (v1.11.0) - * zstd: Add experimental compression [dictionaries](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) [#281](https://github.com/klauspost/compress/pull/281) - * zstd: Fix mixed Write and ReadFrom calls [#282](https://github.com/klauspost/compress/pull/282) - * inflate/gz: Limit variable shifts, ~5% faster decompression [#274](https://github.com/klauspost/compress/pull/274) -
- See changes to v1.10.x - -* July 8, 2020 (v1.10.11) - * zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278) - * huff0: Also populate compression table when reading decoding table. [#275](https://github.com/klauspost/compress/pull/275) - -* June 23, 2020 (v1.10.10) - * zstd: Skip entropy compression in fastest mode when no matches. [#270](https://github.com/klauspost/compress/pull/270) - -* June 16, 2020 (v1.10.9): - * zstd: API change for specifying dictionaries. See [#268](https://github.com/klauspost/compress/pull/268) - * zip: update CreateHeaderRaw to handle zip64 fields. [#266](https://github.com/klauspost/compress/pull/266) - * Fuzzit tests removed. The service has been purchased and is no longer available. - -* June 5, 2020 (v1.10.8): - * 1.15x faster zstd block decompression. [#265](https://github.com/klauspost/compress/pull/265) - -* June 1, 2020 (v1.10.7): - * Added zstd decompression [dictionary support](https://github.com/klauspost/compress/tree/master/zstd#dictionaries) - * Increase zstd decompression speed up to 1.19x. [#259](https://github.com/klauspost/compress/pull/259) - * Remove internal reset call in zstd compression and reduce allocations. [#263](https://github.com/klauspost/compress/pull/263) - -* May 21, 2020: (v1.10.6) - * zstd: Reduce allocations while decoding. [#258](https://github.com/klauspost/compress/pull/258), [#252](https://github.com/klauspost/compress/pull/252) - * zstd: Stricter decompression checks. - -* April 12, 2020: (v1.10.5) - * s2-commands: Flush output when receiving SIGINT. [#239](https://github.com/klauspost/compress/pull/239) - -* Apr 8, 2020: (v1.10.4) - * zstd: Minor/special case optimizations. [#251](https://github.com/klauspost/compress/pull/251), [#250](https://github.com/klauspost/compress/pull/250), [#249](https://github.com/klauspost/compress/pull/249), [#247](https://github.com/klauspost/compress/pull/247) -* Mar 11, 2020: (v1.10.3) - * s2: Use S2 encoder in pure Go mode for Snappy output as well. [#245](https://github.com/klauspost/compress/pull/245) - * s2: Fix pure Go block encoder. [#244](https://github.com/klauspost/compress/pull/244) - * zstd: Added "better compression" mode. [#240](https://github.com/klauspost/compress/pull/240) - * zstd: Improve speed of fastest compression mode by 5-10% [#241](https://github.com/klauspost/compress/pull/241) - * zstd: Skip creating encoders when not needed. [#238](https://github.com/klauspost/compress/pull/238) - -* Feb 27, 2020: (v1.10.2) - * Close to 50% speedup in inflate (gzip/zip decompression). [#236](https://github.com/klauspost/compress/pull/236) [#234](https://github.com/klauspost/compress/pull/234) [#232](https://github.com/klauspost/compress/pull/232) - * Reduce deflate level 1-6 memory usage up to 59%. [#227](https://github.com/klauspost/compress/pull/227) - -* Feb 18, 2020: (v1.10.1) - * Fix zstd crash when resetting multiple times without sending data. [#226](https://github.com/klauspost/compress/pull/226) - * deflate: Fix dictionary use on level 1-6. [#224](https://github.com/klauspost/compress/pull/224) - * Remove deflate writer reference when closing. [#224](https://github.com/klauspost/compress/pull/224) - -* Feb 4, 2020: (v1.10.0) - * Add optional dictionary to [stateless deflate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc#StatelessDeflate). Breaking change, send `nil` for previous behaviour. 
[#216](https://github.com/klauspost/compress/pull/216) - * Fix buffer overflow on repeated small block deflate. [#218](https://github.com/klauspost/compress/pull/218) - * Allow copying content from an existing ZIP file without decompressing+compressing. [#214](https://github.com/klauspost/compress/pull/214) - * Added [S2](https://github.com/klauspost/compress/tree/master/s2#s2-compression) AMD64 assembler and various optimizations. Stream speed >10GB/s. [#186](https://github.com/klauspost/compress/pull/186) - -
- See changes prior to v1.10.0 - -* Jan 20, 2020 (v1.9.8) Optimize gzip/deflate with better size estimates and faster table generation. [#207](https://github.com/klauspost/compress/pull/207) by [luyu6056](https://github.com/luyu6056), [#206](https://github.com/klauspost/compress/pull/206). -* Jan 11, 2020: S2 Encode/Decode will use provided buffer if capacity is big enough. [#204](https://github.com/klauspost/compress/pull/204) -* Jan 5, 2020: (v1.9.7) Fix another zstd regression in v1.9.5 - v1.9.6 removed. -* Jan 4, 2020: (v1.9.6) Fixed a regression in v1.9.5 that caused corrupt zstd encodes in rare cases. -* Jan 4, 2020: Faster IO in [s2c + s2d commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) compression/decompression. [#192](https://github.com/klauspost/compress/pull/192) -* Dec 29, 2019: Removed v1.9.5 since fuzz tests showed a compatibility problem with the reference zstandard decoder. -* Dec 29, 2019: (v1.9.5) zstd: 10-20% faster block compression. [#199](https://github.com/klauspost/compress/pull/199) -* Dec 29, 2019: [zip](https://godoc.org/github.com/klauspost/compress/zip) package updated with latest Go features. -* Dec 29, 2019: zstd: Single segment flag conditions tweaked. [#197](https://github.com/klauspost/compress/pull/197) -* Dec 18, 2019: s2: Faster compression when ReadFrom is used. [#198](https://github.com/klauspost/compress/pull/198) -* Dec 10, 2019: s2: Fix repeat length output when just above the 16MB limit. -* Dec 10, 2019: zstd: Add function to get decoder as io.ReadCloser. [#191](https://github.com/klauspost/compress/pull/191) -* Dec 3, 2019: (v1.9.4) S2: limit max repeat length. [#188](https://github.com/klauspost/compress/pull/188) -* Dec 3, 2019: Add [WithNoEntropyCompression](https://godoc.org/github.com/klauspost/compress/zstd#WithNoEntropyCompression) to zstd [#187](https://github.com/klauspost/compress/pull/187) -* Dec 3, 2019: Reduce memory use for tests. Check for leaked goroutines. -* Nov 28, 2019 (v1.9.3) Fewer allocations in stateless deflate. -* Nov 28, 2019: 5-20% faster huff0 decode. Impacts zstd as well. [#184](https://github.com/klauspost/compress/pull/184) -* Nov 12, 2019 (v1.9.2) Added [Stateless Compression](#stateless-compression) for gzip/deflate. -* Nov 12, 2019: Fixed zstd decompression of large single blocks. [#180](https://github.com/klauspost/compress/pull/180) -* Nov 11, 2019: Set default [s2c](https://github.com/klauspost/compress/tree/master/s2#commandline-tools) block size to 4MB. -* Nov 11, 2019: Reduce inflate memory use by 1KB. -* Nov 10, 2019: Fewer allocations in deflate bit writer. -* Nov 10, 2019: Fix inconsistent error returned by zstd decoder. -* Oct 28, 2019 (v1.9.1) zstd: Fix crash when compressing blocks. [#174](https://github.com/klauspost/compress/pull/174) -* Oct 24, 2019 (v1.9.0) zstd: Fix rare data corruption [#173](https://github.com/klauspost/compress/pull/173) -* Oct 24, 2019 zstd: Fix huff0 out of buffer write [#171](https://github.com/klauspost/compress/pull/171) and always return errors [#172](https://github.com/klauspost/compress/pull/172) -* Oct 10, 2019: Big deflate rewrite, 30-40% faster with better compression [#105](https://github.com/klauspost/compress/pull/105) -
- See changes prior to v1.9.0 - -* Oct 10, 2019: (v1.8.6) zstd: Allow partial reads to get flushed data. [#169](https://github.com/klauspost/compress/pull/169) -* Oct 3, 2019: Fix inconsistent results on broken zstd streams. -* Sep 25, 2019: Added `-rm` (remove source files) and `-q` (no output except errors) to `s2c` and `s2d` [commands](https://github.com/klauspost/compress/tree/master/s2#commandline-tools). -* Sep 16, 2019: (v1.8.4) Add `s2c` and `s2d` [commandline tools](https://github.com/klauspost/compress/tree/master/s2#commandline-tools). -* Sep 10, 2019: (v1.8.3) Fix s2 decoder [Skip](https://godoc.org/github.com/klauspost/compress/s2#Reader.Skip). -* Sep 7, 2019: zstd: Added [WithWindowSize](https://godoc.org/github.com/klauspost/compress/zstd#WithWindowSize), contributed by [ianwilkes](https://github.com/ianwilkes). -* Sep 5, 2019: (v1.8.2) Add [WithZeroFrames](https://godoc.org/github.com/klauspost/compress/zstd#WithZeroFrames) which adds full zero payload block encoding option. -* Sep 5, 2019: Lazy initialization of zstandard predefined en/decoder tables. -* Aug 26, 2019: (v1.8.1) S2: 1-2% compression increase in "better" compression mode. -* Aug 26, 2019: zstd: Check maximum size of Huffman 1X compressed literals while decoding. -* Aug 24, 2019: (v1.8.0) Added [S2 compression](https://github.com/klauspost/compress/tree/master/s2#s2-compression), a high performance replacement for Snappy. -* Aug 21, 2019: (v1.7.6) Fixed minor issues found by fuzzer. One could lead to zstd not decompressing. -* Aug 18, 2019: Add [fuzzit](https://fuzzit.dev/) continuous fuzzing. -* Aug 14, 2019: zstd: Skip incompressible data 2x faster. [#147](https://github.com/klauspost/compress/pull/147) -* Aug 4, 2019 (v1.7.5): Better literal compression. [#146](https://github.com/klauspost/compress/pull/146) -* Aug 4, 2019: Faster zstd compression. [#143](https://github.com/klauspost/compress/pull/143) [#144](https://github.com/klauspost/compress/pull/144) -* Aug 4, 2019: Faster zstd decompression. [#145](https://github.com/klauspost/compress/pull/145) [#143](https://github.com/klauspost/compress/pull/143) [#142](https://github.com/klauspost/compress/pull/142) -* July 15, 2019 (v1.7.4): Fix double EOF block in rare cases on zstd encoder. -* July 15, 2019 (v1.7.3): Minor speedup/compression increase in default zstd encoder. -* July 14, 2019: zstd decoder: Fix decompression error on multiple uses with mixed content. -* July 7, 2019 (v1.7.2): Snappy update, zstd decoder potential race fix. -* June 17, 2019: zstd decompression bugfix. -* June 17, 2019: Fix 32-bit builds. -* June 17, 2019: Easier use in modules (fewer dependencies). -* June 9, 2019: New stronger "default" [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression mode. Matches zstd default compression ratio. -* June 5, 2019: 20-40% higher throughput in [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and better compression. -* June 5, 2019: deflate/gzip compression: Reduce memory usage of lower compression levels. -* June 2, 2019: Added [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression! -* May 25, 2019: deflate/gzip: 10% faster bit writer, mostly visible in lower levels. -* Apr 22, 2019: [zstd](https://github.com/klauspost/compress/tree/master/zstd#zstd) decompression added. -* Aug 1, 2018: Added [huff0 README](https://github.com/klauspost/compress/tree/master/huff0#huff0-entropy-compression).
-* Jul 8, 2018: Added [Performance Update 2018](#performance-update-2018) below. -* Jun 23, 2018: Merged [Go 1.11 inflate optimizations](https://go-review.googlesource.com/c/go/+/102235). Go 1.9 is now required. Backwards compatible version tagged with [v1.3.0](https://github.com/klauspost/compress/releases/tag/v1.3.0). -* Apr 2, 2018: Added [huff0](https://godoc.org/github.com/klauspost/compress/huff0) en/decoder. Experimental for now, API may change. -* Mar 4, 2018: Added [FSE Entropy](https://godoc.org/github.com/klauspost/compress/fse) en/decoder. Experimental for now, API may change. -* Nov 3, 2017: Add compression [Estimate](https://godoc.org/github.com/klauspost/compress#Estimate) function. -* May 28, 2017: Reduce allocations when resetting decoder. -* Apr 02, 2017: Change back to official crc32, since changes were merged in Go 1.7. -* Jan 14, 2017: Reduce stack pressure due to array copies. See [Issue #18625](https://github.com/golang/go/issues/18625). -* Oct 25, 2016: Level 2-4 have been rewritten and now offer significantly better performance than before. -* Oct 20, 2016: Port zlib changes from Go 1.7 to fix zlib writer issue. Please update. -* Oct 16, 2016: Go 1.7 changes merged. Apples to apples, this package is a few percent faster, but has a significantly better balance between speed and compression per level. -* Mar 24, 2016: Always attempt Huffman encoding on level 4-7. This improves base64-encoded data compression. -* Mar 24, 2016: Small speedup for level 1-3. -* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster. -* Feb 19, 2016: Handle small payloads faster in level 1-3. -* Feb 19, 2016: Added faster level 2 + 3 compression modes. -* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5. -* Feb 14, 2016: Snappy: Merge upstream changes. -* Feb 14, 2016: Snappy: Fix aggressive skipping. -* Feb 14, 2016: Snappy: Update benchmark. -* Feb 13, 2016: Deflate: Fixed assembler problem that could lead to sub-optimal compression. -* Feb 12, 2016: Snappy: Added AMD64 SSE 4.2 optimizations to matching, which makes easy-to-compress material run faster. Typical speedup is around 25%. -* Feb 9, 2016: Added Snappy package fork. This version is 5-7% faster, much more on hard to compress content. -* Jan 30, 2016: Optimize level 1 to 3 by not considering static dictionary or storing uncompressed. ~4-5% speedup. -* Jan 16, 2016: Optimization on deflate level 1, 2, 3 compression. -* Jan 8 2016: Merge [CL 18317](https://go-review.googlesource.com/#/c/18317): fix reading, writing of zip64 archives. -* Dec 8 2015: Make level 1 and -2 deterministic even if write size differs. -* Dec 8 2015: Split encoding functions, so hashing and matching can potentially be inlined. 1-3% faster on AMD64. 5% faster on other platforms. -* Dec 8 2015: Fixed rare [one-byte out-of-bounds read](https://github.com/klauspost/compress/issues/20). Please update! -* Nov 23 2015: Optimization on token writer. ~2-4% faster. Contributed by [@dsnet](https://github.com/dsnet). -* Nov 20 2015: Small optimization to bit writer on 64 bit systems. -* Nov 17 2015: Fixed out-of-bounds errors if the underlying Writer returned an error. See [#15](https://github.com/klauspost/compress/issues/15). -* Nov 12 2015: Added [io.WriterTo](https://golang.org/pkg/io/#WriterTo) support to gzip/inflate.
-* Nov 11 2015: Merged [CL 16669](https://go-review.googlesource.com/#/c/16669/4): archive/zip: enable overriding (de)compressors per file. -* Oct 15 2015: Added skipping on uncompressible data. Random data speedup >5x. - -
- -# deflate usage - -The packages are drop-in replacements for standard libraries. Simply replace the import path to use them: - -| old import | new import | Documentation |--------------------|-----------------------------------------|--------------------| | `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc) | `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc) | `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc) | `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) - -* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a drop-in replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). - -You may also be interested in [pgzip](https://github.com/klauspost/pgzip), which is a drop-in replacement for gzip that supports multithreaded compression of big files, and the optimized [crc32](https://github.com/klauspost/crc32) package used by these packages. - -The packages contain the same functionality as the standard library, so you can use the godoc for that: [gzip](http://golang.org/pkg/compress/gzip/), [zip](http://golang.org/pkg/archive/zip/), [zlib](http://golang.org/pkg/compress/zlib/), [flate](http://golang.org/pkg/compress/flate/). - -Currently there is only a minor speedup on decompression (mostly CRC32 calculation). - -Memory usage is typically 1MB for a Writer. The stdlib is in the same range. -If you expect to have a lot of concurrently allocated Writers, consider using -the stateless compression described below. - -For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). - -To disable all assembly, add `-tags=noasm`. This works across all packages. - -# Stateless compression - -This package offers stateless compression as a special option for gzip/deflate. -It performs compression without maintaining any state between Write calls. - -This means there will be no memory kept between Write calls, but compression and speed will be suboptimal. - -This is only relevant in cases where you expect to run many thousands of compressors concurrently, -but with very little activity. This is *not* intended for regular web servers serving individual requests. - -Because of this, the size of actual Write calls will affect output size. - -In gzip, specify level `-3` / `gzip.StatelessCompression` to enable. - -For direct deflate use, NewStatelessWriter and StatelessDeflate are available. See the [documentation](https://godoc.org/github.com/klauspost/compress/flate#NewStatelessWriter). - -A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer: - -```go - // replace 'ioutil.Discard' with your output. - gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression) - if err != nil { - return err - } - defer gzw.Close() - - w := bufio.NewWriterSize(gzw, 4096) - defer w.Flush() - - // Write to 'w' -``` - -This will only use up to 4KB in memory when the writer is idle.
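For completeness, here is a minimal sketch of the direct deflate route mentioned above. It assumes the `flate.StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte)` signature from the linked documentation; treat it as illustrative rather than canonical:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/flate"
)

func main() {
	var buf bytes.Buffer

	// One-shot stateless compression: no state is kept between calls, so
	// each call compresses its input independently. eof=true terminates
	// the raw deflate stream; the optional dictionary may be nil.
	data := bytes.Repeat([]byte("hello stateless "), 64)
	if err := flate.StatelessDeflate(&buf, data, true, nil); err != nil {
		panic(err)
	}
	fmt.Printf("in: %d bytes, out: %d bytes\n", len(data), buf.Len())
}
```

As with the gzip example, batching writes (e.g. through a `bufio.Writer`) matters more here than for stateful writers, since every call emits an independent block.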
- -Compression is almost always worse than the fastest compression level and each write will allocate (a little) memory. - -# Performance Update 2018 - -It has been a while since we last looked at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my Desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD. - -The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates I could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet. - -The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* - relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller the compressed output is compared to stdlib. Negative means the output was bigger. *Loss* means the loss (or gain) in compression as a percentage difference of the input. - -The `gzstd` (standard library gzip) and `gzkp` (this package gzip) only use one CPU core. [`pgzip`](https://github.com/klauspost/pgzip) and [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) use all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet). - - -## Overall differences - -There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels. - -The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted my library to give a smoother transition between the compression levels than the standard library. - -This package attempts to provide a smoother transition, where "1" is taking a lot of shortcuts, "5" is the reasonable trade-off and "9" is the "give me the best compression", and the values in between give something reasonable in between. The standard library has big differences in levels 1-4, while levels 5-9 have no significant gains - often spending a lot more time than can be justified by the achieved compression. - -There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab. - -## Web Content - -This test set aims to emulate typical use in a web server. The test set is 4GB of data in 53k files, and is a mixture of (mostly) HTML, JS, CSS. - -Since levels 1 and 9 are close to being the same code, their results are quite close. But looking at the levels in between, the differences are quite big. - -Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case. - -## Object files - -This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible.
- -The picture is similar to the web content, but with small differences since this is very compressible. Levels 2-3 offer good speed, but sacrifice quite a bit of compression. - -The standard library seems suboptimal on levels 3 and 4, offering both worse compression and speed than levels 6 and 7 of this package, respectively. - -## Highly Compressible File - -This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real life terms we are dealing with something like a highly redundant stream of data, etc. - -It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5, with levels 7 and 8 offering great speed for the achieved compression. - -So if you know your content is extremely compressible you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground". - -## Medium-High Compressible - -This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text-based compression and more data-heavy streams. - -We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both. - -## Medium Compressible - -I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario. - -The most notable thing is how quickly the standard library drops to very low compression speeds around level 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior. - - -## Un-compressible Content - -This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections. - - -## Huffman only compression - -This compression library adds a special compression level, named `HuffmanOnly`, which allows near-linear time compression. This is done by completely disabling matching of previous data, and only reducing the number of bits used to represent each character. - -This means that often-used characters, like 'e' and ' ' (space) in text, use the fewest bits, and rare characters like '¤' take more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM). - -Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core. - -The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%). - -The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder slowing down on some content.
For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). So in this case you trade a ~30% size increase for a 4x speedup. - -For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/). - -This is implemented in Go 1.7 as "Huffman Only" mode, though not exposed for gzip. - -# Other packages - -Here are other packages of good quality and pure Go (no cgo wrappers or autoconverted code): - -* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression. -* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression. -* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer. -* [github.com/ronanh/intcomp](https://github.com/ronanh/intcomp) - Integer compression. -* [github.com/spenczar/fpc](https://github.com/spenczar/fpc) - Float compression. -* [github.com/minio/zipindex](https://github.com/minio/zipindex) - External ZIP directory index. -* [github.com/ybirader/pzip](https://github.com/ybirader/pzip) - Fast concurrent zip archiver and extractor. - -# license - -This code is licensed under the same conditions as the original Go code. See LICENSE file. diff --git a/vendor/github.com/klauspost/compress/SECURITY.md b/vendor/github.com/klauspost/compress/SECURITY.md deleted file mode 100644 index ca6685e2b7..0000000000 --- a/vendor/github.com/klauspost/compress/SECURITY.md +++ /dev/null @@ -1,25 +0,0 @@ -# Security Policy - -## Supported Versions - -Security updates are applied only to the latest release. - -## Vulnerability Definition - -A security vulnerability is a bug that, with certain input, triggers a crash or an infinite loop. Most calls will have varying execution time, and only in rare cases will slow operation be considered a security vulnerability. - -Corrupted output generally is not considered a security vulnerability, unless independent operations are able to affect each other. Note that not all functionality is re-entrant and safe to use concurrently. - -Out-of-memory crashes only apply if the en/decoder uses an abnormal amount of memory with appropriate options applied to limit maximum window size, concurrency, etc. However, if you are in doubt you are welcome to file a security issue. - -It is assumed that all callers are trusted, meaning internal data exposed through reflection or inspection of returned data structures is not considered a vulnerability. - -Vulnerabilities resulting from compiler/assembler errors should be reported upstream. Depending on the severity, this package may or may not implement a workaround. - -## Reporting a Vulnerability - -If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released. - -Please disclose it at [security advisory](https://github.com/klauspost/compress/security/advisories/new). If possible please provide a minimal reproducer. If the issue only applies to a single platform, it would be helpful to provide access to that. - -This project is maintained by a team of volunteers on a reasonable-effort basis. As such, vulnerabilities will be disclosed on a best-effort basis.
diff --git a/vendor/github.com/klauspost/compress/compressible.go b/vendor/github.com/klauspost/compress/compressible.go deleted file mode 100644 index ea5a692d51..0000000000 --- a/vendor/github.com/klauspost/compress/compressible.go +++ /dev/null @@ -1,85 +0,0 @@ -package compress - -import "math" - -// Estimate returns a normalized compressibility estimate of block b. -// Values close to zero are likely uncompressible. -// Values above 0.1 are likely to be compressible. -// Values above 0.5 are very compressible. -// Very small lengths will return 0. -func Estimate(b []byte) float64 { - if len(b) < 16 { - return 0 - } - - // Correctly predicted order 1 - hits := 0 - lastMatch := false - var o1 [256]byte - var hist [256]int - c1 := byte(0) - for _, c := range b { - if c == o1[c1] { - // We only count a hit if there were two correct predictions in a row. - if lastMatch { - hits++ - } - lastMatch = true - } else { - lastMatch = false - } - o1[c1] = c - c1 = c - hist[c]++ - } - - // Use x^0.6 to give better spread - prediction := math.Pow(float64(hits)/float64(len(b)), 0.6) - - // Calculate histogram distribution - variance := float64(0) - avg := float64(len(b)) / 256 - - for _, v := range hist { - Δ := float64(v) - avg - variance += Δ * Δ - } - - stddev := math.Sqrt(float64(variance)) / float64(len(b)) - exp := math.Sqrt(1 / float64(len(b))) - - // Subtract expected stddev - stddev -= exp - if stddev < 0 { - stddev = 0 - } - stddev *= 1 + exp - - // Use x^0.4 to give better spread - entropy := math.Pow(stddev, 0.4) - - // 50/50 weight between prediction and histogram distribution - return math.Pow((prediction+entropy)/2, 0.9) -} - -// ShannonEntropyBits returns the minimum number of bits required to represent -// an entropy encoding of the input bytes. -// https://en.wiktionary.org/wiki/Shannon_entropy -func ShannonEntropyBits(b []byte) int { - if len(b) == 0 { - return 0 - } - var hist [256]int - for _, c := range b { - hist[c]++ - } - shannon := float64(0) - invTotal := 1.0 / float64(len(b)) - for _, v := range hist[:] { - if v > 0 { - n := float64(v) - shannon += math.Ceil(-math.Log2(n*invTotal) * n) - } - } - return int(math.Ceil(shannon)) -} diff --git a/vendor/github.com/klauspost/compress/fse/README.md b/vendor/github.com/klauspost/compress/fse/README.md deleted file mode 100644 index ea7324da67..0000000000 --- a/vendor/github.com/klauspost/compress/fse/README.md +++ /dev/null @@ -1,79 +0,0 @@ -# Finite State Entropy - -This package provides Finite State Entropy encoding and decoding. - -Finite State Entropy (also referenced as [tANS](https://en.wikipedia.org/wiki/Asymmetric_numeral_systems#tANS)) -encoding provides a fast near-optimal symbol encoding/decoding -for byte blocks as implemented in [zstandard](https://github.com/facebook/zstd). - -This can be used for compressing input with a lot of similar input values to the smallest number of bytes. -This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders do, -but it can be used as a secondary step to compressors (like Snappy) that do not do entropy encoding. - -* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/fse) - -## News - - * Feb 2018: First implementation released. Consider this beta software for now. - -# Usage - -This package provides a low-level interface that allows compressing single, independent blocks. - -Each block is separate, and there are no built-in integrity checks.
-This means that the caller should keep track of block sizes and also do checksums if needed. - -Compressing a block is done via the [`Compress`](https://godoc.org/github.com/klauspost/compress/fse#Compress) function. -You must provide input and will receive the output and maybe an error. - -These error values can be returned: - -| Error | Description | |---------------------|-----------------------------------------------------------------------------| | `<nil>` | Everything ok, output is returned | | `ErrIncompressible` | Returned when input is judged to be too hard to compress | | `ErrUseRLE` | Returned from the compressor when the input is a single byte value repeated | | `(error)` | An internal error occurred. | - -As can be seen above, there are errors that will be returned even under normal operation, so it is important to handle these. - -To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/fse#Scratch) object -that can be re-used for successive calls. Both compression and decompression accept a `Scratch` object, and the same -object can be used for both. - -Be aware that when re-using a `Scratch` object, the *output* buffer is also re-used, so if you are still using the output, -you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output. - -Decompressing is done by calling the [`Decompress`](https://godoc.org/github.com/klauspost/compress/fse#Decompress) function. -You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back, -your input was likely corrupted. - -It is important to note that a successful decoding does *not* mean your output matches your original input. -There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid. - -For more detailed usage, see examples in the [godoc documentation](https://godoc.org/github.com/klauspost/compress/fse#pkg-examples). - -# Performance - -Many factors affect speed. Block sizes and compressibility of the material are primary factors. -All compression functions are currently only running on the calling goroutine, so only one core will be used per block. - -The compressor is significantly faster if symbols are kept as small as possible. The highest byte value of the input -is used to reduce some of the processing, so if all your input is above byte value 64 for instance, it may be -beneficial to transpose all your input values down by 64. - -With moderate block sizes around 64k, speeds are typically 200MB/s per core for compression and -around 300MB/s for decompression. - -The same hardware typically does Huffman (deflate) encoding at 125MB/s and decompression at 100MB/s. - -# Plans - -At one point, more internals will be exposed to facilitate more "expert" usage of the components. - -A streaming interface is also likely to be implemented. Likely compatible with [FSE stream format](https://github.com/Cyan4973/FiniteStateEntropy/blob/dev/programs/fileio.c#L261). - -# Contributing - -Contributions are always welcome. Be aware that adding public functions will require good justification, and breaking -changes will likely not be accepted. If in doubt, open an issue before writing the PR.
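To make the block API above concrete, a minimal round-trip sketch follows. `Compress(in []byte, s *Scratch)` matches the vendored source later in this patch; the `Decompress(b []byte, s *Scratch)` signature and the zero-value `Scratch` behavior are assumptions based on the godoc linked above:

```go
package main

import (
	"bytes"
	"errors"
	"fmt"

	"github.com/klauspost/compress/fse"
)

func main() {
	// A skewed symbol distribution is a good fit for entropy coding.
	in := bytes.Repeat([]byte("aaaabbbc"), 1024)

	var s fse.Scratch // re-used across calls to reduce allocations
	comp, err := fse.Compress(in, &s)
	switch {
	case errors.Is(err, fse.ErrIncompressible):
		fmt.Println("store the input uncompressed instead")
		return
	case errors.Is(err, fse.ErrUseRLE):
		fmt.Println("input is one repeated byte; use RLE instead")
		return
	case err != nil:
		panic(err)
	}

	// The returned slice aliases s.Out, which is re-used by the next call.
	// Copy it (or set s.Out = nil) before re-using the Scratch.
	comp = append([]byte(nil), comp...)

	out, err := fse.Decompress(comp, &s)
	if err != nil {
		panic(err)
	}
	fmt.Println("round trip ok:", bytes.Equal(in, out))
}
```

Note how both documented sentinel errors are handled explicitly, since they occur under normal operation.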
\ No newline at end of file diff --git a/vendor/github.com/klauspost/compress/fse/bitreader.go b/vendor/github.com/klauspost/compress/fse/bitreader.go deleted file mode 100644 index f65eb3909c..0000000000 --- a/vendor/github.com/klauspost/compress/fse/bitreader.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package fse - -import ( - "encoding/binary" - "errors" - "io" -) - -// bitReader reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. -type bitReader struct { - in []byte - off uint // next byte to read is at in[off - 1] - value uint64 - bitsRead uint8 -} - -// init initializes and resets the bit reader. -func (b *bitReader) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.bitsRead += 8 - uint8(highBits(uint32(v))) - return nil -} - -// getBits will return n bits. n can be 0. -func (b *bitReader) getBits(n uint8) uint16 { - if n == 0 || b.bitsRead >= 64 { - return 0 - } - return b.getBitsFast(n) -} - -// getBitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReader) getBitsFast(n uint8) uint16 { - const regMask = 64 - 1 - v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) - b.bitsRead += n - return v -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReader) fillFast() { - if b.bitsRead < 32 { - return - } - // 2 bounds checks. - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 -} - -// fill() will make sure at least 32 bits are available. -func (b *bitReader) fill() { - if b.bitsRead < 32 { - return - } - if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value = (b.value << 8) | uint64(b.in[b.off-1]) - b.bitsRead -= 8 - b.off-- - } -} - -// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. -func (b *bitReader) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// finished returns true if all bits have been read from the bit stream. -func (b *bitReader) finished() bool { - return b.bitsRead >= 64 && b.off == 0 -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReader) close() error { - // Release reference. 
- b.in = nil - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} diff --git a/vendor/github.com/klauspost/compress/fse/bitwriter.go b/vendor/github.com/klauspost/compress/fse/bitwriter.go deleted file mode 100644 index e82fa3bb7b..0000000000 --- a/vendor/github.com/klauspost/compress/fse/bitwriter.go +++ /dev/null @@ -1,167 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package fse - -import "fmt" - -// bitWriter will write bits. -// First bit will be LSB of the first byte of output. -type bitWriter struct { - bitContainer uint64 - nBits uint8 - out []byte -} - -// bitMask16 contains bitmasks. It has extra entries to avoid bounds checks. -var bitMask16 = [32]uint16{ - 0, 1, 3, 7, 0xF, 0x1F, - 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, - 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF} /* up to 16 bits */ - -// addBits16NC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16NC(value uint16, bits uint8) { - b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) - b.nBits += bits -} - -// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// addBits16ZeroNC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -// This is fastest if bits can be zero. -func (b *bitWriter) addBits16ZeroNC(value uint16, bits uint8) { - if bits == 0 { - return - } - value <<= (16 - bits) & 15 - value >>= (16 - bits) & 15 - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// flush will flush all pending full bytes. -// There will be at least 56 bits available for writing when this has been called. -// Using flush32 is faster, but leaves less space for writing.
-func (b *bitWriter) flush() { - v := b.nBits >> 3 - switch v { - case 0: - case 1: - b.out = append(b.out, - byte(b.bitContainer), - ) - case 2: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - ) - case 3: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - ) - case 4: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - ) - case 5: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - ) - case 6: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - ) - case 7: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - ) - case 8: - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24), - byte(b.bitContainer>>32), - byte(b.bitContainer>>40), - byte(b.bitContainer>>48), - byte(b.bitContainer>>56), - ) - default: - panic(fmt.Errorf("bits (%d) > 64", b.nBits)) - } - b.bitContainer >>= v << 3 - b.nBits &= 7 -} - -// flush32 will flush out, so there are at least 32 bits available for writing. -func (b *bitWriter) flush32() { - if b.nBits < 32 { - return - } - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24)) - b.nBits -= 32 - b.bitContainer >>= 32 -} - -// flushAlign will flush remaining full bytes and align to next byte boundary. -func (b *bitWriter) flushAlign() { - nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { - b.out = append(b.out, byte(b.bitContainer>>(i*8))) - } - b.nBits = 0 - b.bitContainer = 0 -} - -// close will write the alignment bit and write the final byte(s) -// to the output. -func (b *bitWriter) close() { - // End mark - b.addBits16Clean(1, 1) - // flush until next byte. - b.flushAlign() -} - -// reset and continue writing by appending to out. -func (b *bitWriter) reset(out []byte) { - b.bitContainer = 0 - b.nBits = 0 - b.out = out -} diff --git a/vendor/github.com/klauspost/compress/fse/bytereader.go b/vendor/github.com/klauspost/compress/fse/bytereader.go deleted file mode 100644 index abade2d605..0000000000 --- a/vendor/github.com/klauspost/compress/fse/bytereader.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package fse - -// byteReader provides a byte reader that reads -// little endian values from a byte stream. -// The input stream is manually advanced. -// The reader performs no bounds checks. -type byteReader struct { - b []byte - off int -} - -// init will initialize the reader and set the input. -func (b *byteReader) init(in []byte) { - b.b = in - b.off = 0 -} - -// advance the stream by n bytes. -func (b *byteReader) advance(n uint) { - b.off += int(n) -} - -// Uint32 returns a little endian uint32 starting at current offset.
-func (b byteReader) Uint32() uint32 { - b2 := b.b[b.off:] - b2 = b2[:4] - v3 := uint32(b2[3]) - v2 := uint32(b2[2]) - v1 := uint32(b2[1]) - v0 := uint32(b2[0]) - return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) -} - -// unread returns the unread portion of the input. -func (b byteReader) unread() []byte { - return b.b[b.off:] -} - -// remain will return the number of bytes remaining. -func (b byteReader) remain() int { - return len(b.b) - b.off -} diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go deleted file mode 100644 index 074018d8f9..0000000000 --- a/vendor/github.com/klauspost/compress/fse/compress.go +++ /dev/null @@ -1,683 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package fse - -import ( - "errors" - "fmt" -) - -// Compress the input bytes. Input must be < 2GB. -// Provide a Scratch buffer to avoid memory allocations. -// Note that the output is also kept in the scratch buffer. -// If input is too hard to compress, ErrIncompressible is returned. -// If input is a single byte value repeated ErrUseRLE is returned. -func Compress(in []byte, s *Scratch) ([]byte, error) { - if len(in) <= 1 { - return nil, ErrIncompressible - } - if len(in) > (2<<30)-1 { - return nil, errors.New("input too big, must be < 2GB") - } - s, err := s.prepare(in) - if err != nil { - return nil, err - } - - // Create histogram, if none was provided. - maxCount := s.maxCount - if maxCount == 0 { - maxCount = s.countSimple(in) - } - // Reset for next run. - s.clearCount = true - s.maxCount = 0 - if maxCount == len(in) { - // One symbol, use RLE - return nil, ErrUseRLE - } - if maxCount == 1 || maxCount < (len(in)>>7) { - // Each symbol present maximum once or too well distributed. - return nil, ErrIncompressible - } - s.optimalTableLog() - err = s.normalizeCount() - if err != nil { - return nil, err - } - err = s.writeCount() - if err != nil { - return nil, err - } - - if false { - err = s.validateNorm() - if err != nil { - return nil, err - } - } - - err = s.buildCTable() - if err != nil { - return nil, err - } - err = s.compress(in) - if err != nil { - return nil, err - } - s.Out = s.bw.out - // Check if we compressed. - if len(s.Out) >= len(in) { - return nil, ErrIncompressible - } - return s.Out, nil -} - -// cState contains the compression state of a stream. -type cState struct { - bw *bitWriter - stateTable []uint16 - state uint16 -} - -// init will initialize the compression state to the first symbol of the stream. -func (c *cState) init(bw *bitWriter, ct *cTable, tableLog uint8, first symbolTransform) { - c.bw = bw - c.stateTable = ct.stateTable - - nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 - im := int32((nbBitsOut << 16) - first.deltaNbBits) - lu := (im >> nbBitsOut) + first.deltaFindState - c.state = c.stateTable[lu] -} - -// encode the output symbol provided and write it to the bitstream. -func (c *cState) encode(symbolTT symbolTransform) { - nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 - dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState - c.bw.addBits16NC(c.state, uint8(nbBitsOut)) - c.state = c.stateTable[dstState] -} - -// encode the output symbol provided and write it to the bitstream. 
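The early-out logic in `Compress` above is worth seeing in isolation. A self-contained sketch (my own helper, not the package API) of the same pre-checks: a block that is one repeated byte routes to RLE, and a block whose most frequent byte is still rare (`maxCount < len(in)>>7`) is treated as incompressible:

```go
package main

import (
	"errors"
	"fmt"
)

var (
	errUseRLE         = errors.New("single value repeated: use RLE")
	errIncompressible = errors.New("not compressible")
)

func precheck(in []byte) error {
	if len(in) <= 1 {
		return errIncompressible
	}
	var count [256]int
	maxCount := 0
	for _, v := range in {
		count[v]++
		if count[v] > maxCount {
			maxCount = count[v]
		}
	}
	switch {
	case maxCount == len(in):
		return errUseRLE // one symbol only
	case maxCount == 1 || maxCount < len(in)>>7:
		return errIncompressible // too flat a distribution to pay for a table
	}
	return nil
}

func main() {
	rle := make([]byte, 100) // one repeated (zero) byte
	fmt.Println(precheck(rle)) // single value repeated: use RLE
}
```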
-func (c *cState) encodeZero(symbolTT symbolTransform) { - nbBitsOut := (uint32(c.state) + symbolTT.deltaNbBits) >> 16 - dstState := int32(c.state>>(nbBitsOut&15)) + symbolTT.deltaFindState - c.bw.addBits16ZeroNC(c.state, uint8(nbBitsOut)) - c.state = c.stateTable[dstState] -} - -// flush will write the tablelog to the output and flush the remaining full bytes. -func (c *cState) flush(tableLog uint8) { - c.bw.flush32() - c.bw.addBits16NC(c.state, tableLog) - c.bw.flush() -} - -// compress is the main compression loop that will encode the input from the last byte to the first. -func (s *Scratch) compress(src []byte) error { - if len(src) <= 2 { - return errors.New("compress: src too small") - } - tt := s.ct.symbolTT[:256] - s.bw.reset(s.Out) - - // Our two states each encodes every second byte. - // Last byte encoded (first byte decoded) will always be encoded by c1. - var c1, c2 cState - - // Encode so remaining size is divisible by 4. - ip := len(src) - if ip&1 == 1 { - c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) - c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) - c1.encodeZero(tt[src[ip-3]]) - ip -= 3 - } else { - c2.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-1]]) - c1.init(&s.bw, &s.ct, s.actualTableLog, tt[src[ip-2]]) - ip -= 2 - } - if ip&2 != 0 { - c2.encodeZero(tt[src[ip-1]]) - c1.encodeZero(tt[src[ip-2]]) - ip -= 2 - } - src = src[:ip] - - // Main compression loop. - switch { - case !s.zeroBits && s.actualTableLog <= 8: - // We can encode 4 symbols without requiring a flush. - // We do not need to check if any output is 0 bits. - for ; len(src) >= 4; src = src[:len(src)-4] { - s.bw.flush32() - v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] - c2.encode(tt[v0]) - c1.encode(tt[v1]) - c2.encode(tt[v2]) - c1.encode(tt[v3]) - } - case !s.zeroBits: - // We do not need to check if any output is 0 bits. - for ; len(src) >= 4; src = src[:len(src)-4] { - s.bw.flush32() - v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] - c2.encode(tt[v0]) - c1.encode(tt[v1]) - s.bw.flush32() - c2.encode(tt[v2]) - c1.encode(tt[v3]) - } - case s.actualTableLog <= 8: - // We can encode 4 symbols without requiring a flush - for ; len(src) >= 4; src = src[:len(src)-4] { - s.bw.flush32() - v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] - c2.encodeZero(tt[v0]) - c1.encodeZero(tt[v1]) - c2.encodeZero(tt[v2]) - c1.encodeZero(tt[v3]) - } - default: - for ; len(src) >= 4; src = src[:len(src)-4] { - s.bw.flush32() - v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1] - c2.encodeZero(tt[v0]) - c1.encodeZero(tt[v1]) - s.bw.flush32() - c2.encodeZero(tt[v2]) - c1.encodeZero(tt[v3]) - } - } - - // Flush final state. - // Used to initialize state when decoding. - c2.flush(s.actualTableLog) - c1.flush(s.actualTableLog) - - s.bw.close() - return nil -} - -// writeCount will write the normalized histogram count to header. -// This is read back by readNCount. 
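The compression loop above walks `src` from the last byte to the first; since the matching bit reader consumes the stream in reverse, the decoder emits symbols in natural order. The shape of that trick, with a plain slice standing in for the bitstream (illustrative only):

```go
package main

import "fmt"

func main() {
	src := []byte("hello")

	// "Encode": the compressor walks src back to front.
	var stream []byte
	for i := len(src) - 1; i >= 0; i-- {
		stream = append(stream, src[i])
	}

	// "Decode": the reverse reader consumes the stream from its end,
	// so symbols come out in natural order again.
	var out []byte
	for i := len(stream) - 1; i >= 0; i-- {
		out = append(out, stream[i])
	}
	fmt.Println(string(out)) // hello
}
```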
-func (s *Scratch) writeCount() error {
-	var (
-		tableLog  = s.actualTableLog
-		tableSize = 1 << tableLog
-		previous0 bool
-		charnum   uint16
-
-		maxHeaderSize = ((int(s.symbolLen)*int(tableLog) + 4 + 2) >> 3) + 3
-
-		// Write Table Size
-		bitStream = uint32(tableLog - minTablelog)
-		bitCount  = uint(4)
-		remaining = int16(tableSize + 1) /* +1 for extra accuracy */
-		threshold = int16(tableSize)
-		nbBits    = uint(tableLog + 1)
-	)
-	if cap(s.Out) < maxHeaderSize {
-		s.Out = make([]byte, 0, s.br.remain()+maxHeaderSize)
-	}
-	outP := uint(0)
-	out := s.Out[:maxHeaderSize]
-
-	// stops at 1
-	for remaining > 1 {
-		if previous0 {
-			start := charnum
-			for s.norm[charnum] == 0 {
-				charnum++
-			}
-			for charnum >= start+24 {
-				start += 24
-				bitStream += uint32(0xFFFF) << bitCount
-				out[outP] = byte(bitStream)
-				out[outP+1] = byte(bitStream >> 8)
-				outP += 2
-				bitStream >>= 16
-			}
-			for charnum >= start+3 {
-				start += 3
-				bitStream += 3 << bitCount
-				bitCount += 2
-			}
-			bitStream += uint32(charnum-start) << bitCount
-			bitCount += 2
-			if bitCount > 16 {
-				out[outP] = byte(bitStream)
-				out[outP+1] = byte(bitStream >> 8)
-				outP += 2
-				bitStream >>= 16
-				bitCount -= 16
-			}
-		}
-
-		count := s.norm[charnum]
-		charnum++
-		max := (2*threshold - 1) - remaining
-		if count < 0 {
-			remaining += count
-		} else {
-			remaining -= count
-		}
-		count++ // +1 for extra accuracy
-		if count >= threshold {
-			count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[
-		}
-		bitStream += uint32(count) << bitCount
-		bitCount += nbBits
-		if count < max {
-			bitCount--
-		}
-
-		previous0 = count == 1
-		if remaining < 1 {
-			return errors.New("internal error: remaining<1")
-		}
-		for remaining < threshold {
-			nbBits--
-			threshold >>= 1
-		}
-
-		if bitCount > 16 {
-			out[outP] = byte(bitStream)
-			out[outP+1] = byte(bitStream >> 8)
-			outP += 2
-			bitStream >>= 16
-			bitCount -= 16
-		}
-	}
-
-	out[outP] = byte(bitStream)
-	out[outP+1] = byte(bitStream >> 8)
-	outP += (bitCount + 7) / 8
-
-	if charnum > s.symbolLen {
-		return errors.New("internal error: charnum > s.symbolLen")
-	}
-	s.Out = out[:outP]
-	return nil
-}
-
-// symbolTransform contains the state transform for a symbol.
-type symbolTransform struct {
-	deltaFindState int32
-	deltaNbBits    uint32
-}
-
-// String prints values as a human readable string.
-func (s symbolTransform) String() string {
-	return fmt.Sprintf("dnbits: %08x, fs:%d", s.deltaNbBits, s.deltaFindState)
-}
-
-// cTable contains tables used for compression.
-type cTable struct {
-	tableSymbol []byte
-	stateTable  []uint16
-	symbolTT    []symbolTransform
-}
-
-// allocCtable will allocate tables needed for compression.
-// If existing tables are big enough, they are simply re-used.
-func (s *Scratch) allocCtable() {
-	tableSize := 1 << s.actualTableLog
-	// get tableSymbol that is big enough.
-	if cap(s.ct.tableSymbol) < tableSize {
-		s.ct.tableSymbol = make([]byte, tableSize)
-	}
-	s.ct.tableSymbol = s.ct.tableSymbol[:tableSize]
-
-	ctSize := tableSize
-	if cap(s.ct.stateTable) < ctSize {
-		s.ct.stateTable = make([]uint16, ctSize)
-	}
-	s.ct.stateTable = s.ct.stateTable[:ctSize]
-
-	if cap(s.ct.symbolTT) < 256 {
-		s.ct.symbolTT = make([]symbolTransform, 256)
-	}
-	s.ct.symbolTT = s.ct.symbolTT[:256]
-}
-
-// buildCTable will populate the compression table so it is ready to be used.
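`buildCTable` below scatters symbols across the state table with a fixed stride and then insists that `position` lands back on zero. The stride from `tableStep` is odd, hence coprime with the power-of-two table size, so the walk is a full permutation; a standalone sketch demonstrating that invariant:

```go
package main

import "fmt"

func tableStep(tableSize uint32) uint32 {
	return (tableSize >> 1) + (tableSize >> 3) + 3
}

func main() {
	const size = 1 << 6
	step := tableStep(size) // 43: odd, so coprime with 64
	seen := make([]bool, size)
	pos := uint32(0)
	for i := 0; i < size; i++ {
		seen[pos] = true
		pos = (pos + step) & (size - 1)
	}
	fmt.Println("ended at", pos) // 0: the walk closed exactly at the end
	for _, ok := range seen {
		if !ok {
			fmt.Println("missed a cell") // never happens for an odd step
		}
	}
}
```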
-func (s *Scratch) buildCTable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - var cumul [maxSymbolValue + 2]int16 - - s.allocCtable() - tableSymbol := s.ct.tableSymbol[:tableSize] - // symbol start positions - { - cumul[0] = 0 - for ui, v := range s.norm[:s.symbolLen-1] { - u := byte(ui) // one less than reference - if v == -1 { - // Low proba symbol - cumul[u+1] = cumul[u] + 1 - tableSymbol[highThreshold] = u - highThreshold-- - } else { - cumul[u+1] = cumul[u] + v - } - } - // Encode last symbol separately to avoid overflowing u - u := int(s.symbolLen - 1) - v := s.norm[s.symbolLen-1] - if v == -1 { - // Low proba symbol - cumul[u+1] = cumul[u] + 1 - tableSymbol[highThreshold] = byte(u) - highThreshold-- - } else { - cumul[u+1] = cumul[u] + v - } - if uint32(cumul[s.symbolLen]) != tableSize { - return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) - } - cumul[s.symbolLen] = int16(tableSize) + 1 - } - // Spread symbols - s.zeroBits = false - { - step := tableStep(tableSize) - tableMask := tableSize - 1 - var position uint32 - // if any symbol > largeLimit, we may have 0 bits output. - largeLimit := int16(1 << (s.actualTableLog - 1)) - for ui, v := range s.norm[:s.symbolLen] { - symbol := byte(ui) - if v > largeLimit { - s.zeroBits = true - } - for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { - tableSymbol[position] = symbol - position = (position + step) & tableMask - for position > highThreshold { - position = (position + step) & tableMask - } /* Low proba area */ - } - } - - // Check if we have gone through all positions - if position != 0 { - return errors.New("position!=0") - } - } - - // Build table - table := s.ct.stateTable - { - tsi := int(tableSize) - for u, v := range tableSymbol { - // TableU16 : sorted by symbol order; gives next state value - table[cumul[v]] = uint16(tsi + u) - cumul[v]++ - } - } - - // Build Symbol Transformation Table - { - total := int16(0) - symbolTT := s.ct.symbolTT[:s.symbolLen] - tableLog := s.actualTableLog - tl := (uint32(tableLog) << 16) - (1 << tableLog) - for i, v := range s.norm[:s.symbolLen] { - switch v { - case 0: - case -1, 1: - symbolTT[i].deltaNbBits = tl - symbolTT[i].deltaFindState = int32(total - 1) - total++ - default: - maxBitsOut := uint32(tableLog) - highBits(uint32(v-1)) - minStatePlus := uint32(v) << maxBitsOut - symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus - symbolTT[i].deltaFindState = int32(total - v) - total += v - } - } - if total != int16(tableSize) { - return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) - } - } - return nil -} - -// countSimple will create a simple histogram in s.count. -// Returns the biggest count. -// Does not update s.clearCount. -func (s *Scratch) countSimple(in []byte) (max int) { - for _, v := range in { - s.count[v]++ - } - m, symlen := uint32(0), s.symbolLen - for i, v := range s.count[:] { - if v == 0 { - continue - } - if v > m { - m = v - } - symlen = uint16(i) + 1 - } - s.symbolLen = symlen - return int(m) -} - -// minTableLog provides the minimum logSize to safely represent a distribution. 
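The bound computed by `minTableLog` below mixes two constraints: enough states for the input length and enough resolution for the alphabet. With `highBits(v)` being `bits.Len32(v)-1`, a sketch using illustrative names:

```go
package main

import (
	"fmt"
	"math/bits"
)

func highBits(v uint32) uint32 { return uint32(bits.Len32(v) - 1) }

// minTableLog mirrors the arithmetic of the method below for positive inputs.
func minTableLog(srcLen, symbolLen uint32) uint8 {
	minBitsSrc := highBits(srcLen-1) + 1        // floor from input length
	minBitsSymbols := highBits(symbolLen-1) + 2 // floor from alphabet size
	if minBitsSrc < minBitsSymbols {
		return uint8(minBitsSrc)
	}
	return uint8(minBitsSymbols)
}

func main() {
	// 4096 input bytes over a 64-symbol alphabet:
	fmt.Println(minTableLog(4096, 64)) // 7: the alphabet is the binding constraint
}
```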
-func (s *Scratch) minTableLog() uint8 { - minBitsSrc := highBits(uint32(s.br.remain()-1)) + 1 - minBitsSymbols := highBits(uint32(s.symbolLen-1)) + 2 - if minBitsSrc < minBitsSymbols { - return uint8(minBitsSrc) - } - return uint8(minBitsSymbols) -} - -// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog -func (s *Scratch) optimalTableLog() { - tableLog := s.TableLog - minBits := s.minTableLog() - maxBitsSrc := uint8(highBits(uint32(s.br.remain()-1))) - 2 - if maxBitsSrc < tableLog { - // Accuracy can be reduced - tableLog = maxBitsSrc - } - if minBits > tableLog { - tableLog = minBits - } - // Need a minimum to safely represent all symbol values - if tableLog < minTablelog { - tableLog = minTablelog - } - if tableLog > maxTableLog { - tableLog = maxTableLog - } - s.actualTableLog = tableLog -} - -var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} - -// normalizeCount will normalize the count of the symbols so -// the total is equal to the table size. -func (s *Scratch) normalizeCount() error { - var ( - tableLog = s.actualTableLog - scale = 62 - uint64(tableLog) - step = (1 << 62) / uint64(s.br.remain()) - vStep = uint64(1) << (scale - 20) - stillToDistribute = int16(1 << tableLog) - largest int - largestP int16 - lowThreshold = (uint32)(s.br.remain() >> tableLog) - ) - - for i, cnt := range s.count[:s.symbolLen] { - // already handled - // if (count[s] == s.length) return 0; /* rle special case */ - - if cnt == 0 { - s.norm[i] = 0 - continue - } - if cnt <= lowThreshold { - s.norm[i] = -1 - stillToDistribute-- - } else { - proba := (int16)((uint64(cnt) * step) >> scale) - if proba < 8 { - restToBeat := vStep * uint64(rtbTable[proba]) - v := uint64(cnt)*step - (uint64(proba) << scale) - if v > restToBeat { - proba++ - } - } - if proba > largestP { - largestP = proba - largest = i - } - s.norm[i] = proba - stillToDistribute -= proba - } - } - - if -stillToDistribute >= (s.norm[largest] >> 1) { - // corner case, need another normalization method - return s.normalizeCount2() - } - s.norm[largest] += stillToDistribute - return nil -} - -// Secondary normalization method. -// To be used when primary method fails. 
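What `normalizeCount` above is doing, stripped of its fixed-point rounding and corner cases, is proportional scaling to a power-of-two total with a floor of one slot per present symbol; the remainder is dumped on the dominant symbol. A deliberately simplified sketch (the real code guards against negative remainders and falls back to `normalizeCount2` below when the skew is too large):

```go
package main

import "fmt"

func normalize(count []int, total, tableLog int) []int16 {
	norm := make([]int16, len(count))
	tableSize := 1 << tableLog
	remaining := tableSize
	largest := 0
	for i, c := range count {
		if c == 0 {
			continue
		}
		p := c * tableSize / total
		if p == 0 {
			p = 1 // every present symbol keeps at least one slot
		}
		norm[i] = int16(p)
		remaining -= p
		if c > count[largest] {
			largest = i
		}
	}
	norm[largest] += int16(remaining) // remainder goes to the most common symbol
	return norm
}

func main() {
	norm := normalize([]int{90, 7, 3}, 100, 6) // table size 64
	sum := 0
	for _, v := range norm {
		sum += int(v)
	}
	fmt.Println(norm, sum) // [59 4 1] 64
}
```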
-func (s *Scratch) normalizeCount2() error { - const notYetAssigned = -2 - var ( - distributed uint32 - total = uint32(s.br.remain()) - tableLog = s.actualTableLog - lowThreshold = total >> tableLog - lowOne = (total * 3) >> (tableLog + 1) - ) - for i, cnt := range s.count[:s.symbolLen] { - if cnt == 0 { - s.norm[i] = 0 - continue - } - if cnt <= lowThreshold { - s.norm[i] = -1 - distributed++ - total -= cnt - continue - } - if cnt <= lowOne { - s.norm[i] = 1 - distributed++ - total -= cnt - continue - } - s.norm[i] = notYetAssigned - } - toDistribute := (1 << tableLog) - distributed - - if (total / toDistribute) > lowOne { - // risk of rounding to zero - lowOne = (total * 3) / (toDistribute * 2) - for i, cnt := range s.count[:s.symbolLen] { - if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { - s.norm[i] = 1 - distributed++ - total -= cnt - continue - } - } - toDistribute = (1 << tableLog) - distributed - } - if distributed == uint32(s.symbolLen)+1 { - // all values are pretty poor; - // probably incompressible data (should have already been detected); - // find max, then give all remaining points to max - var maxV int - var maxC uint32 - for i, cnt := range s.count[:s.symbolLen] { - if cnt > maxC { - maxV = i - maxC = cnt - } - } - s.norm[maxV] += int16(toDistribute) - return nil - } - - if total == 0 { - // all of the symbols were low enough for the lowOne or lowThreshold - for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { - if s.norm[i] > 0 { - toDistribute-- - s.norm[i]++ - } - } - return nil - } - - var ( - vStepLog = 62 - uint64(tableLog) - mid = uint64((1 << (vStepLog - 1)) - 1) - rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining - tmpTotal = mid - ) - for i, cnt := range s.count[:s.symbolLen] { - if s.norm[i] == notYetAssigned { - var ( - end = tmpTotal + uint64(cnt)*rStep - sStart = uint32(tmpTotal >> vStepLog) - sEnd = uint32(end >> vStepLog) - weight = sEnd - sStart - ) - if weight < 1 { - return errors.New("weight < 1") - } - s.norm[i] = int16(weight) - tmpTotal = end - } - } - return nil -} - -// validateNorm validates the normalized histogram table. 
-func (s *Scratch) validateNorm() (err error) {
-	var total int
-	for _, v := range s.norm[:s.symbolLen] {
-		if v >= 0 {
-			total += int(v)
-		} else {
-			total -= int(v)
-		}
-	}
-	defer func() {
-		if err == nil {
-			return
-		}
-		fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen)
-		for i, v := range s.norm[:s.symbolLen] {
-			fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v)
-		}
-	}()
-	if total != (1 << s.actualTableLog) {
-		return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog)
-	}
-	return nil
-}
diff --git a/vendor/github.com/klauspost/compress/fse/decompress.go b/vendor/github.com/klauspost/compress/fse/decompress.go
deleted file mode 100644
--- a/vendor/github.com/klauspost/compress/fse/decompress.go
+++ /dev/null
-// Copyright 2018 Klaus Post. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
-
-package fse
-
-import (
-	"errors"
-	"fmt"
-)
-
-const (
-	tablelogAbsoluteMax = 15
-)
-
-// Decompress a block of data.
-// You can provide a scratch buffer to avoid allocations.
-// If nil is provided a temporary one will be allocated.
-// It is possible, but by no way guaranteed that corrupt data will
-// return an error.
-// It is up to the caller to verify integrity of the returned data.
-// Use a predefined Scratch to set maximum acceptable output size.
-func Decompress(b []byte, s *Scratch) ([]byte, error) {
-	s, err := s.prepare(b)
-	if err != nil {
-		return nil, err
-	}
-	s.Out = s.Out[:0]
-	err = s.readNCount()
-	if err != nil {
-		return nil, err
-	}
-	err = s.buildDtable()
-	if err != nil {
-		return nil, err
-	}
-	err = s.decompress()
-	if err != nil {
-		return nil, err
-	}
-
-	return s.Out, nil
-}
-
-// readNCount will read the symbol distribution so decoding tables can be constructed.
-func (s *Scratch) readNCount() error {
-	var (
-		charnum   uint16
-		previous0 bool
-		b         = &s.br
-	)
-	iend := b.remain()
-	if iend < 4 {
-		return errors.New("input too small")
-	}
-	bitStream := b.Uint32()
-	nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog
-	if nbBits > tablelogAbsoluteMax {
-		return errors.New("tableLog too large")
-	}
-	bitStream >>= 4
-	bitCount := uint(4)
-
-	s.actualTableLog = uint8(nbBits)
-	remaining := int32((1 << nbBits) + 1)
-	threshold := int32(1 << nbBits)
-	gotTotal := int32(0)
-	nbBits++
-
-	for remaining > 1 {
-		if previous0 {
-			n0 := charnum
-			for (bitStream & 0xFFFF) == 0xFFFF {
-				n0 += 24
-				if b.off < iend-5 {
-					b.advance(2)
-					bitStream = b.Uint32() >> bitCount
-				} else {
-					bitStream >>= 16
-					bitCount += 16
-				}
-			}
-			for (bitStream & 3) == 3 {
-				n0 += 3
-				bitStream >>= 2
-				bitCount += 2
-			}
-			n0 += uint16(bitStream & 3)
-			bitCount += 2
-			if n0 > maxSymbolValue {
-				return errors.New("maxSymbolValue too small")
-			}
-			for charnum < n0 {
-				s.norm[charnum&0xff] = 0
-				charnum++
-			}
-
-			if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 {
-				b.advance(bitCount >> 3)
-				bitCount &= 7
-				bitStream = b.Uint32() >> bitCount
-			} else {
-				bitStream >>= 2
-			}
-		}
-
-		max := (2*(threshold) - 1) - (remaining)
-		var count int32
-
-		if (int32(bitStream) & (threshold - 1)) < max {
-			count = int32(bitStream) & (threshold - 1)
-			bitCount += nbBits - 1
-		} else {
-			count = int32(bitStream) & (2*threshold - 1)
-			if count >= threshold {
-				count -= max
-			}
-			bitCount += nbBits
-		}
-
-		count-- // extra accuracy
-		if count < 0 {
-			// -1 means +1
-			remaining += count
-			gotTotal -= count
-		} else {
-			remaining -= count
-			gotTotal += count
-		}
-		s.norm[charnum&0xff] = int16(count)
-		charnum++
-		previous0 = count == 0
-		for remaining < threshold {
-			nbBits--
-			threshold >>= 1
-		}
-		if b.off <= iend-7 || b.off+int(bitCount>>3) <= iend-4 {
-			b.advance(bitCount >> 3)
-			bitCount &= 7
-		} else {
-			bitCount -= (uint)(8 * (len(b.b) - 4 - b.off))
-			b.off = len(b.b) - 4
-		}
-		bitStream = b.Uint32() >> (bitCount & 31)
-	}
-	s.symbolLen = charnum
-
-	if s.symbolLen <= 1 {
-		return fmt.Errorf("symbolLen (%d) too small", s.symbolLen)
-	}
-	if s.symbolLen > maxSymbolValue+1 {
-		return fmt.Errorf("symbolLen (%d) too big", s.symbolLen)
-	}
-	if remaining != 1 {
-		return fmt.Errorf("corruption detected (remaining %d != 1)", remaining)
-	}
-	if bitCount > 32 {
-		return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount)
-	}
-	if gotTotal != 1<<s.actualTableLog {
-		return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
-	}
-	b.advance((bitCount + 7) >> 3)
-	return nil
-}
-
-// decSymbol contains information about a state entry,
-// Including the state offset base, the output symbol and
-// the number of bits to read for the low part of the destination state.
-type decSymbol struct {
-	newState uint16
-	symbol   uint8
-	nbBits   uint8
-}
-
-// allocDtable will allocate decoding tables if they are not big enough.
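The header handshake between `writeCount` and `readNCount` above starts with a single nibble: the table log, biased by `minTablelog`, in the low four bits. That round-trip in isolation (constants inlined for illustration; `minTablelog` is 5 in this package):

```go
package main

import "fmt"

const minTablelog = 5

func main() {
	// Encoder side: writeCount stores tableLog - minTablelog in the low 4 bits;
	// further count fields are packed above it.
	tableLog := uint32(9)
	header := tableLog - minTablelog

	// Decoder side: mask the low nibble and add the bias back.
	got := (header & 0xF) + minTablelog
	fmt.Println(got == tableLog) // true
}
```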
-func (s *Scratch) allocDtable() { - tableSize := 1 << s.actualTableLog - if cap(s.decTable) < tableSize { - s.decTable = make([]decSymbol, tableSize) - } - s.decTable = s.decTable[:tableSize] - - if cap(s.ct.tableSymbol) < 256 { - s.ct.tableSymbol = make([]byte, 256) - } - s.ct.tableSymbol = s.ct.tableSymbol[:256] - - if cap(s.ct.stateTable) < 256 { - s.ct.stateTable = make([]uint16, 256) - } - s.ct.stateTable = s.ct.stateTable[:256] -} - -// buildDtable will build the decoding table. -func (s *Scratch) buildDtable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - s.allocDtable() - symbolNext := s.ct.stateTable[:256] - - // Init, lay down lowprob symbols - s.zeroBits = false - { - largeLimit := int16(1 << (s.actualTableLog - 1)) - for i, v := range s.norm[:s.symbolLen] { - if v == -1 { - s.decTable[highThreshold].symbol = uint8(i) - highThreshold-- - symbolNext[i] = 1 - } else { - if v >= largeLimit { - s.zeroBits = true - } - symbolNext[i] = uint16(v) - } - } - } - // Spread symbols - { - tableMask := tableSize - 1 - step := tableStep(tableSize) - position := uint32(0) - for ss, v := range s.norm[:s.symbolLen] { - for i := 0; i < int(v); i++ { - s.decTable[position].symbol = uint8(ss) - position = (position + step) & tableMask - for position > highThreshold { - // lowprob area - position = (position + step) & tableMask - } - } - } - if position != 0 { - // position must reach all cells once, otherwise normalizedCounter is incorrect - return errors.New("corrupted input (position != 0)") - } - } - - // Build Decoding table - { - tableSize := uint16(1 << s.actualTableLog) - for u, v := range s.decTable { - symbol := v.symbol - nextState := symbolNext[symbol] - symbolNext[symbol] = nextState + 1 - nBits := s.actualTableLog - byte(highBits(uint32(nextState))) - s.decTable[u].nbBits = nBits - newState := (nextState << nBits) - tableSize - if newState >= tableSize { - return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) - } - if newState == uint16(u) && nBits == 0 { - // Seems weird that this is possible with nbits > 0. - return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) - } - s.decTable[u].newState = newState - } - } - return nil -} - -// decompress will decompress the bitstream. -// If the buffer is over-read an error is returned. -func (s *Scratch) decompress() error { - br := &s.bits - if err := br.init(s.br.unread()); err != nil { - return err - } - - var s1, s2 decoder - // Initialize and decode first state and symbol. - s1.init(br, s.decTable, s.actualTableLog) - s2.init(br, s.decTable, s.actualTableLog) - - // Use temp table to avoid bound checks/append penalty. - var tmp = s.ct.tableSymbol[:256] - var off uint8 - - // Main part - if !s.zeroBits { - for br.off >= 8 { - br.fillFast() - tmp[off+0] = s1.nextFast() - tmp[off+1] = s2.nextFast() - br.fillFast() - tmp[off+2] = s1.nextFast() - tmp[off+3] = s2.nextFast() - off += 4 - // When off is 0, we have overflowed and should write. - if off == 0 { - s.Out = append(s.Out, tmp...) - if len(s.Out) >= s.DecompressLimit { - return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) - } - } - } - } else { - for br.off >= 8 { - br.fillFast() - tmp[off+0] = s1.next() - tmp[off+1] = s2.next() - br.fillFast() - tmp[off+2] = s1.next() - tmp[off+3] = s2.next() - off += 4 - if off == 0 { - s.Out = append(s.Out, tmp...) - // When off is 0, we have overflowed and should write. 
- if len(s.Out) >= s.DecompressLimit { - return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) - } - } - } - } - s.Out = append(s.Out, tmp[:off]...) - - // Final bits, a bit more expensive check - for { - if s1.finished() { - s.Out = append(s.Out, s1.final(), s2.final()) - break - } - br.fill() - s.Out = append(s.Out, s1.next()) - if s2.finished() { - s.Out = append(s.Out, s2.final(), s1.final()) - break - } - s.Out = append(s.Out, s2.next()) - if len(s.Out) >= s.DecompressLimit { - return fmt.Errorf("output size (%d) > DecompressLimit (%d)", len(s.Out), s.DecompressLimit) - } - } - return br.close() -} - -// decoder keeps track of the current state and updates it from the bitstream. -type decoder struct { - state uint16 - br *bitReader - dt []decSymbol -} - -// init will initialize the decoder and read the first state from the stream. -func (d *decoder) init(in *bitReader, dt []decSymbol, tableLog uint8) { - d.dt = dt - d.br = in - d.state = in.getBits(tableLog) -} - -// next returns the next symbol and sets the next state. -// At least tablelog bits must be available in the bit reader. -func (d *decoder) next() uint8 { - n := &d.dt[d.state] - lowBits := d.br.getBits(n.nbBits) - d.state = n.newState + lowBits - return n.symbol -} - -// finished returns true if all bits have been read from the bitstream -// and the next state would require reading bits from the input. -func (d *decoder) finished() bool { - return d.br.finished() && d.dt[d.state].nbBits > 0 -} - -// final returns the current state symbol without decoding the next. -func (d *decoder) final() uint8 { - return d.dt[d.state].symbol -} - -// nextFast returns the next symbol and sets the next state. -// This can only be used if no symbols are 0 bits. -// At least tablelog bits must be available in the bit reader. -func (d *decoder) nextFast() uint8 { - n := d.dt[d.state] - lowBits := d.br.getBitsFast(n.nbBits) - d.state = n.newState + lowBits - return n.symbol -} diff --git a/vendor/github.com/klauspost/compress/fse/fse.go b/vendor/github.com/klauspost/compress/fse/fse.go deleted file mode 100644 index 535cbadfde..0000000000 --- a/vendor/github.com/klauspost/compress/fse/fse.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -// Package fse provides Finite State Entropy encoding and decoding. -// -// Finite State Entropy encoding provides a fast near-optimal symbol encoding/decoding -// for byte blocks as implemented in zstd. -// -// See https://github.com/klauspost/compress/tree/master/fse for more information. -package fse - -import ( - "errors" - "fmt" - "math/bits" -) - -const ( - /*!MEMORY_USAGE : - * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) - * Increasing memory usage improves compression ratio - * Reduced memory usage can improve speed, due to cache effect - * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ - maxMemoryUsage = 14 - defaultMemoryUsage = 13 - - maxTableLog = maxMemoryUsage - 2 - maxTablesize = 1 << maxTableLog - defaultTablelog = defaultMemoryUsage - 2 - minTablelog = 5 - maxSymbolValue = 255 -) - -var ( - // ErrIncompressible is returned when input is judged to be too hard to compress. 
- ErrIncompressible = errors.New("input is not compressible") - - // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. - ErrUseRLE = errors.New("input is single value repeated") -) - -// Scratch provides temporary storage for compression and decompression. -type Scratch struct { - // Private - count [maxSymbolValue + 1]uint32 - norm [maxSymbolValue + 1]int16 - br byteReader - bits bitReader - bw bitWriter - ct cTable // Compression tables. - decTable []decSymbol // Decompression table. - maxCount int // count of the most probable symbol - - // Per block parameters. - // These can be used to override compression parameters of the block. - // Do not touch, unless you know what you are doing. - - // Out is output buffer. - // If the scratch is re-used before the caller is done processing the output, - // set this field to nil. - // Otherwise the output buffer will be re-used for next Compression/Decompression step - // and allocation will be avoided. - Out []byte - - // DecompressLimit limits the maximum decoded size acceptable. - // If > 0 decompression will stop when approximately this many bytes - // has been decoded. - // If 0, maximum size will be 2GB. - DecompressLimit int - - symbolLen uint16 // Length of active part of the symbol table. - actualTableLog uint8 // Selected tablelog. - zeroBits bool // no bits has prob > 50%. - clearCount bool // clear count - - // MaxSymbolValue will override the maximum symbol value of the next block. - MaxSymbolValue uint8 - - // TableLog will attempt to override the tablelog for the next block. - TableLog uint8 -} - -// Histogram allows to populate the histogram and skip that step in the compression, -// It otherwise allows to inspect the histogram when compression is done. -// To indicate that you have populated the histogram call HistogramFinished -// with the value of the highest populated symbol, as well as the number of entries -// in the most populated entry. These are accepted at face value. -// The returned slice will always be length 256. -func (s *Scratch) Histogram() []uint32 { - return s.count[:] -} - -// HistogramFinished can be called to indicate that the histogram has been populated. -// maxSymbol is the index of the highest set symbol of the next data segment. -// maxCount is the number of entries in the most populated entry. -// These are accepted at face value. -func (s *Scratch) HistogramFinished(maxSymbol uint8, maxCount int) { - s.maxCount = maxCount - s.symbolLen = uint16(maxSymbol) + 1 - s.clearCount = maxCount != 0 -} - -// prepare will prepare and allocate scratch tables used for both compression and decompression. -func (s *Scratch) prepare(in []byte) (*Scratch, error) { - if s == nil { - s = &Scratch{} - } - if s.MaxSymbolValue == 0 { - s.MaxSymbolValue = 255 - } - if s.TableLog == 0 { - s.TableLog = defaultTablelog - } - if s.TableLog > maxTableLog { - return nil, fmt.Errorf("tableLog (%d) > maxTableLog (%d)", s.TableLog, maxTableLog) - } - if cap(s.Out) == 0 { - s.Out = make([]byte, 0, len(in)) - } - if s.clearCount && s.maxCount == 0 { - for i := range s.count { - s.count[i] = 0 - } - s.clearCount = false - } - s.br.init(in) - if s.DecompressLimit == 0 { - // Max size 2GB. - s.DecompressLimit = (2 << 30) - 1 - } - - return s, nil -} - -// tableStep returns the next table index. 
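Stepping back from the internals: the exported pieces of this file and the removed compress.go/decompress.go compose into the usual round-trip. A hedged usage sketch follows; `Compress` and the sentinel errors appear verbatim in this patch, while the exported `Decompress` signature is assumed to mirror `Compress`:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/fse"
)

func main() {
	in := bytes.Repeat([]byte("abbcccdddd"), 32)

	var s fse.Scratch
	comp, err := fse.Compress(in, &s)
	switch err {
	case fse.ErrIncompressible, fse.ErrUseRLE:
		fmt.Println("store the block raw / as RLE:", err) // both are normal outcomes
		return
	case nil:
	default:
		panic(err)
	}

	var d fse.Scratch
	d.DecompressLimit = 1 << 20 // generous cap; 0 defaults to roughly 2GB
	out, err := fse.Decompress(comp, &d)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(out, in)) // true
}
```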
-func tableStep(tableSize uint32) uint32 {
-	return (tableSize >> 1) + (tableSize >> 3) + 3
-}
-
-func highBits(val uint32) (n uint32) {
-	return uint32(bits.Len32(val) - 1)
-}
diff --git a/vendor/github.com/klauspost/compress/gen.sh b/vendor/github.com/klauspost/compress/gen.sh
deleted file mode 100644
index aff942205f..0000000000
--- a/vendor/github.com/klauspost/compress/gen.sh
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/sh
-
-cd s2/cmd/_s2sx/ || exit 1
-go generate .
diff --git a/vendor/github.com/klauspost/compress/huff0/.gitignore b/vendor/github.com/klauspost/compress/huff0/.gitignore
deleted file mode 100644
index b3d262958f..0000000000
--- a/vendor/github.com/klauspost/compress/huff0/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-/huff0-fuzz.zip
diff --git a/vendor/github.com/klauspost/compress/huff0/README.md b/vendor/github.com/klauspost/compress/huff0/README.md
deleted file mode 100644
index 8b6e5c6638..0000000000
--- a/vendor/github.com/klauspost/compress/huff0/README.md
+++ /dev/null
@@ -1,89 +0,0 @@
-# Huff0 entropy compression
-
-This package provides Huff0 encoding and decoding as used in zstd.
-
-[Huff0](https://github.com/Cyan4973/FiniteStateEntropy#new-generation-entropy-coders) is
-a Huffman codec designed for modern CPUs, featuring OoO (Out of Order) operations on multiple ALUs
-(Arithmetic Logic Units), achieving extremely fast compression and decompression speeds.
-
-This can be used for compressing input with a lot of similar input values to the smallest number of bytes.
-This does not perform any multi-byte [dictionary coding](https://en.wikipedia.org/wiki/Dictionary_coder) as LZ coders do,
-but it can be used as a secondary step to compressors (like Snappy) that do not do entropy encoding.
-
-* [Godoc documentation](https://godoc.org/github.com/klauspost/compress/huff0)
-
-## News
-
-This is used as part of the [zstandard](https://github.com/klauspost/compress/tree/master/zstd#zstd) compression and decompression package.
-
-This ensures that most functionality is well tested.
-
-# Usage
-
-This package provides a low level interface that allows compressing single independent blocks.
-
-Each block is separate, and there are no built-in integrity checks.
-This means that the caller should keep track of block sizes and also do checksums if needed.
-
-Compressing a block is done via the [`Compress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress1X) and
-[`Compress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Compress4X) functions.
-You must provide input and will receive the output and maybe an error.
-
-These error values can be returned:
-
-| Error               | Description                                                                  |
-|---------------------|------------------------------------------------------------------------------|
-| `<nil>`             | Everything ok, output is returned                                            |
-| `ErrIncompressible` | Returned when input is judged to be too hard to compress                     |
-| `ErrUseRLE`         | Returned from the compressor when the input is a single byte value repeated  |
-| `ErrTooBig`         | Returned if the input block exceeds the maximum allowed size (128 KiB)       |
-| `(error)`           | An internal error occurred.                                                  |
-
-
-As can be seen above, some of these errors will be returned even under normal operation, so it is important to handle them.
-
-To reduce allocations you can provide a [`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object
-that can be re-used for successive calls. Both compression and decompression accept a `Scratch` object, and the same
-object can be used for both.
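Putting this README's pieces together, a 1X round-trip might look like the sketch below; the `ReadTable` and `Decompress1X` signatures are taken from the godoc links above and should be treated as illustrative:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/huff0"
)

func main() {
	in := make([]byte, 4096)
	for i := range in {
		in[i] = byte(i % 16) // 16 distinct symbols: compressible, not RLE
	}

	var s huff0.Scratch
	comp, _, err := huff0.Compress1X(in, &s)
	if err != nil {
		panic(err) // ErrIncompressible / ErrUseRLE are normal outcomes too
	}

	// ReadTable consumes the serialized table and returns the data part.
	d, remain, err := huff0.ReadTable(comp, nil)
	if err != nil {
		panic(err)
	}
	out, err := d.Decompress1X(remain)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(out, in)) // true
}
```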
-
-Be aware that, when re-using a `Scratch` object, the *output* buffer is also re-used; if you are still using the previous output
-you must set the `Out` field in the scratch to nil. The same buffer is used for compression and decompression output.
-
-The `Scratch` object will retain state that allows re-using previous tables for encoding and decoding.
-
-## Tables and re-use
-
-Huff0 allows for reusing tables from the previous block to save space if that is expected to give better/faster results.
-
-The Scratch object allows you to set a [`ReusePolicy`](https://godoc.org/github.com/klauspost/compress/huff0#ReusePolicy)
-that controls this behaviour. See the documentation for details. This can be altered between each block.
-
-Do, however, note that this information is *not* stored in the output block, and it is up to the users of the package to
-record whether [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable) should be called,
-based on the boolean reported back from the CompressXX call.
-
-If you want to store the table separately from the data, you can access them as `OutData` and `OutTable` on the
-[`Scratch`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch) object.
-
-## Decompressing
-
-The first part of decoding is to initialize the decoding tables through [`ReadTable`](https://godoc.org/github.com/klauspost/compress/huff0#ReadTable).
-You can supply the complete block to `ReadTable` and it will return the data part of the block
-which can be given to the decompressor.
-
-Decompressing is done by calling the [`Decompress1X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress1X)
-or [`Decompress4X`](https://godoc.org/github.com/klauspost/compress/huff0#Scratch.Decompress4X) function.
-
-For concurrently decompressing content with a fixed table, a stateless [`Decoder`](https://godoc.org/github.com/klauspost/compress/huff0#Decoder) can be requested, which will remain correct as long as the scratch is unchanged. The capacity of the provided slice indicates the expected output size.
-
-You must provide the output from the compression stage, at exactly the size you got back. If you receive an error back,
-your input was likely corrupted.
-
-It is important to note that a successful decoding does *not* mean your output matches your original input.
-There are no integrity checks, so relying on errors from the decompressor does not assure your data is valid.
-
-# Contributing
-
-Contributions are always welcome. Be aware that adding public functions will require good justification and breaking
-changes will likely not be accepted. If in doubt, open an issue before writing the PR.
diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go
deleted file mode 100644
index e36d9742f9..0000000000
--- a/vendor/github.com/klauspost/compress/huff0/bitreader.go
+++ /dev/null
@@ -1,229 +0,0 @@
-// Copyright 2018 Klaus Post. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
-
-package huff0
-
-import (
-	"encoding/binary"
-	"errors"
-	"fmt"
-	"io"
-)
-
-// bitReader reads a bitstream in reverse.
-// The last set bit indicates the start of the stream and is used
-// for aligning the input.
-type bitReaderBytes struct { - in []byte - off uint // next byte to read is at in[off - 1] - value uint64 - bitsRead uint8 -} - -// init initializes and resets the bit reader. -func (b *bitReaderBytes) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.advance(8 - uint8(highBit32(uint32(v)))) - return nil -} - -// peekBitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReaderBytes) peekByteFast() uint8 { - got := uint8(b.value >> 56) - return got -} - -func (b *bitReaderBytes) advance(n uint8) { - b.bitsRead += n - b.value <<= n & 63 -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReaderBytes) fillFast() { - if b.bitsRead < 32 { - return - } - - // 2 bounds checks. - v := b.in[b.off-4 : b.off] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value |= uint64(low) << (b.bitsRead - 32) - b.bitsRead -= 32 - b.off -= 4 -} - -// fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read. -func (b *bitReaderBytes) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// fill() will make sure at least 32 bits are available. -func (b *bitReaderBytes) fill() { - if b.bitsRead < 32 { - return - } - if b.off > 4 { - v := b.in[b.off-4 : b.off] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value |= uint64(low) << (b.bitsRead - 32) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value |= uint64(b.in[b.off-1]) << (b.bitsRead - 8) - b.bitsRead -= 8 - b.off-- - } -} - -// finished returns true if all bits have been read from the bit stream. -func (b *bitReaderBytes) finished() bool { - return b.off == 0 && b.bitsRead >= 64 -} - -func (b *bitReaderBytes) remaining() uint { - return b.off*8 + uint(64-b.bitsRead) -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReaderBytes) close() error { - // Release reference. - b.in = nil - if b.remaining() > 0 { - return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) - } - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} - -// bitReaderShifted reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. -type bitReaderShifted struct { - in []byte - off uint // next byte to read is at in[off - 1] - value uint64 - bitsRead uint8 -} - -// init initializes and resets the bit reader. 
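Both `init` implementations that follow locate the true end of the reversed stream through the highest set bit of the final byte, which is the end mark the matching bit writers append in `close`. Just that alignment step, standalone:

```go
package main

import (
	"fmt"
	"math/bits"
)

func main() {
	last := byte(0b0001_0110) // final byte of a stream; the top set bit is the end mark
	if last == 0 {
		panic("corrupt stream, did not find end of stream")
	}
	// Skip the padding zeros plus the mark itself, as init's advance() does.
	skip := 8 - uint8(bits.Len8(last)-1)
	fmt.Println(skip) // 4: three leading zeros plus the end-mark bit
}
```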
-func (b *bitReaderShifted) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.advance(8 - uint8(highBit32(uint32(v)))) - return nil -} - -// peekBitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 { - return uint16(b.value >> ((64 - n) & 63)) -} - -func (b *bitReaderShifted) advance(n uint8) { - b.bitsRead += n - b.value <<= n & 63 -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReaderShifted) fillFast() { - if b.bitsRead < 32 { - return - } - - // 2 bounds checks. - v := b.in[b.off-4 : b.off] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value |= uint64(low) << ((b.bitsRead - 32) & 63) - b.bitsRead -= 32 - b.off -= 4 -} - -// fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read. -func (b *bitReaderShifted) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// fill() will make sure at least 32 bits are available. -func (b *bitReaderShifted) fill() { - if b.bitsRead < 32 { - return - } - if b.off > 4 { - v := b.in[b.off-4 : b.off] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value |= uint64(low) << ((b.bitsRead - 32) & 63) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value |= uint64(b.in[b.off-1]) << ((b.bitsRead - 8) & 63) - b.bitsRead -= 8 - b.off-- - } -} - -func (b *bitReaderShifted) remaining() uint { - return b.off*8 + uint(64-b.bitsRead) -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReaderShifted) close() error { - // Release reference. - b.in = nil - if b.remaining() > 0 { - return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) - } - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go deleted file mode 100644 index 0ebc9aaac7..0000000000 --- a/vendor/github.com/klauspost/compress/huff0/bitwriter.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package huff0 - -// bitWriter will write bits. -// First bit will be LSB of the first byte of output. -type bitWriter struct { - bitContainer uint64 - nBits uint8 - out []byte -} - -// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// encSymbol will add up to 16 bits. 
value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) encSymbol(ct cTable, symbol byte) { - enc := ct[symbol] - b.bitContainer |= uint64(enc.val) << (b.nBits & 63) - if false { - if enc.nBits == 0 { - panic("nbits 0") - } - } - b.nBits += enc.nBits -} - -// encTwoSymbols will add up to 32 bits. value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) encTwoSymbols(ct cTable, av, bv byte) { - encA := ct[av] - encB := ct[bv] - sh := b.nBits & 63 - combined := uint64(encA.val) | (uint64(encB.val) << (encA.nBits & 63)) - b.bitContainer |= combined << sh - if false { - if encA.nBits == 0 { - panic("nbitsA 0") - } - if encB.nBits == 0 { - panic("nbitsB 0") - } - } - b.nBits += encA.nBits + encB.nBits -} - -// encFourSymbols adds up to 32 bits from four symbols. -// It will not check if there is space for them, -// so the caller must ensure that b has been flushed recently. -func (b *bitWriter) encFourSymbols(encA, encB, encC, encD cTableEntry) { - bitsA := encA.nBits - bitsB := bitsA + encB.nBits - bitsC := bitsB + encC.nBits - bitsD := bitsC + encD.nBits - combined := uint64(encA.val) | - (uint64(encB.val) << (bitsA & 63)) | - (uint64(encC.val) << (bitsB & 63)) | - (uint64(encD.val) << (bitsC & 63)) - b.bitContainer |= combined << (b.nBits & 63) - b.nBits += bitsD -} - -// flush32 will flush out, so there are at least 32 bits available for writing. -func (b *bitWriter) flush32() { - if b.nBits < 32 { - return - } - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24)) - b.nBits -= 32 - b.bitContainer >>= 32 -} - -// flushAlign will flush remaining full bytes and align to next byte boundary. -func (b *bitWriter) flushAlign() { - nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { - b.out = append(b.out, byte(b.bitContainer>>(i*8))) - } - b.nBits = 0 - b.bitContainer = 0 -} - -// close will write the alignment bit and write the final byte(s) -// to the output. -func (b *bitWriter) close() { - // End mark - b.addBits16Clean(1, 1) - // flush until next byte. - b.flushAlign() -} diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go deleted file mode 100644 index 84aa3d12f0..0000000000 --- a/vendor/github.com/klauspost/compress/huff0/compress.go +++ /dev/null @@ -1,742 +0,0 @@ -package huff0 - -import ( - "fmt" - "math" - "runtime" - "sync" -) - -// Compress1X will compress the input. -// The output can be decoded using Decompress1X. -// Supply a Scratch object. The scratch object contains state about re-use, -// So when sharing across independent encodes, be sure to set the re-use policy. -func Compress1X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { - s, err = s.prepare(in) - if err != nil { - return nil, false, err - } - return compress(in, s, s.compress1X) -} - -// Compress4X will compress the input. The input is split into 4 independent blocks -// and compressed similar to Compress1X. -// The output can be decoded using Decompress4X. -// Supply a Scratch object. The scratch object contains state about re-use, -// So when sharing across independent encodes, be sure to set the re-use policy. 
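`Compress4X` below splits the input into four independent streams and prefixes the output with a six-byte jump table: the first three stream lengths as little-endian `uint16`, with the fourth implied by the total. A sketch of parsing that framing back (my own helper, not the package API):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// splitStreams parses the 4X framing: three little-endian uint16 lengths,
// then four concatenated streams (the last length is implicit).
func splitStreams(block []byte) (streams [4][]byte, err error) {
	if len(block) < 6 {
		return streams, fmt.Errorf("block too short for jump table")
	}
	rest := block[6:]
	for i := 0; i < 3; i++ {
		n := int(binary.LittleEndian.Uint16(block[i*2:]))
		if n > len(rest) {
			return streams, fmt.Errorf("jump table entry out of range")
		}
		streams[i], rest = rest[:n], rest[n:]
	}
	streams[3] = rest
	return streams, nil
}

func main() {
	// Header says 2, 3, 1; seven payload bytes leave one for the last stream.
	block := []byte{2, 0, 3, 0, 1, 0, 'a', 'b', 'c', 'd', 'e', 'f', 'g'}
	streams, err := splitStreams(block)
	fmt.Printf("%q %v\n", streams, err) // ["ab" "cde" "f" "g"] <nil>
}
```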
-func Compress4X(in []byte, s *Scratch) (out []byte, reUsed bool, err error) { - s, err = s.prepare(in) - if err != nil { - return nil, false, err - } - if false { - // TODO: compress4Xp only slightly faster. - const parallelThreshold = 8 << 10 - if len(in) < parallelThreshold || runtime.GOMAXPROCS(0) == 1 { - return compress(in, s, s.compress4X) - } - return compress(in, s, s.compress4Xp) - } - return compress(in, s, s.compress4X) -} - -func compress(in []byte, s *Scratch, compressor func(src []byte) ([]byte, error)) (out []byte, reUsed bool, err error) { - // Nuke previous table if we cannot reuse anyway. - if s.Reuse == ReusePolicyNone { - s.prevTable = s.prevTable[:0] - } - - // Create histogram, if none was provided. - maxCount := s.maxCount - var canReuse = false - if maxCount == 0 { - maxCount, canReuse = s.countSimple(in) - } else { - canReuse = s.canUseTable(s.prevTable) - } - - // We want the output size to be less than this: - wantSize := len(in) - if s.WantLogLess > 0 { - wantSize -= wantSize >> s.WantLogLess - } - - // Reset for next run. - s.clearCount = true - s.maxCount = 0 - if maxCount >= len(in) { - if maxCount > len(in) { - return nil, false, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) - } - if len(in) == 1 { - return nil, false, ErrIncompressible - } - // One symbol, use RLE - return nil, false, ErrUseRLE - } - if maxCount == 1 || maxCount < (len(in)>>7) { - // Each symbol present maximum once or too well distributed. - return nil, false, ErrIncompressible - } - if s.Reuse == ReusePolicyMust && !canReuse { - // We must reuse, but we can't. - return nil, false, ErrIncompressible - } - if (s.Reuse == ReusePolicyPrefer || s.Reuse == ReusePolicyMust) && canReuse { - keepTable := s.cTable - keepTL := s.actualTableLog - s.cTable = s.prevTable - s.actualTableLog = s.prevTableLog - s.Out, err = compressor(in) - s.cTable = keepTable - s.actualTableLog = keepTL - if err == nil && len(s.Out) < wantSize { - s.OutData = s.Out - return s.Out, true, nil - } - if s.Reuse == ReusePolicyMust { - return nil, false, ErrIncompressible - } - // Do not attempt to re-use later. - s.prevTable = s.prevTable[:0] - } - - // Calculate new table. - err = s.buildCTable() - if err != nil { - return nil, false, err - } - - if false && !s.canUseTable(s.cTable) { - panic("invalid table generated") - } - - if s.Reuse == ReusePolicyAllow && canReuse { - hSize := len(s.Out) - oldSize := s.prevTable.estimateSize(s.count[:s.symbolLen]) - newSize := s.cTable.estimateSize(s.count[:s.symbolLen]) - if oldSize <= hSize+newSize || hSize+12 >= wantSize { - // Retain cTable even if we re-use. - keepTable := s.cTable - keepTL := s.actualTableLog - - s.cTable = s.prevTable - s.actualTableLog = s.prevTableLog - s.Out, err = compressor(in) - - // Restore ctable. - s.cTable = keepTable - s.actualTableLog = keepTL - if err != nil { - return nil, false, err - } - if len(s.Out) >= wantSize { - return nil, false, ErrIncompressible - } - s.OutData = s.Out - return s.Out, true, nil - } - } - - // Use new table - err = s.cTable.write(s) - if err != nil { - s.OutTable = nil - return nil, false, err - } - s.OutTable = s.Out - - // Compress using new table - s.Out, err = compressor(in) - if err != nil { - s.OutTable = nil - return nil, false, err - } - if len(s.Out) >= wantSize { - s.OutTable = nil - return nil, false, ErrIncompressible - } - // Move current table into previous. 
- s.prevTable, s.prevTableLog, s.cTable = s.cTable, s.actualTableLog, s.prevTable[:0] - s.OutData = s.Out[len(s.OutTable):] - return s.Out, false, nil -} - -// EstimateSizes will estimate the data sizes -func EstimateSizes(in []byte, s *Scratch) (tableSz, dataSz, reuseSz int, err error) { - s, err = s.prepare(in) - if err != nil { - return 0, 0, 0, err - } - - // Create histogram, if none was provided. - tableSz, dataSz, reuseSz = -1, -1, -1 - maxCount := s.maxCount - var canReuse = false - if maxCount == 0 { - maxCount, canReuse = s.countSimple(in) - } else { - canReuse = s.canUseTable(s.prevTable) - } - - // We want the output size to be less than this: - wantSize := len(in) - if s.WantLogLess > 0 { - wantSize -= wantSize >> s.WantLogLess - } - - // Reset for next run. - s.clearCount = true - s.maxCount = 0 - if maxCount >= len(in) { - if maxCount > len(in) { - return 0, 0, 0, fmt.Errorf("maxCount (%d) > length (%d)", maxCount, len(in)) - } - if len(in) == 1 { - return 0, 0, 0, ErrIncompressible - } - // One symbol, use RLE - return 0, 0, 0, ErrUseRLE - } - if maxCount == 1 || maxCount < (len(in)>>7) { - // Each symbol present maximum once or too well distributed. - return 0, 0, 0, ErrIncompressible - } - - // Calculate new table. - err = s.buildCTable() - if err != nil { - return 0, 0, 0, err - } - - if false && !s.canUseTable(s.cTable) { - panic("invalid table generated") - } - - tableSz, err = s.cTable.estTableSize(s) - if err != nil { - return 0, 0, 0, err - } - if canReuse { - reuseSz = s.prevTable.estimateSize(s.count[:s.symbolLen]) - } - dataSz = s.cTable.estimateSize(s.count[:s.symbolLen]) - - // Restore - return tableSz, dataSz, reuseSz, nil -} - -func (s *Scratch) compress1X(src []byte) ([]byte, error) { - return s.compress1xDo(s.Out, src), nil -} - -func (s *Scratch) compress1xDo(dst, src []byte) []byte { - var bw = bitWriter{out: dst} - - // N is length divisible by 4. - n := len(src) - n -= n & 3 - cTable := s.cTable[:256] - - // Encode last bytes. - for i := len(src) & 3; i > 0; i-- { - bw.encSymbol(cTable, src[n+i-1]) - } - n -= 4 - if s.actualTableLog <= 8 { - for ; n >= 0; n -= 4 { - tmp := src[n : n+4] - // tmp should be len 4 - bw.flush32() - bw.encFourSymbols(cTable[tmp[3]], cTable[tmp[2]], cTable[tmp[1]], cTable[tmp[0]]) - } - } else { - for ; n >= 0; n -= 4 { - tmp := src[n : n+4] - // tmp should be len 4 - bw.flush32() - bw.encTwoSymbols(cTable, tmp[3], tmp[2]) - bw.flush32() - bw.encTwoSymbols(cTable, tmp[1], tmp[0]) - } - } - bw.close() - return bw.out -} - -var sixZeros [6]byte - -func (s *Scratch) compress4X(src []byte) ([]byte, error) { - if len(src) < 12 { - return nil, ErrIncompressible - } - segmentSize := (len(src) + 3) / 4 - - // Add placeholder for output length - offsetIdx := len(s.Out) - s.Out = append(s.Out, sixZeros[:]...) - - for i := 0; i < 4; i++ { - toDo := src - if len(toDo) > segmentSize { - toDo = toDo[:segmentSize] - } - src = src[len(toDo):] - - idx := len(s.Out) - s.Out = s.compress1xDo(s.Out, toDo) - if len(s.Out)-idx > math.MaxUint16 { - // We cannot store the size in the jump table - return nil, ErrIncompressible - } - // Write compressed length as little endian before block. - if i < 3 { - // Last length is not written. - length := len(s.Out) - idx - s.Out[i*2+offsetIdx] = byte(length) - s.Out[i*2+offsetIdx+1] = byte(length >> 8) - } - } - - return s.Out, nil -} - -// compress4Xp will compress 4 streams using separate goroutines. 
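`compress4Xp` below uses the standard fan-out shape: one goroutine per segment, each writing to its own slot so no locking is needed, joined by a `sync.WaitGroup`. The same shape in isolation, with a copy standing in for the per-segment compressor:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	src := []byte("abcdefghijkl")
	segmentSize := (len(src) + 3) / 4 // same split as compress4X above

	var out [4][]byte
	var wg sync.WaitGroup
	wg.Add(4)
	for i := 0; i < 4; i++ {
		toDo := src
		if len(toDo) > segmentSize {
			toDo = toDo[:segmentSize]
		}
		src = src[len(toDo):]
		go func(i int, seg []byte) {
			defer wg.Done()
			// Each goroutine owns out[i], so no locking is required.
			out[i] = append([]byte(nil), seg...) // stand-in for compress1xDo
		}(i, toDo)
	}
	wg.Wait()
	fmt.Printf("%q\n", out) // ["abc" "def" "ghi" "jkl"]
}
```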
-func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { - if len(src) < 12 { - return nil, ErrIncompressible - } - // Add placeholder for output length - s.Out = s.Out[:6] - - segmentSize := (len(src) + 3) / 4 - var wg sync.WaitGroup - wg.Add(4) - for i := 0; i < 4; i++ { - toDo := src - if len(toDo) > segmentSize { - toDo = toDo[:segmentSize] - } - src = src[len(toDo):] - - // Separate goroutine for each block. - go func(i int) { - s.tmpOut[i] = s.compress1xDo(s.tmpOut[i][:0], toDo) - wg.Done() - }(i) - } - wg.Wait() - for i := 0; i < 4; i++ { - o := s.tmpOut[i] - if len(o) > math.MaxUint16 { - // We cannot store the size in the jump table - return nil, ErrIncompressible - } - // Write compressed length as little endian before block. - if i < 3 { - // Last length is not written. - s.Out[i*2] = byte(len(o)) - s.Out[i*2+1] = byte(len(o) >> 8) - } - - // Write output. - s.Out = append(s.Out, o...) - } - return s.Out, nil -} - -// countSimple will create a simple histogram in s.count. -// Returns the biggest count. -// Does not update s.clearCount. -func (s *Scratch) countSimple(in []byte) (max int, reuse bool) { - reuse = true - _ = s.count // Assert that s != nil to speed up the following loop. - for _, v := range in { - s.count[v]++ - } - m := uint32(0) - if len(s.prevTable) > 0 { - for i, v := range s.count[:] { - if v == 0 { - continue - } - if v > m { - m = v - } - s.symbolLen = uint16(i) + 1 - if i >= len(s.prevTable) { - reuse = false - } else if s.prevTable[i].nBits == 0 { - reuse = false - } - } - return int(m), reuse - } - for i, v := range s.count[:] { - if v == 0 { - continue - } - if v > m { - m = v - } - s.symbolLen = uint16(i) + 1 - } - return int(m), false -} - -func (s *Scratch) canUseTable(c cTable) bool { - if len(c) < int(s.symbolLen) { - return false - } - for i, v := range s.count[:s.symbolLen] { - if v != 0 && c[i].nBits == 0 { - return false - } - } - return true -} - -//lint:ignore U1000 used for debugging -func (s *Scratch) validateTable(c cTable) bool { - if len(c) < int(s.symbolLen) { - return false - } - for i, v := range s.count[:s.symbolLen] { - if v != 0 { - if c[i].nBits == 0 { - return false - } - if c[i].nBits > s.actualTableLog { - return false - } - } - } - return true -} - -// minTableLog provides the minimum logSize to safely represent a distribution. 
-func (s *Scratch) minTableLog() uint8 { - minBitsSrc := highBit32(uint32(s.srcLen)) + 1 - minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2 - if minBitsSrc < minBitsSymbols { - return uint8(minBitsSrc) - } - return uint8(minBitsSymbols) -} - -// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog -func (s *Scratch) optimalTableLog() { - tableLog := s.TableLog - minBits := s.minTableLog() - maxBitsSrc := uint8(highBit32(uint32(s.srcLen-1))) - 1 - if maxBitsSrc < tableLog { - // Accuracy can be reduced - tableLog = maxBitsSrc - } - if minBits > tableLog { - tableLog = minBits - } - // Need a minimum to safely represent all symbol values - if tableLog < minTablelog { - tableLog = minTablelog - } - if tableLog > tableLogMax { - tableLog = tableLogMax - } - s.actualTableLog = tableLog -} - -type cTableEntry struct { - val uint16 - nBits uint8 - // We have 8 bits extra -} - -const huffNodesMask = huffNodesLen - 1 - -func (s *Scratch) buildCTable() error { - s.optimalTableLog() - s.huffSort() - if cap(s.cTable) < maxSymbolValue+1 { - s.cTable = make([]cTableEntry, s.symbolLen, maxSymbolValue+1) - } else { - s.cTable = s.cTable[:s.symbolLen] - for i := range s.cTable { - s.cTable[i] = cTableEntry{} - } - } - - var startNode = int16(s.symbolLen) - nonNullRank := s.symbolLen - 1 - - nodeNb := startNode - huffNode := s.nodes[1 : huffNodesLen+1] - - // This overlays the slice above, but allows "-1" index lookups. - // Different from reference implementation. - huffNode0 := s.nodes[0 : huffNodesLen+1] - - for huffNode[nonNullRank].count() == 0 { - nonNullRank-- - } - - lowS := int16(nonNullRank) - nodeRoot := nodeNb + lowS - 1 - lowN := nodeNb - huffNode[nodeNb].setCount(huffNode[lowS].count() + huffNode[lowS-1].count()) - huffNode[lowS].setParent(nodeNb) - huffNode[lowS-1].setParent(nodeNb) - nodeNb++ - lowS -= 2 - for n := nodeNb; n <= nodeRoot; n++ { - huffNode[n].setCount(1 << 30) - } - // fake entry, strong barrier - huffNode0[0].setCount(1 << 31) - - // create parents - for nodeNb <= nodeRoot { - var n1, n2 int16 - if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { - n1 = lowS - lowS-- - } else { - n1 = lowN - lowN++ - } - if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() { - n2 = lowS - lowS-- - } else { - n2 = lowN - lowN++ - } - - huffNode[nodeNb].setCount(huffNode0[n1+1].count() + huffNode0[n2+1].count()) - huffNode0[n1+1].setParent(nodeNb) - huffNode0[n2+1].setParent(nodeNb) - nodeNb++ - } - - // distribute weights (unlimited tree height) - huffNode[nodeRoot].setNbBits(0) - for n := nodeRoot - 1; n >= startNode; n-- { - huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) - } - for n := uint16(0); n <= nonNullRank; n++ { - huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1) - } - s.actualTableLog = s.setMaxHeight(int(nonNullRank)) - maxNbBits := s.actualTableLog - - // fill result into tree (val, nbBits) - if maxNbBits > tableLogMax { - return fmt.Errorf("internal error: maxNbBits (%d) > tableLogMax (%d)", maxNbBits, tableLogMax) - } - var nbPerRank [tableLogMax + 1]uint16 - var valPerRank [16]uint16 - for _, v := range huffNode[:nonNullRank+1] { - nbPerRank[v.nbBits()]++ - } - // determine stating value per rank - { - min := uint16(0) - for n := maxNbBits; n > 0; n-- { - // get starting value within each rank - valPerRank[n] = min - min += nbPerRank[n] - min >>= 1 - } - } - - // push nbBits per symbol, symbol order - for _, v := range huffNode[:nonNullRank+1] { - s.cTable[v.symbol()].nBits = v.nbBits() - } - - // 
assign value within rank, symbol order - t := s.cTable[:s.symbolLen] - for n, val := range t { - nbits := val.nBits & 15 - v := valPerRank[nbits] - t[n].val = v - valPerRank[nbits] = v + 1 - } - - return nil -} - -// huffSort will sort symbols, decreasing order. -func (s *Scratch) huffSort() { - type rankPos struct { - base uint32 - current uint32 - } - - // Clear nodes - nodes := s.nodes[:huffNodesLen+1] - s.nodes = nodes - nodes = nodes[1 : huffNodesLen+1] - - // Sort into buckets based on length of symbol count. - var rank [32]rankPos - for _, v := range s.count[:s.symbolLen] { - r := highBit32(v+1) & 31 - rank[r].base++ - } - // maxBitLength is log2(BlockSizeMax) + 1 - const maxBitLength = 18 + 1 - for n := maxBitLength; n > 0; n-- { - rank[n-1].base += rank[n].base - } - for n := range rank[:maxBitLength] { - rank[n].current = rank[n].base - } - for n, c := range s.count[:s.symbolLen] { - r := (highBit32(c+1) + 1) & 31 - pos := rank[r].current - rank[r].current++ - prev := nodes[(pos-1)&huffNodesMask] - for pos > rank[r].base && c > prev.count() { - nodes[pos&huffNodesMask] = prev - pos-- - prev = nodes[(pos-1)&huffNodesMask] - } - nodes[pos&huffNodesMask] = makeNodeElt(c, byte(n)) - } -} - -func (s *Scratch) setMaxHeight(lastNonNull int) uint8 { - maxNbBits := s.actualTableLog - huffNode := s.nodes[1 : huffNodesLen+1] - //huffNode = huffNode[: huffNodesLen] - - largestBits := huffNode[lastNonNull].nbBits() - - // early exit : no elt > maxNbBits - if largestBits <= maxNbBits { - return largestBits - } - totalCost := int(0) - baseCost := int(1) << (largestBits - maxNbBits) - n := uint32(lastNonNull) - - for huffNode[n].nbBits() > maxNbBits { - totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits())) - huffNode[n].setNbBits(maxNbBits) - n-- - } - // n stops at huffNode[n].nbBits <= maxNbBits - - for huffNode[n].nbBits() == maxNbBits { - n-- - } - // n end at index of smallest symbol using < maxNbBits - - // renorm totalCost - totalCost >>= largestBits - maxNbBits /* note : totalCost is necessarily a multiple of baseCost */ - - // repay normalized cost - { - const noSymbol = 0xF0F0F0F0 - var rankLast [tableLogMax + 2]uint32 - - for i := range rankLast[:] { - rankLast[i] = noSymbol - } - - // Get pos of last (smallest) symbol per rank - { - currentNbBits := maxNbBits - for pos := int(n); pos >= 0; pos-- { - if huffNode[pos].nbBits() >= currentNbBits { - continue - } - currentNbBits = huffNode[pos].nbBits() // < maxNbBits - rankLast[maxNbBits-currentNbBits] = uint32(pos) - } - } - - for totalCost > 0 { - nBitsToDecrease := uint8(highBit32(uint32(totalCost))) + 1 - - for ; nBitsToDecrease > 1; nBitsToDecrease-- { - highPos := rankLast[nBitsToDecrease] - lowPos := rankLast[nBitsToDecrease-1] - if highPos == noSymbol { - continue - } - if lowPos == noSymbol { - break - } - highTotal := huffNode[highPos].count() - lowTotal := 2 * huffNode[lowPos].count() - if highTotal <= lowTotal { - break - } - } - // only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) 
- // HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary - // FIXME: try to remove - for (nBitsToDecrease <= tableLogMax) && (rankLast[nBitsToDecrease] == noSymbol) { - nBitsToDecrease++ - } - totalCost -= 1 << (nBitsToDecrease - 1) - if rankLast[nBitsToDecrease-1] == noSymbol { - // this rank is no longer empty - rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease] - } - huffNode[rankLast[nBitsToDecrease]].setNbBits(1 + - huffNode[rankLast[nBitsToDecrease]].nbBits()) - if rankLast[nBitsToDecrease] == 0 { - /* special case, reached largest symbol */ - rankLast[nBitsToDecrease] = noSymbol - } else { - rankLast[nBitsToDecrease]-- - if huffNode[rankLast[nBitsToDecrease]].nbBits() != maxNbBits-nBitsToDecrease { - rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */ - } - } - } - - for totalCost < 0 { /* Sometimes, cost correction overshoot */ - if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */ - for huffNode[n].nbBits() == maxNbBits { - n-- - } - huffNode[n+1].setNbBits(huffNode[n+1].nbBits() - 1) - rankLast[1] = n + 1 - totalCost++ - continue - } - huffNode[rankLast[1]+1].setNbBits(huffNode[rankLast[1]+1].nbBits() - 1) - rankLast[1]++ - totalCost++ - } - } - return maxNbBits -} - -// A nodeElt is the fields -// -// count uint32 -// parent uint16 -// symbol byte -// nbBits uint8 -// -// in some order, all squashed into an integer so that the compiler -// always loads and stores entire nodeElts instead of separate fields. -type nodeElt uint64 - -func makeNodeElt(count uint32, symbol byte) nodeElt { - return nodeElt(count) | nodeElt(symbol)<<48 -} - -func (e *nodeElt) count() uint32 { return uint32(*e) } -func (e *nodeElt) parent() uint16 { return uint16(*e >> 32) } -func (e *nodeElt) symbol() byte { return byte(*e >> 48) } -func (e *nodeElt) nbBits() uint8 { return uint8(*e >> 56) } - -func (e *nodeElt) setCount(c uint32) { *e = (*e)&0xffffffff00000000 | nodeElt(c) } -func (e *nodeElt) setParent(p int16) { *e = (*e)&0xffff0000ffffffff | nodeElt(uint16(p))<<32 } -func (e *nodeElt) setNbBits(n uint8) { *e = (*e)&0x00ffffffffffffff | nodeElt(n)<<56 } diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go deleted file mode 100644 index 0f56b02d74..0000000000 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ /dev/null @@ -1,1167 +0,0 @@ -package huff0 - -import ( - "errors" - "fmt" - "io" - "sync" - - "github.com/klauspost/compress/fse" -) - -type dTable struct { - single []dEntrySingle -} - -// single-symbols decoding -type dEntrySingle struct { - entry uint16 -} - -// Uses special code for all tables that are < 8 bits. -const use8BitTables = true - -// ReadTable will read a table from the input. -// The size of the input may be larger than the table definition. -// Any content remaining after the table definition will be returned. -// If no Scratch is provided a new one is allocated. -// The returned Scratch can be used for encoding or decoding input using this table. 
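nodeElt above packs count (bits 0-31), parent (32-47), symbol (48-55) and nbBits (56-63) into a single uint64 so the sort and tree-building loops move whole nodes with one load or store. A quick sanity check of that layout, reusing the accessors defined above:

    e := makeNodeElt(12345, 0x42) // count=12345, symbol=0x42
    e.setParent(7)
    e.setNbBits(11)
    fmt.Println(e.count(), e.parent(), e.symbol(), e.nbBits()) // 12345 7 66 11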
-func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) { - s, err = s.prepare(nil) - if err != nil { - return s, nil, err - } - if len(in) <= 1 { - return s, nil, errors.New("input too small for table") - } - iSize := in[0] - in = in[1:] - if iSize >= 128 { - // Uncompressed - oSize := iSize - 127 - iSize = (oSize + 1) / 2 - if int(iSize) > len(in) { - return s, nil, errors.New("input too small for table") - } - for n := uint8(0); n < oSize; n += 2 { - v := in[n/2] - s.huffWeight[n] = v >> 4 - s.huffWeight[n+1] = v & 15 - } - s.symbolLen = uint16(oSize) - in = in[iSize:] - } else { - if len(in) < int(iSize) { - return s, nil, fmt.Errorf("input too small for table, want %d bytes, have %d", iSize, len(in)) - } - // FSE compressed weights - s.fse.DecompressLimit = 255 - hw := s.huffWeight[:] - s.fse.Out = hw - b, err := fse.Decompress(in[:iSize], s.fse) - s.fse.Out = nil - if err != nil { - return s, nil, fmt.Errorf("fse decompress returned: %w", err) - } - if len(b) > 255 { - return s, nil, errors.New("corrupt input: output table too large") - } - s.symbolLen = uint16(len(b)) - in = in[iSize:] - } - - // collect weight stats - var rankStats [16]uint32 - weightTotal := uint32(0) - for _, v := range s.huffWeight[:s.symbolLen] { - if v > tableLogMax { - return s, nil, errors.New("corrupt input: weight too large") - } - v2 := v & 15 - rankStats[v2]++ - // (1 << (v2-1)) is slower since the compiler cannot prove that v2 isn't 0. - weightTotal += (1 << v2) >> 1 - } - if weightTotal == 0 { - return s, nil, errors.New("corrupt input: weights zero") - } - - // get last non-null symbol weight (implied, total must be 2^n) - { - tableLog := highBit32(weightTotal) + 1 - if tableLog > tableLogMax { - return s, nil, errors.New("corrupt input: tableLog too big") - } - s.actualTableLog = uint8(tableLog) - // determine last weight - { - total := uint32(1) << tableLog - rest := total - weightTotal - verif := uint32(1) << highBit32(rest) - lastWeight := highBit32(rest) + 1 - if verif != rest { - // last value must be a clean power of 2 - return s, nil, errors.New("corrupt input: last value not power of two") - } - s.huffWeight[s.symbolLen] = uint8(lastWeight) - s.symbolLen++ - rankStats[lastWeight]++ - } - } - - if (rankStats[1] < 2) || (rankStats[1]&1 != 0) { - // by construction : at least 2 elts of rank 1, must be even - return s, nil, errors.New("corrupt input: min elt size, even check failed ") - } - - // TODO: Choose between single/double symbol decoding - - // Calculate starting value for each rank - { - var nextRankStart uint32 - for n := uint8(1); n < s.actualTableLog+1; n++ { - current := nextRankStart - nextRankStart += rankStats[n] << (n - 1) - rankStats[n] = current - } - } - - // fill DTable (always full size) - tSize := 1 << tableLogMax - if len(s.dt.single) != tSize { - s.dt.single = make([]dEntrySingle, tSize) - } - cTable := s.prevTable - if cap(cTable) < maxSymbolValue+1 { - cTable = make([]cTableEntry, 0, maxSymbolValue+1) - } - cTable = cTable[:maxSymbolValue+1] - s.prevTable = cTable[:s.symbolLen] - s.prevTableLog = s.actualTableLog - - for n, w := range s.huffWeight[:s.symbolLen] { - if w == 0 { - cTable[n] = cTableEntry{ - val: 0, - nBits: 0, - } - continue - } - length := (uint32(1) << w) >> 1 - d := dEntrySingle{ - entry: uint16(s.actualTableLog+1-w) | (uint16(n) << 8), - } - - rank := &rankStats[w] - cTable[n] = cTableEntry{ - val: uint16(*rank >> (w - 1)), - nBits: uint8(d.entry), - } - - single := s.dt.single[*rank : *rank+length] - for i := range single { - 
single[i] = d - } - *rank += length - } - - return s, in, nil -} - -// Decompress1X will decompress a 1X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// Before this is called, the table must be initialized with ReadTable unless -// the encoder re-used the table. -// deprecated: Use the stateless Decoder() to get a concurrent version. -func (s *Scratch) Decompress1X(in []byte) (out []byte, err error) { - if cap(s.Out) < s.MaxDecodedSize { - s.Out = make([]byte, s.MaxDecodedSize) - } - s.Out = s.Out[:0:s.MaxDecodedSize] - s.Out, err = s.Decoder().Decompress1X(s.Out, in) - return s.Out, err -} - -// Decompress4X will decompress a 4X encoded stream. -// Before this is called, the table must be initialized with ReadTable unless -// the encoder re-used the table. -// The length of the supplied input must match the end of a block exactly. -// The destination size of the uncompressed data must be known and provided. -// deprecated: Use the stateless Decoder() to get a concurrent version. -func (s *Scratch) Decompress4X(in []byte, dstSize int) (out []byte, err error) { - if dstSize > s.MaxDecodedSize { - return nil, ErrMaxDecodedSizeExceeded - } - if cap(s.Out) < dstSize { - s.Out = make([]byte, s.MaxDecodedSize) - } - s.Out = s.Out[:0:dstSize] - s.Out, err = s.Decoder().Decompress4X(s.Out, in) - return s.Out, err -} - -// Decoder will return a stateless decoder that can be used by multiple -// decompressors concurrently. -// Before this is called, the table must be initialized with ReadTable. -// The Decoder is still linked to the scratch buffer so that cannot be reused. -// However, it is safe to discard the scratch. -func (s *Scratch) Decoder() *Decoder { - return &Decoder{ - dt: s.dt, - actualTableLog: s.actualTableLog, - bufs: &s.decPool, - } -} - -// Decoder provides stateless decoding. -type Decoder struct { - dt dTable - actualTableLog uint8 - bufs *sync.Pool -} - -func (d *Decoder) buffer() *[4][256]byte { - buf, ok := d.bufs.Get().(*[4][256]byte) - if ok { - return buf - } - return &[4][256]byte{} -} - -// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. -// The cap of the output buffer will be the maximum decompressed size. -// The length of the supplied input must match the end of a block exactly. -func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { - if d.actualTableLog == 8 { - return d.decompress1X8BitExactly(dst, src) - } - var br bitReaderBytes - err := br.init(src) - if err != nil { - return dst, err - } - maxDecodedSize := cap(dst) - dst = dst[:0] - - // Avoid bounds check by always having full sized table. - dt := d.dt.single[:256] - - // Use temp table to avoid bound checks/append penalty. - bufs := d.buffer() - buf := &bufs[0] - var off uint8 - - switch d.actualTableLog { - case 8: - const shift = 0 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - d.bufs.Put(bufs) - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) 
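Every dEntrySingle entry built by ReadTable packs the bit length in the low byte and the decoded symbol in the high byte, which is why the loops here read uint8(v.entry) for the advance and v.entry >> 8 for the output byte. One decode step in isolation (a sketch; the real loops keep the bit buffer inside a bitReader):

    // decodeOne consumes one symbol from the top bits of a left-justified
    // 64-bit bit buffer, using a full-size single-symbol table.
    func decodeOne(dt []dEntrySingle, value uint64, peekBits uint8) (sym byte, nBits uint8) {
        v := dt[value>>peekBits].entry // index by the top tableLog bits
        return byte(v >> 8), uint8(v)  // high byte: symbol, low byte: bits used
    }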
- } - } - case 7: - const shift = 8 - 7 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - d.bufs.Put(bufs) - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - case 6: - const shift = 8 - 6 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - case 5: - const shift = 8 - 5 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - case 4: - const shift = 8 - 4 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - case 3: - const shift = 8 - 3 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) 
- } - } - case 2: - const shift = 8 - 2 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - case 1: - const shift = 8 - 1 - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>(56+shift))] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - default: - d.bufs.Put(bufs) - return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog) - } - - if len(dst)+int(off) > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:off]...) - - // br < 4, so uint8 is fine - bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) - shift := (8 - d.actualTableLog) & 7 - - for bitsLeft > 0 { - if br.bitsRead >= 64-8 { - for br.off > 0 { - br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) - br.bitsRead -= 8 - br.off-- - } - } - if len(dst) >= maxDecodedSize { - br.close() - d.bufs.Put(bufs) - return nil, ErrMaxDecodedSizeExceeded - } - v := dt[br.peekByteFast()>>shift] - nBits := uint8(v.entry) - br.advance(nBits) - bitsLeft -= int8(nBits) - dst = append(dst, uint8(v.entry>>8)) - } - d.bufs.Put(bufs) - return dst, br.close() -} - -// decompress1X8Bit will decompress a 1X encoded stream with tablelog <= 8. -// The cap of the output buffer will be the maximum decompressed size. -// The length of the supplied input must match the end of a block exactly. -func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { - var br bitReaderBytes - err := br.init(src) - if err != nil { - return dst, err - } - maxDecodedSize := cap(dst) - dst = dst[:0] - - // Avoid bounds check by always having full sized table. - dt := d.dt.single[:256] - - // Use temp table to avoid bound checks/append penalty. - bufs := d.buffer() - buf := &bufs[0] - var off uint8 - - const shift = 56 - - //fmt.Printf("mask: %b, tl:%d\n", mask, d.actualTableLog) - for br.off >= 4 { - br.fillFast() - v := dt[uint8(br.value>>shift)] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>shift)] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>shift)] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[uint8(br.value>>shift)] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) 
- } - } - - if len(dst)+int(off) > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:off]...) - - // br < 4, so uint8 is fine - bitsLeft := int8(uint8(br.off)*8 + (64 - br.bitsRead)) - for bitsLeft > 0 { - if br.bitsRead >= 64-8 { - for br.off > 0 { - br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) - br.bitsRead -= 8 - br.off-- - } - } - if len(dst) >= maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - v := dt[br.peekByteFast()] - nBits := uint8(v.entry) - br.advance(nBits) - bitsLeft -= int8(nBits) - dst = append(dst, uint8(v.entry>>8)) - } - d.bufs.Put(bufs) - return dst, br.close() -} - -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. -func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { - if d.actualTableLog == 8 { - return d.decompress4X8bitExactly(dst, src) - } - - var br [4]bitReaderBytes - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - shift := (56 + (8 - d.actualTableLog)) & 63 - - const tlSize = 1 << 8 - single := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - buf := d.buffer() - var off uint8 - var decoded int - - // Decode 4 values from each decoder/loop. - const bufoff = 256 - for { - if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { - break - } - - { - // Interleave 2 decodes. 
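The off += 4 / if off == 0 pattern repeated in the loops above relies on uint8 wraparound: the counter overflows back to zero after 64 groups of four symbols, exactly when the 256-byte scratch buffer is full and must be flushed to dst. A tiny illustration:

    var off uint8
    flushes := 0
    for i := 0; i < 256; i++ { // 256 groups of 4 decoded bytes
        off += 4
        if off == 0 { // wrapped: another 256 bytes accumulated
            flushes++ // the real loops append buf[:] to dst here
        }
    }
    fmt.Println(flushes) // 4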
- const stream = 0 - const stream2 = 1 - br1 := &br[stream] - br2 := &br[stream2] - br1.fillFast() - br2.fillFast() - - v := single[uint8(br1.value>>shift)].entry - v2 := single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off] = uint8(v >> 8) - buf[stream2][off] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+1] = uint8(v >> 8) - buf[stream2][off+1] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+2] = uint8(v >> 8) - buf[stream2][off+2] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+3] = uint8(v >> 8) - buf[stream2][off+3] = uint8(v2 >> 8) - } - - { - const stream = 2 - const stream2 = 3 - br1 := &br[stream] - br2 := &br[stream2] - br1.fillFast() - br2.fillFast() - - v := single[uint8(br1.value>>shift)].entry - v2 := single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off] = uint8(v >> 8) - buf[stream2][off] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+1] = uint8(v >> 8) - buf[stream2][off+1] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+2] = uint8(v >> 8) - buf[stream2][off+2] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+3] = uint8(v >> 8) - buf[stream2][off+3] = uint8(v2 >> 8) - } - - off += 4 - - if off == 0 { - if bufoff > dstEvery { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 1") - } - // There must at least be 3 buffers left. - if len(out)-bufoff < dstEvery*3 { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 2") - } - //copy(out, buf[0][:]) - //copy(out[dstEvery:], buf[1][:]) - //copy(out[dstEvery*2:], buf[2][:]) - *(*[bufoff]byte)(out) = buf[0] - *(*[bufoff]byte)(out[dstEvery:]) = buf[1] - *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] - *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] - out = out[bufoff:] - decoded += bufoff * 4 - } - } - if off > 0 { - ioff := int(off) - if len(out) < dstEvery*3+ioff { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 3") - } - copy(out, buf[0][:off]) - copy(out[dstEvery:], buf[1][:off]) - copy(out[dstEvery*2:], buf[2][:off]) - copy(out[dstEvery*3:], buf[3][:off]) - decoded += int(off) * 4 - out = out[off:] - } - - // Decode remaining. - // Decode remaining. 
- remainBytes := dstEvery - (decoded / 4) - for i := range br { - offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } - br := &br[i] - bitsLeft := br.remaining() - for bitsLeft > 0 { - if br.finished() { - d.bufs.Put(buf) - return nil, io.ErrUnexpectedEOF - } - if br.bitsRead >= 56 { - if br.off >= 4 { - v := br.in[br.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - br.value |= uint64(low) << (br.bitsRead - 32) - br.bitsRead -= 32 - br.off -= 4 - } else { - for br.off > 0 { - br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8) - br.bitsRead -= 8 - br.off-- - } - } - } - // end inline... - if offset >= endsAt { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 4") - } - - // Read value and increment offset. - v := single[uint8(br.value>>shift)].entry - nBits := uint8(v) - br.advance(nBits) - bitsLeft -= uint(nBits) - out[offset] = uint8(v >> 8) - offset++ - } - if offset != endsAt { - d.bufs.Put(buf) - return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) - } - decoded += offset - dstEvery*i - err = br.close() - if err != nil { - d.bufs.Put(buf) - return nil, err - } - } - d.bufs.Put(buf) - if dstSize != decoded { - return nil, errors.New("corruption detected: short output block") - } - return dst, nil -} - -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. -func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { - var br [4]bitReaderBytes - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - const shift = 56 - const tlSize = 1 << 8 - single := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - buf := d.buffer() - var off uint8 - var decoded int - - // Decode 4 values from each decoder/loop. - const bufoff = 256 - for { - if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { - break - } - - { - // Interleave 2 decodes. 
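The inline refill in the remainder loop above tops up the bit buffer only once at most eight unconsumed bits remain (bitsRead >= 56), reading the input back to front four bytes at a time, since huff0 streams are written in reverse. The same step as a standalone sketch:

    // refill32 mirrors the inline fill: with bitsRead >= 56 and four or more
    // input bytes left, splice 32 fresh bits in under the unconsumed ones.
    func refill32(in []byte, off int, value uint64, bitsRead uint8) (int, uint64, uint8) {
        if bitsRead >= 56 && off >= 4 {
            v := in[off-4 : off]
            low := uint32(v[0]) | uint32(v[1])<<8 | uint32(v[2])<<16 | uint32(v[3])<<24
            value |= uint64(low) << (bitsRead - 32)
            bitsRead -= 32
            off -= 4
        }
        return off, value, bitsRead
    }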
- const stream = 0 - const stream2 = 1 - br1 := &br[stream] - br2 := &br[stream2] - br1.fillFast() - br2.fillFast() - - v := single[uint8(br1.value>>shift)].entry - v2 := single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off] = uint8(v >> 8) - buf[stream2][off] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+1] = uint8(v >> 8) - buf[stream2][off+1] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+2] = uint8(v >> 8) - buf[stream2][off+2] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+3] = uint8(v >> 8) - buf[stream2][off+3] = uint8(v2 >> 8) - } - - { - const stream = 2 - const stream2 = 3 - br1 := &br[stream] - br2 := &br[stream2] - br1.fillFast() - br2.fillFast() - - v := single[uint8(br1.value>>shift)].entry - v2 := single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off] = uint8(v >> 8) - buf[stream2][off] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+1] = uint8(v >> 8) - buf[stream2][off+1] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+2] = uint8(v >> 8) - buf[stream2][off+2] = uint8(v2 >> 8) - - v = single[uint8(br1.value>>shift)].entry - v2 = single[uint8(br2.value>>shift)].entry - br1.bitsRead += uint8(v) - br1.value <<= v & 63 - br2.bitsRead += uint8(v2) - br2.value <<= v2 & 63 - buf[stream][off+3] = uint8(v >> 8) - buf[stream2][off+3] = uint8(v2 >> 8) - } - - off += 4 - - if off == 0 { - if bufoff > dstEvery { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 1") - } - // There must at least be 3 buffers left. - if len(out)-bufoff < dstEvery*3 { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 2") - } - - //copy(out, buf[0][:]) - //copy(out[dstEvery:], buf[1][:]) - //copy(out[dstEvery*2:], buf[2][:]) - // copy(out[dstEvery*3:], buf[3][:]) - *(*[bufoff]byte)(out) = buf[0] - *(*[bufoff]byte)(out[dstEvery:]) = buf[1] - *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] - *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] - out = out[bufoff:] - decoded += bufoff * 4 - } - } - if off > 0 { - ioff := int(off) - if len(out) < dstEvery*3+ioff { - return nil, errors.New("corruption detected: stream overrun 3") - } - copy(out, buf[0][:off]) - copy(out[dstEvery:], buf[1][:off]) - copy(out[dstEvery*2:], buf[2][:off]) - copy(out[dstEvery*3:], buf[3][:off]) - decoded += int(off) * 4 - out = out[off:] - } - - // Decode remaining. 
-	remainBytes := dstEvery - (decoded / 4)
-	for i := range br {
-		offset := dstEvery * i
-		endsAt := offset + remainBytes
-		if endsAt > len(out) {
-			endsAt = len(out)
-		}
-		br := &br[i]
-		bitsLeft := br.remaining()
-		for bitsLeft > 0 {
-			if br.finished() {
-				d.bufs.Put(buf)
-				return nil, io.ErrUnexpectedEOF
-			}
-			if br.bitsRead >= 56 {
-				if br.off >= 4 {
-					v := br.in[br.off-4:]
-					v = v[:4]
-					low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
-					br.value |= uint64(low) << (br.bitsRead - 32)
-					br.bitsRead -= 32
-					br.off -= 4
-				} else {
-					for br.off > 0 {
-						br.value |= uint64(br.in[br.off-1]) << (br.bitsRead - 8)
-						br.bitsRead -= 8
-						br.off--
-					}
-				}
-			}
-			// end inline...
-			if offset >= endsAt {
-				d.bufs.Put(buf)
-				return nil, errors.New("corruption detected: stream overrun 4")
-			}
-
-			// Read value and increment offset.
-			v := single[br.peekByteFast()].entry
-			nBits := uint8(v)
-			br.advance(nBits)
-			bitsLeft -= uint(nBits)
-			out[offset] = uint8(v >> 8)
-			offset++
-		}
-		if offset != endsAt {
-			d.bufs.Put(buf)
-			return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
-		}
-
-		decoded += offset - dstEvery*i
-		err = br.close()
-		if err != nil {
-			d.bufs.Put(buf)
-			return nil, err
-		}
-	}
-	d.bufs.Put(buf)
-	if dstSize != decoded {
-		return nil, errors.New("corruption detected: short output block")
-	}
-	return dst, nil
-}
-
-// matches will compare a decoding table to a coding table.
-// Errors are written to the writer.
-// Nothing will be written if table is ok.
-func (s *Scratch) matches(ct cTable, w io.Writer) {
-	if s == nil || len(s.dt.single) == 0 {
-		return
-	}
-	dt := s.dt.single[:1<<s.actualTableLog]
-	tablelog := s.actualTableLog
-	ok := 0
-	broken := 0
-	for sym, enc := range ct {
-		errs := 0
-		broken++
-		if enc.nBits == 0 {
-			for _, dec := range dt {
-				if uint8(dec.entry>>8) == byte(sym) {
-					fmt.Fprintf(w, "symbol %x has decoder, but no encoder\n", sym)
-					errs++
-					break
-				}
-			}
-			if errs == 0 {
-				broken--
-			}
-			continue
-		}
-		// Unused bits in input
-		ub := tablelog - enc.nBits
-		top := enc.val << ub
-		// decoder looks at top bits.
-		dec := dt[top]
-		if uint8(dec.entry) != enc.nBits {
-			fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", sym, enc.nBits, uint8(dec.entry))
-			errs++
-		}
-		if uint8(dec.entry>>8) != uint8(sym) {
-			fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", sym, sym, uint8(dec.entry>>8))
-			errs++
-		}
-		if errs > 0 {
-			fmt.Fprintf(w, "%d errors in base, stopping\n", errs)
-			continue
-		}
-		// Ensure that all combinations are covered.
- for i := uint16(0); i < (1 << ub); i++ { - vval := top | i - dec := dt[vval] - if uint8(dec.entry) != enc.nBits { - fmt.Fprintf(w, "symbol 0x%x bit size mismatch (enc: %d, dec:%d).\n", vval, enc.nBits, uint8(dec.entry)) - errs++ - } - if uint8(dec.entry>>8) != uint8(sym) { - fmt.Fprintf(w, "symbol 0x%x decoder output mismatch (enc: %d, dec:%d).\n", vval, sym, uint8(dec.entry>>8)) - errs++ - } - if errs > 20 { - fmt.Fprintf(w, "%d errors, stopping\n", errs) - break - } - } - if errs == 0 { - ok++ - broken-- - } - } - if broken > 0 { - fmt.Fprintf(w, "%d broken, %d ok\n", broken, ok) - } -} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go deleted file mode 100644 index ba7e8e6b02..0000000000 --- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go +++ /dev/null @@ -1,226 +0,0 @@ -//go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc - -// This file contains the specialisation of Decoder.Decompress4X -// and Decoder.Decompress1X that use an asm implementation of thir main loops. -package huff0 - -import ( - "errors" - "fmt" - - "github.com/klauspost/compress/internal/cpuinfo" -) - -// decompress4x_main_loop_x86 is an x86 assembler implementation -// of Decompress4X when tablelog > 8. -// -//go:noescape -func decompress4x_main_loop_amd64(ctx *decompress4xContext) - -// decompress4x_8b_loop_x86 is an x86 assembler implementation -// of Decompress4X when tablelog <= 8 which decodes 4 entries -// per loop. -// -//go:noescape -func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) - -// fallback8BitSize is the size where using Go version is faster. -const fallback8BitSize = 800 - -type decompress4xContext struct { - pbr *[4]bitReaderShifted - peekBits uint8 - out *byte - dstEvery int - tbl *dEntrySingle - decoded int - limit *byte -} - -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. -// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. 
-func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - if len(src) < 6+(4*1) { - return nil, errors.New("input too small") - } - - use8BitTables := d.actualTableLog <= 8 - if cap(dst) < fallback8BitSize && use8BitTables { - return d.decompress4X8bit(dst, src) - } - - var br [4]bitReaderShifted - // Decode "jump table" - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - single := d.dt.single[:tlSize] - - var decoded int - - if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) { - ctx := decompress4xContext{ - pbr: &br, - peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() - out: &out[0], - dstEvery: dstEvery, - tbl: &single[0], - limit: &out[dstEvery-4], // Always stop decoding when first buffer gets here to avoid writing OOB on last. - } - if use8BitTables { - decompress4x_8b_main_loop_amd64(&ctx) - } else { - decompress4x_main_loop_amd64(&ctx) - } - - decoded = ctx.decoded - out = out[decoded/4:] - } - - // Decode remaining. - remainBytes := dstEvery - (decoded / 4) - for i := range br { - offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } - br := &br[i] - bitsLeft := br.remaining() - for bitsLeft > 0 { - br.fill() - if offset >= endsAt { - return nil, errors.New("corruption detected: stream overrun 4") - } - - // Read value and increment offset. - val := br.peekBitsFast(d.actualTableLog) - v := single[val&tlMask].entry - nBits := uint8(v) - br.advance(nBits) - bitsLeft -= uint(nBits) - out[offset] = uint8(v >> 8) - offset++ - } - if offset != endsAt { - return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) - } - decoded += offset - dstEvery*i - err = br.close() - if err != nil { - return nil, err - } - } - if dstSize != decoded { - return nil, errors.New("corruption detected: short output block") - } - return dst, nil -} - -// decompress4x_main_loop_x86 is an x86 assembler implementation -// of Decompress1X when tablelog > 8. -// -//go:noescape -func decompress1x_main_loop_amd64(ctx *decompress1xContext) - -// decompress4x_main_loop_x86 is an x86 with BMI2 assembler implementation -// of Decompress1X when tablelog > 8. -// -//go:noescape -func decompress1x_main_loop_bmi2(ctx *decompress1xContext) - -type decompress1xContext struct { - pbr *bitReaderShifted - peekBits uint8 - out *byte - outCap int - tbl *dEntrySingle - decoded int -} - -// Error reported by asm implementations -const error_max_decoded_size_exeeded = -1 - -// Decompress1X will decompress a 1X encoded stream. -// The cap of the output buffer will be the maximum decompressed size. -// The length of the supplied input must match the end of a block exactly. 
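Both asm contexts above precompute peekBits as (64 - actualTableLog) & 63 so the main loops can turn the bit buffer into a table index with a single shift. In plain Go terms:

    // peekTop extracts the top actualTableLog bits of a left-justified
    // 64-bit bit buffer, matching the precomputed peekBits shift above.
    func peekTop(value uint64, actualTableLog uint8) uint16 {
        return uint16(value >> ((64 - actualTableLog) & 63))
    }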
-func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - var br bitReaderShifted - err := br.init(src) - if err != nil { - return dst, err - } - maxDecodedSize := cap(dst) - dst = dst[:maxDecodedSize] - - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - - if maxDecodedSize >= 4 { - ctx := decompress1xContext{ - pbr: &br, - out: &dst[0], - outCap: maxDecodedSize, - peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast() - tbl: &d.dt.single[0], - } - - if cpuinfo.HasBMI2() { - decompress1x_main_loop_bmi2(&ctx) - } else { - decompress1x_main_loop_amd64(&ctx) - } - if ctx.decoded == error_max_decoded_size_exeeded { - return nil, ErrMaxDecodedSizeExceeded - } - - dst = dst[:ctx.decoded] - } - - // br < 8, so uint8 is fine - bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead - for bitsLeft > 0 { - br.fill() - if len(dst) >= maxDecodedSize { - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] - nBits := uint8(v.entry) - br.advance(nBits) - bitsLeft -= nBits - dst = append(dst, uint8(v.entry>>8)) - } - return dst, br.close() -} diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s deleted file mode 100644 index c4c7ab2d1f..0000000000 --- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s +++ /dev/null @@ -1,830 +0,0 @@ -// Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT. - -//go:build amd64 && !appengine && !noasm && gc - -// func decompress4x_main_loop_amd64(ctx *decompress4xContext) -TEXT ·decompress4x_main_loop_amd64(SB), $0-8 - // Preload values - MOVQ ctx+0(FP), AX - MOVBQZX 8(AX), DI - MOVQ 16(AX), BX - MOVQ 48(AX), SI - MOVQ 24(AX), R8 - MOVQ 32(AX), R9 - MOVQ (AX), R10 - - // Main loop -main_loop: - XORL DX, DX - CMPQ BX, SI - SETGE DL - - // br0.fillFast32() - MOVQ 32(R10), R11 - MOVBQZX 40(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill0 - MOVQ 24(R10), AX - SUBQ $0x20, R12 - SUBQ $0x04, AX - MOVQ (R10), R13 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R13*1), R13 - MOVQ R12, CX - SHLQ CL, R13 - MOVQ AX, 24(R10) - ORQ R13, R11 - - // exhausted += (br0.off < 4) - CMPQ AX, $0x04 - ADCB $+0, DL - -skip_fill0: - // val0 := br0.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br0.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - - // val1 := br0.peekTopBits(peekBits) - MOVQ DI, CX - MOVQ R11, R13 - SHRQ CL, R13 - - // v1 := table[val1&mask] - MOVW (R9)(R13*2), CX - - // br0.advance(uint8(v1.entry)) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // these two writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (BX) - - // update the bitreader structure - MOVQ R11, 32(R10) - MOVB R12, 40(R10) - - // br1.fillFast32() - MOVQ 80(R10), R11 - MOVBQZX 88(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill1 - MOVQ 72(R10), AX - SUBQ $0x20, R12 - SUBQ $0x04, AX - MOVQ 48(R10), R13 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R13*1), R13 - MOVQ R12, CX - SHLQ CL, R13 - MOVQ AX, 72(R10) - ORQ R13, R11 - - // exhausted += (br1.off < 4) - CMPQ AX, $0x04 - ADCB $+0, DL - -skip_fill1: - // val0 := br1.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 
:= table[val0&mask] - MOVW (R9)(R13*2), CX - - // br1.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - - // val1 := br1.peekTopBits(peekBits) - MOVQ DI, CX - MOVQ R11, R13 - SHRQ CL, R13 - - // v1 := table[val1&mask] - MOVW (R9)(R13*2), CX - - // br1.advance(uint8(v1.entry)) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // these two writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (BX)(R8*1) - - // update the bitreader structure - MOVQ R11, 80(R10) - MOVB R12, 88(R10) - - // br2.fillFast32() - MOVQ 128(R10), R11 - MOVBQZX 136(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill2 - MOVQ 120(R10), AX - SUBQ $0x20, R12 - SUBQ $0x04, AX - MOVQ 96(R10), R13 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R13*1), R13 - MOVQ R12, CX - SHLQ CL, R13 - MOVQ AX, 120(R10) - ORQ R13, R11 - - // exhausted += (br2.off < 4) - CMPQ AX, $0x04 - ADCB $+0, DL - -skip_fill2: - // val0 := br2.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br2.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - - // val1 := br2.peekTopBits(peekBits) - MOVQ DI, CX - MOVQ R11, R13 - SHRQ CL, R13 - - // v1 := table[val1&mask] - MOVW (R9)(R13*2), CX - - // br2.advance(uint8(v1.entry)) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // these two writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - MOVW AX, (BX)(R8*2) - - // update the bitreader structure - MOVQ R11, 128(R10) - MOVB R12, 136(R10) - - // br3.fillFast32() - MOVQ 176(R10), R11 - MOVBQZX 184(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill3 - MOVQ 168(R10), AX - SUBQ $0x20, R12 - SUBQ $0x04, AX - MOVQ 144(R10), R13 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (AX)(R13*1), R13 - MOVQ R12, CX - SHLQ CL, R13 - MOVQ AX, 168(R10) - ORQ R13, R11 - - // exhausted += (br3.off < 4) - CMPQ AX, $0x04 - ADCB $+0, DL - -skip_fill3: - // val0 := br3.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br3.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - - // val1 := br3.peekTopBits(peekBits) - MOVQ DI, CX - MOVQ R11, R13 - SHRQ CL, R13 - - // v1 := table[val1&mask] - MOVW (R9)(R13*2), CX - - // br3.advance(uint8(v1.entry)) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // these two writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - LEAQ (R8)(R8*2), CX - MOVW AX, (BX)(CX*1) - - // update the bitreader structure - MOVQ R11, 176(R10) - MOVB R12, 184(R10) - ADDQ $0x02, BX - TESTB DL, DL - JZ main_loop - MOVQ ctx+0(FP), AX - SUBQ 16(AX), BX - SHLQ $0x02, BX - MOVQ BX, 40(AX) - RET - -// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext) -TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8 - // Preload values - MOVQ ctx+0(FP), CX - MOVBQZX 8(CX), DI - MOVQ 16(CX), BX - MOVQ 48(CX), SI - MOVQ 24(CX), R8 - MOVQ 32(CX), R9 - MOVQ (CX), R10 - - // Main loop -main_loop: - XORL DX, DX - CMPQ BX, SI - SETGE DL - - // br0.fillFast32() - MOVQ 32(R10), R11 - MOVBQZX 40(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill0 - MOVQ 24(R10), R13 - SUBQ $0x20, R12 - SUBQ $0x04, R13 - MOVQ (R10), R14 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R13)(R14*1), R14 - MOVQ R12, CX - SHLQ CL, R14 - MOVQ R13, 24(R10) - ORQ R14, R11 - - // exhausted 
+= (br0.off < 4) - CMPQ R13, $0x04 - ADCB $+0, DL - -skip_fill0: - // val0 := br0.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br0.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - - // val1 := br0.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v1 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br0.advance(uint8(v1.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // val2 := br0.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v2 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br0.advance(uint8(v2.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // val3 := br0.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v3 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br0.advance(uint8(v3.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // these four writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - // out[id * dstEvery + 3] = uint8(v2.entry >> 8) - // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (BX) - - // update the bitreader structure - MOVQ R11, 32(R10) - MOVB R12, 40(R10) - - // br1.fillFast32() - MOVQ 80(R10), R11 - MOVBQZX 88(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill1 - MOVQ 72(R10), R13 - SUBQ $0x20, R12 - SUBQ $0x04, R13 - MOVQ 48(R10), R14 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R13)(R14*1), R14 - MOVQ R12, CX - SHLQ CL, R14 - MOVQ R13, 72(R10) - ORQ R14, R11 - - // exhausted += (br1.off < 4) - CMPQ R13, $0x04 - ADCB $+0, DL - -skip_fill1: - // val0 := br1.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br1.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - - // val1 := br1.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v1 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br1.advance(uint8(v1.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // val2 := br1.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v2 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br1.advance(uint8(v2.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // val3 := br1.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v3 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br1.advance(uint8(v3.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // these four writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - // out[id * dstEvery + 3] = uint8(v2.entry >> 8) - // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (BX)(R8*1) - - // update the bitreader structure - MOVQ R11, 80(R10) - MOVB R12, 88(R10) - - // br2.fillFast32() - MOVQ 128(R10), R11 - MOVBQZX 136(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill2 - MOVQ 120(R10), R13 - SUBQ $0x20, R12 - SUBQ $0x04, R13 - MOVQ 96(R10), R14 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R13)(R14*1), R14 - MOVQ R12, CX - SHLQ CL, R14 - MOVQ R13, 120(R10) - ORQ R14, R11 - - // exhausted += (br2.off < 4) - CMPQ R13, $0x04 - ADCB $+0, DL - -skip_fill2: - // val0 := br2.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br2.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 
- - // val1 := br2.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v1 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br2.advance(uint8(v1.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // val2 := br2.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v2 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br2.advance(uint8(v2.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // val3 := br2.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v3 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br2.advance(uint8(v3.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // these four writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - // out[id * dstEvery + 3] = uint8(v2.entry >> 8) - // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - MOVL AX, (BX)(R8*2) - - // update the bitreader structure - MOVQ R11, 128(R10) - MOVB R12, 136(R10) - - // br3.fillFast32() - MOVQ 176(R10), R11 - MOVBQZX 184(R10), R12 - CMPQ R12, $0x20 - JBE skip_fill3 - MOVQ 168(R10), R13 - SUBQ $0x20, R12 - SUBQ $0x04, R13 - MOVQ 144(R10), R14 - - // b.value |= uint64(low) << (b.bitsRead & 63) - MOVL (R13)(R14*1), R14 - MOVQ R12, CX - SHLQ CL, R14 - MOVQ R13, 168(R10) - ORQ R14, R11 - - // exhausted += (br3.off < 4) - CMPQ R13, $0x04 - ADCB $+0, DL - -skip_fill3: - // val0 := br3.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v0 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br3.advance(uint8(v0.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - - // val1 := br3.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v1 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br3.advance(uint8(v1.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // val2 := br3.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v2 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br3.advance(uint8(v2.entry) - MOVB CH, AH - SHLQ CL, R11 - ADDB CL, R12 - - // val3 := br3.peekTopBits(peekBits) - MOVQ R11, R13 - MOVQ DI, CX - SHRQ CL, R13 - - // v3 := table[val0&mask] - MOVW (R9)(R13*2), CX - - // br3.advance(uint8(v3.entry) - MOVB CH, AL - SHLQ CL, R11 - ADDB CL, R12 - BSWAPL AX - - // these four writes get coalesced - // out[id * dstEvery + 0] = uint8(v0.entry >> 8) - // out[id * dstEvery + 1] = uint8(v1.entry >> 8) - // out[id * dstEvery + 3] = uint8(v2.entry >> 8) - // out[id * dstEvery + 4] = uint8(v3.entry >> 8) - LEAQ (R8)(R8*2), CX - MOVL AX, (BX)(CX*1) - - // update the bitreader structure - MOVQ R11, 176(R10) - MOVB R12, 184(R10) - ADDQ $0x04, BX - TESTB DL, DL - JZ main_loop - MOVQ ctx+0(FP), AX - SUBQ 16(AX), BX - SHLQ $0x02, BX - MOVQ BX, 40(AX) - RET - -// func decompress1x_main_loop_amd64(ctx *decompress1xContext) -TEXT ·decompress1x_main_loop_amd64(SB), $0-8 - MOVQ ctx+0(FP), CX - MOVQ 16(CX), DX - MOVQ 24(CX), BX - CMPQ BX, $0x04 - JB error_max_decoded_size_exceeded - LEAQ (DX)(BX*1), BX - MOVQ (CX), SI - MOVQ (SI), R8 - MOVQ 24(SI), R9 - MOVQ 32(SI), R10 - MOVBQZX 40(SI), R11 - MOVQ 32(CX), SI - MOVBQZX 8(CX), DI - JMP loop_condition - -main_loop: - // Check if we have room for 4 bytes in the output buffer - LEAQ 4(DX), CX - CMPQ CX, BX - JGE error_max_decoded_size_exceeded - - // Decode 4 values - CMPQ R11, $0x20 - JL bitReader_fillFast_1_end - SUBQ $0x20, R11 - SUBQ $0x04, R9 - MOVL (R8)(R9*1), R12 - MOVQ R11, CX - SHLQ CL, R12 - ORQ R12, R10 - 
-bitReader_fillFast_1_end: - MOVQ DI, CX - MOVQ R10, R12 - SHRQ CL, R12 - MOVW (SI)(R12*2), CX - MOVB CH, AL - MOVBQZX CL, CX - ADDQ CX, R11 - SHLQ CL, R10 - MOVQ DI, CX - MOVQ R10, R12 - SHRQ CL, R12 - MOVW (SI)(R12*2), CX - MOVB CH, AH - MOVBQZX CL, CX - ADDQ CX, R11 - SHLQ CL, R10 - BSWAPL AX - CMPQ R11, $0x20 - JL bitReader_fillFast_2_end - SUBQ $0x20, R11 - SUBQ $0x04, R9 - MOVL (R8)(R9*1), R12 - MOVQ R11, CX - SHLQ CL, R12 - ORQ R12, R10 - -bitReader_fillFast_2_end: - MOVQ DI, CX - MOVQ R10, R12 - SHRQ CL, R12 - MOVW (SI)(R12*2), CX - MOVB CH, AH - MOVBQZX CL, CX - ADDQ CX, R11 - SHLQ CL, R10 - MOVQ DI, CX - MOVQ R10, R12 - SHRQ CL, R12 - MOVW (SI)(R12*2), CX - MOVB CH, AL - MOVBQZX CL, CX - ADDQ CX, R11 - SHLQ CL, R10 - BSWAPL AX - - // Store the decoded values - MOVL AX, (DX) - ADDQ $0x04, DX - -loop_condition: - CMPQ R9, $0x08 - JGE main_loop - - // Update ctx structure - MOVQ ctx+0(FP), AX - SUBQ 16(AX), DX - MOVQ DX, 40(AX) - MOVQ (AX), AX - MOVQ R9, 24(AX) - MOVQ R10, 32(AX) - MOVB R11, 40(AX) - RET - - // Report error -error_max_decoded_size_exceeded: - MOVQ ctx+0(FP), AX - MOVQ $-1, CX - MOVQ CX, 40(AX) - RET - -// func decompress1x_main_loop_bmi2(ctx *decompress1xContext) -// Requires: BMI2 -TEXT ·decompress1x_main_loop_bmi2(SB), $0-8 - MOVQ ctx+0(FP), CX - MOVQ 16(CX), DX - MOVQ 24(CX), BX - CMPQ BX, $0x04 - JB error_max_decoded_size_exceeded - LEAQ (DX)(BX*1), BX - MOVQ (CX), SI - MOVQ (SI), R8 - MOVQ 24(SI), R9 - MOVQ 32(SI), R10 - MOVBQZX 40(SI), R11 - MOVQ 32(CX), SI - MOVBQZX 8(CX), DI - JMP loop_condition - -main_loop: - // Check if we have room for 4 bytes in the output buffer - LEAQ 4(DX), CX - CMPQ CX, BX - JGE error_max_decoded_size_exceeded - - // Decode 4 values - CMPQ R11, $0x20 - JL bitReader_fillFast_1_end - SUBQ $0x20, R11 - SUBQ $0x04, R9 - MOVL (R8)(R9*1), CX - SHLXQ R11, CX, CX - ORQ CX, R10 - -bitReader_fillFast_1_end: - SHRXQ DI, R10, CX - MOVW (SI)(CX*2), CX - MOVB CH, AL - MOVBQZX CL, CX - ADDQ CX, R11 - SHLXQ CX, R10, R10 - SHRXQ DI, R10, CX - MOVW (SI)(CX*2), CX - MOVB CH, AH - MOVBQZX CL, CX - ADDQ CX, R11 - SHLXQ CX, R10, R10 - BSWAPL AX - CMPQ R11, $0x20 - JL bitReader_fillFast_2_end - SUBQ $0x20, R11 - SUBQ $0x04, R9 - MOVL (R8)(R9*1), CX - SHLXQ R11, CX, CX - ORQ CX, R10 - -bitReader_fillFast_2_end: - SHRXQ DI, R10, CX - MOVW (SI)(CX*2), CX - MOVB CH, AH - MOVBQZX CL, CX - ADDQ CX, R11 - SHLXQ CX, R10, R10 - SHRXQ DI, R10, CX - MOVW (SI)(CX*2), CX - MOVB CH, AL - MOVBQZX CL, CX - ADDQ CX, R11 - SHLXQ CX, R10, R10 - BSWAPL AX - - // Store the decoded values - MOVL AX, (DX) - ADDQ $0x04, DX - -loop_condition: - CMPQ R9, $0x08 - JGE main_loop - - // Update ctx structure - MOVQ ctx+0(FP), AX - SUBQ 16(AX), DX - MOVQ DX, 40(AX) - MOVQ (AX), AX - MOVQ R9, 24(AX) - MOVQ R10, 32(AX) - MOVB R11, 40(AX) - RET - - // Report error -error_max_decoded_size_exceeded: - MOVQ ctx+0(FP), AX - MOVQ $-1, CX - MOVQ CX, 40(AX) - RET diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go deleted file mode 100644 index 908c17de63..0000000000 --- a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go +++ /dev/null @@ -1,299 +0,0 @@ -//go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm - -// This file contains a generic implementation of Decoder.Decompress4X. -package huff0 - -import ( - "errors" - "fmt" -) - -// Decompress4X will decompress a 4X encoded stream. -// The length of the supplied input must match the end of a block exactly. 
-// The *capacity* of the dst slice must match the destination size of -// the uncompressed data exactly. -func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - if len(src) < 6+(4*1) { - return nil, errors.New("input too small") - } - if use8BitTables && d.actualTableLog <= 8 { - return d.decompress4X8bit(dst, src) - } - - var br [4]bitReaderShifted - // Decode "jump table" - start := 6 - for i := 0; i < 3; i++ { - length := int(src[i*2]) | (int(src[i*2+1]) << 8) - if start+length >= len(src) { - return nil, errors.New("truncated input (or invalid offset)") - } - err := br[i].init(src[start : start+length]) - if err != nil { - return nil, err - } - start += length - } - err := br[3].init(src[start:]) - if err != nil { - return nil, err - } - - // destination, offset to match first output - dstSize := cap(dst) - dst = dst[:dstSize] - out := dst - dstEvery := (dstSize + 3) / 4 - - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - single := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - buf := d.buffer() - var off uint8 - var decoded int - - // Decode 2 values from each decoder/loop. - const bufoff = 256 - for { - if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { - break - } - - { - const stream = 0 - const stream2 = 1 - br[stream].fillFast() - br[stream2].fillFast() - - val := br[stream].peekBitsFast(d.actualTableLog) - val2 := br[stream2].peekBitsFast(d.actualTableLog) - v := single[val&tlMask] - v2 := single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[stream][off] = uint8(v.entry >> 8) - buf[stream2][off] = uint8(v2.entry >> 8) - - val = br[stream].peekBitsFast(d.actualTableLog) - val2 = br[stream2].peekBitsFast(d.actualTableLog) - v = single[val&tlMask] - v2 = single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[stream][off+1] = uint8(v.entry >> 8) - buf[stream2][off+1] = uint8(v2.entry >> 8) - } - - { - const stream = 2 - const stream2 = 3 - br[stream].fillFast() - br[stream2].fillFast() - - val := br[stream].peekBitsFast(d.actualTableLog) - val2 := br[stream2].peekBitsFast(d.actualTableLog) - v := single[val&tlMask] - v2 := single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[stream][off] = uint8(v.entry >> 8) - buf[stream2][off] = uint8(v2.entry >> 8) - - val = br[stream].peekBitsFast(d.actualTableLog) - val2 = br[stream2].peekBitsFast(d.actualTableLog) - v = single[val&tlMask] - v2 = single[val2&tlMask] - br[stream].advance(uint8(v.entry)) - br[stream2].advance(uint8(v2.entry)) - buf[stream][off+1] = uint8(v.entry >> 8) - buf[stream2][off+1] = uint8(v2.entry >> 8) - } - - off += 2 - - if off == 0 { - if bufoff > dstEvery { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 1") - } - // There must at least be 3 buffers left. 
- if len(out)-bufoff < dstEvery*3 { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 2") - } - //copy(out, buf[0][:]) - //copy(out[dstEvery:], buf[1][:]) - //copy(out[dstEvery*2:], buf[2][:]) - //copy(out[dstEvery*3:], buf[3][:]) - *(*[bufoff]byte)(out) = buf[0] - *(*[bufoff]byte)(out[dstEvery:]) = buf[1] - *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2] - *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3] - out = out[bufoff:] - decoded += bufoff * 4 - } - } - if off > 0 { - ioff := int(off) - if len(out) < dstEvery*3+ioff { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 3") - } - copy(out, buf[0][:off]) - copy(out[dstEvery:], buf[1][:off]) - copy(out[dstEvery*2:], buf[2][:off]) - copy(out[dstEvery*3:], buf[3][:off]) - decoded += int(off) * 4 - out = out[off:] - } - - // Decode remaining. - remainBytes := dstEvery - (decoded / 4) - for i := range br { - offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } - br := &br[i] - bitsLeft := br.remaining() - for bitsLeft > 0 { - br.fill() - if offset >= endsAt { - d.bufs.Put(buf) - return nil, errors.New("corruption detected: stream overrun 4") - } - - // Read value and increment offset. - val := br.peekBitsFast(d.actualTableLog) - v := single[val&tlMask].entry - nBits := uint8(v) - br.advance(nBits) - bitsLeft -= uint(nBits) - out[offset] = uint8(v >> 8) - offset++ - } - if offset != endsAt { - d.bufs.Put(buf) - return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) - } - decoded += offset - dstEvery*i - err = br.close() - if err != nil { - return nil, err - } - } - d.bufs.Put(buf) - if dstSize != decoded { - return nil, errors.New("corruption detected: short output block") - } - return dst, nil -} - -// Decompress1X will decompress a 1X encoded stream. -// The cap of the output buffer will be the maximum decompressed size. -// The length of the supplied input must match the end of a block exactly. -func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { - if len(d.dt.single) == 0 { - return nil, errors.New("no table loaded") - } - if use8BitTables && d.actualTableLog <= 8 { - return d.decompress1X8Bit(dst, src) - } - var br bitReaderShifted - err := br.init(src) - if err != nil { - return dst, err - } - maxDecodedSize := cap(dst) - dst = dst[:0] - - // Avoid bounds check by always having full sized table. - const tlSize = 1 << tableLogMax - const tlMask = tlSize - 1 - dt := d.dt.single[:tlSize] - - // Use temp table to avoid bound checks/append penalty. - bufs := d.buffer() - buf := &bufs[0] - var off uint8 - - for br.off >= 8 { - br.fillFast() - v := dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+0] = uint8(v.entry >> 8) - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+1] = uint8(v.entry >> 8) - - // Refill - br.fillFast() - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+2] = uint8(v.entry >> 8) - - v = dt[br.peekBitsFast(d.actualTableLog)&tlMask] - br.advance(uint8(v.entry)) - buf[off+3] = uint8(v.entry >> 8) - - off += 4 - if off == 0 { - if len(dst)+256 > maxDecodedSize { - br.close() - d.bufs.Put(bufs) - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:]...) - } - } - - if len(dst)+int(off) > maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - dst = append(dst, buf[:off]...) 
- - // br < 8, so uint8 is fine - bitsLeft := uint8(br.off)*8 + 64 - br.bitsRead - for bitsLeft > 0 { - br.fill() - if false && br.bitsRead >= 32 { - if br.off >= 4 { - v := br.in[br.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - br.value = (br.value << 32) | uint64(low) - br.bitsRead -= 32 - br.off -= 4 - } else { - for br.off > 0 { - br.value = (br.value << 8) | uint64(br.in[br.off-1]) - br.bitsRead -= 8 - br.off-- - } - } - } - if len(dst) >= maxDecodedSize { - d.bufs.Put(bufs) - br.close() - return nil, ErrMaxDecodedSizeExceeded - } - v := d.dt.single[br.peekBitsFast(d.actualTableLog)&tlMask] - nBits := uint8(v.entry) - br.advance(nBits) - bitsLeft -= nBits - dst = append(dst, uint8(v.entry>>8)) - } - d.bufs.Put(bufs) - return dst, br.close() -} diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go deleted file mode 100644 index 77ecd68e0a..0000000000 --- a/vendor/github.com/klauspost/compress/huff0/huff0.go +++ /dev/null @@ -1,337 +0,0 @@ -// Package huff0 provides fast huffman encoding as used in zstd. -// -// See README.md at https://github.com/klauspost/compress/tree/master/huff0 for details. -package huff0 - -import ( - "errors" - "fmt" - "math" - "math/bits" - "sync" - - "github.com/klauspost/compress/fse" -) - -const ( - maxSymbolValue = 255 - - // zstandard limits tablelog to 11, see: - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#huffman-tree-description - tableLogMax = 11 - tableLogDefault = 11 - minTablelog = 5 - huffNodesLen = 512 - - // BlockSizeMax is maximum input size for a single block uncompressed. - BlockSizeMax = 1<<18 - 1 -) - -var ( - // ErrIncompressible is returned when input is judged to be too hard to compress. - ErrIncompressible = errors.New("input is not compressible") - - // ErrUseRLE is returned from the compressor when the input is a single byte value repeated. - ErrUseRLE = errors.New("input is single value repeated") - - // ErrTooBig is return if input is too large for a single block. - ErrTooBig = errors.New("input too big") - - // ErrMaxDecodedSizeExceeded is return if input is too large for a single block. - ErrMaxDecodedSizeExceeded = errors.New("maximum output size exceeded") -) - -type ReusePolicy uint8 - -const ( - // ReusePolicyAllow will allow reuse if it produces smaller output. - ReusePolicyAllow ReusePolicy = iota - - // ReusePolicyPrefer will re-use aggressively if possible. - // This will not check if a new table will produce smaller output, - // except if the current table is impossible to use or - // compressed output is bigger than input. - ReusePolicyPrefer - - // ReusePolicyNone will disable re-use of tables. - // This is slightly faster than ReusePolicyAllow but may produce larger output. - ReusePolicyNone - - // ReusePolicyMust must allow reuse and produce smaller output. - ReusePolicyMust -) - -type Scratch struct { - count [maxSymbolValue + 1]uint32 - - // Per block parameters. - // These can be used to override compression parameters of the block. - // Do not touch, unless you know what you are doing. - - // Out is output buffer. - // If the scratch is re-used before the caller is done processing the output, - // set this field to nil. - // Otherwise the output buffer will be re-used for next Compression/Decompression step - // and allocation will be avoided. - Out []byte - - // OutTable will contain the table data only, if a new table has been generated. 
- // Slice of the returned data. - OutTable []byte - - // OutData will contain the compressed data. - // Slice of the returned data. - OutData []byte - - // MaxDecodedSize will set the maximum allowed output size. - // This value will automatically be set to BlockSizeMax if not set. - // Decoders will return ErrMaxDecodedSizeExceeded is this limit is exceeded. - MaxDecodedSize int - - srcLen int - - // MaxSymbolValue will override the maximum symbol value of the next block. - MaxSymbolValue uint8 - - // TableLog will attempt to override the tablelog for the next block. - // Must be <= 11 and >= 5. - TableLog uint8 - - // Reuse will specify the reuse policy - Reuse ReusePolicy - - // WantLogLess allows to specify a log 2 reduction that should at least be achieved, - // otherwise the block will be returned as incompressible. - // The reduction should then at least be (input size >> WantLogLess) - // If WantLogLess == 0 any improvement will do. - WantLogLess uint8 - - symbolLen uint16 // Length of active part of the symbol table. - maxCount int // count of the most probable symbol - clearCount bool // clear count - actualTableLog uint8 // Selected tablelog. - prevTableLog uint8 // Tablelog for previous table - prevTable cTable // Table used for previous compression. - cTable cTable // compression table - dt dTable // decompression table - nodes []nodeElt - tmpOut [4][]byte - fse *fse.Scratch - decPool sync.Pool // *[4][256]byte buffers. - huffWeight [maxSymbolValue + 1]byte -} - -// TransferCTable will transfer the previously used compression table. -func (s *Scratch) TransferCTable(src *Scratch) { - if cap(s.prevTable) < len(src.prevTable) { - s.prevTable = make(cTable, 0, maxSymbolValue+1) - } - s.prevTable = s.prevTable[:len(src.prevTable)] - copy(s.prevTable, src.prevTable) - s.prevTableLog = src.prevTableLog -} - -func (s *Scratch) prepare(in []byte) (*Scratch, error) { - if len(in) > BlockSizeMax { - return nil, ErrTooBig - } - if s == nil { - s = &Scratch{} - } - if s.MaxSymbolValue == 0 { - s.MaxSymbolValue = maxSymbolValue - } - if s.TableLog == 0 { - s.TableLog = tableLogDefault - } - if s.TableLog > tableLogMax || s.TableLog < minTablelog { - return nil, fmt.Errorf(" invalid tableLog %d (%d -> %d)", s.TableLog, minTablelog, tableLogMax) - } - if s.MaxDecodedSize <= 0 || s.MaxDecodedSize > BlockSizeMax { - s.MaxDecodedSize = BlockSizeMax - } - if s.clearCount && s.maxCount == 0 { - for i := range s.count { - s.count[i] = 0 - } - s.clearCount = false - } - if cap(s.Out) == 0 { - s.Out = make([]byte, 0, len(in)) - } - s.Out = s.Out[:0] - - s.OutTable = nil - s.OutData = nil - if cap(s.nodes) < huffNodesLen+1 { - s.nodes = make([]nodeElt, 0, huffNodesLen+1) - } - s.nodes = s.nodes[:0] - if s.fse == nil { - s.fse = &fse.Scratch{} - } - s.srcLen = len(in) - - return s, nil -} - -type cTable []cTableEntry - -func (c cTable) write(s *Scratch) error { - var ( - // precomputed conversion table - bitsToWeight [tableLogMax + 1]byte - huffLog = s.actualTableLog - // last weight is not saved. - maxSymbolValue = uint8(s.symbolLen - 1) - huffWeight = s.huffWeight[:256] - ) - const ( - maxFSETableLog = 6 - ) - // convert to weight - bitsToWeight[0] = 0 - for n := uint8(1); n < huffLog+1; n++ { - bitsToWeight[n] = huffLog + 1 - n - } - - // Acquire histogram for FSE. 
- hist := s.fse.Histogram() - hist = hist[:256] - for i := range hist[:16] { - hist[i] = 0 - } - for n := uint8(0); n < maxSymbolValue; n++ { - v := bitsToWeight[c[n].nBits] & 15 - huffWeight[n] = v - hist[v]++ - } - - // FSE compress if feasible. - if maxSymbolValue >= 2 { - huffMaxCnt := uint32(0) - huffMax := uint8(0) - for i, v := range hist[:16] { - if v == 0 { - continue - } - huffMax = byte(i) - if v > huffMaxCnt { - huffMaxCnt = v - } - } - s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) - s.fse.TableLog = maxFSETableLog - b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) - if err == nil && len(b) < int(s.symbolLen>>1) { - s.Out = append(s.Out, uint8(len(b))) - s.Out = append(s.Out, b...) - return nil - } - // Unable to compress (RLE/uncompressible) - } - // write raw values as 4-bits (max : 15) - if maxSymbolValue > (256 - 128) { - // should not happen : likely means source cannot be compressed - return ErrIncompressible - } - op := s.Out - // special case, pack weights 4 bits/weight. - op = append(op, 128|(maxSymbolValue-1)) - // be sure it doesn't cause msan issue in final combination - huffWeight[maxSymbolValue] = 0 - for n := uint16(0); n < uint16(maxSymbolValue); n += 2 { - op = append(op, (huffWeight[n]<<4)|huffWeight[n+1]) - } - s.Out = op - return nil -} - -func (c cTable) estTableSize(s *Scratch) (sz int, err error) { - var ( - // precomputed conversion table - bitsToWeight [tableLogMax + 1]byte - huffLog = s.actualTableLog - // last weight is not saved. - maxSymbolValue = uint8(s.symbolLen - 1) - huffWeight = s.huffWeight[:256] - ) - const ( - maxFSETableLog = 6 - ) - // convert to weight - bitsToWeight[0] = 0 - for n := uint8(1); n < huffLog+1; n++ { - bitsToWeight[n] = huffLog + 1 - n - } - - // Acquire histogram for FSE. - hist := s.fse.Histogram() - hist = hist[:256] - for i := range hist[:16] { - hist[i] = 0 - } - for n := uint8(0); n < maxSymbolValue; n++ { - v := bitsToWeight[c[n].nBits] & 15 - huffWeight[n] = v - hist[v]++ - } - - // FSE compress if feasible. - if maxSymbolValue >= 2 { - huffMaxCnt := uint32(0) - huffMax := uint8(0) - for i, v := range hist[:16] { - if v == 0 { - continue - } - huffMax = byte(i) - if v > huffMaxCnt { - huffMaxCnt = v - } - } - s.fse.HistogramFinished(huffMax, int(huffMaxCnt)) - s.fse.TableLog = maxFSETableLog - b, err := fse.Compress(huffWeight[:maxSymbolValue], s.fse) - if err == nil && len(b) < int(s.symbolLen>>1) { - sz += 1 + len(b) - return sz, nil - } - // Unable to compress (RLE/uncompressible) - } - // write raw values as 4-bits (max : 15) - if maxSymbolValue > (256 - 128) { - // should not happen : likely means source cannot be compressed - return 0, ErrIncompressible - } - // special case, pack weights 4 bits/weight. - sz += 1 + int(maxSymbolValue/2) - return sz, nil -} - -// estimateSize returns the estimated size in bytes of the input represented in the -// histogram supplied. -func (c cTable) estimateSize(hist []uint32) int { - nbBits := uint32(7) - for i, v := range c[:len(hist)] { - nbBits += uint32(v.nBits) * hist[i] - } - return int(nbBits >> 3) -} - -// minSize returns the minimum possible size considering the shannon limit. 
-func (s *Scratch) minSize(total int) int { - nbBits := float64(7) - fTotal := float64(total) - for _, v := range s.count[:s.symbolLen] { - n := float64(v) - if n > 0 { - nbBits += math.Log2(fTotal/n) * n - } - } - return int(nbBits) >> 3 -} - -func highBit32(val uint32) (n uint32) { - return uint32(bits.Len32(val) - 1) -} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go deleted file mode 100644 index 3954c51219..0000000000 --- a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo.go +++ /dev/null @@ -1,34 +0,0 @@ -// Package cpuinfo gives runtime info about the current CPU. -// -// This is a very limited module meant for use internally -// in this project. For more versatile solution check -// https://github.com/klauspost/cpuid. -package cpuinfo - -// HasBMI1 checks whether an x86 CPU supports the BMI1 extension. -func HasBMI1() bool { - return hasBMI1 -} - -// HasBMI2 checks whether an x86 CPU supports the BMI2 extension. -func HasBMI2() bool { - return hasBMI2 -} - -// DisableBMI2 will disable BMI2, for testing purposes. -// Call returned function to restore previous state. -func DisableBMI2() func() { - old := hasBMI2 - hasBMI2 = false - return func() { - hasBMI2 = old - } -} - -// HasBMI checks whether an x86 CPU supports both BMI1 and BMI2 extensions. -func HasBMI() bool { - return HasBMI1() && HasBMI2() -} - -var hasBMI1 bool -var hasBMI2 bool diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go deleted file mode 100644 index e802579c4f..0000000000 --- a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc - -package cpuinfo - -// go:noescape -func x86extensions() (bmi1, bmi2 bool) - -func init() { - hasBMI1, hasBMI2 = x86extensions() -} diff --git a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s b/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s deleted file mode 100644 index 4465fbe9e9..0000000000 --- a/vendor/github.com/klauspost/compress/internal/cpuinfo/cpuinfo_amd64.s +++ /dev/null @@ -1,36 +0,0 @@ -// +build !appengine -// +build gc -// +build !noasm - -#include "textflag.h" -#include "funcdata.h" -#include "go_asm.h" - -TEXT ·x86extensions(SB), NOSPLIT, $0 - // 1. determine max EAX value - XORQ AX, AX - CPUID - - CMPQ AX, $7 - JB unsupported - - // 2. EAX = 7, ECX = 0 --- see Table 3-8 "Information Returned by CPUID Instruction" - MOVQ $7, AX - MOVQ $0, CX - CPUID - - BTQ $3, BX // bit 3 = BMI1 - SETCS AL - - BTQ $8, BX // bit 8 = BMI2 - SETCS AH - - MOVB AL, bmi1+0(FP) - MOVB AH, bmi2+1(FP) - RET - -unsupported: - XORQ AX, AX - MOVB AL, bmi1+0(FP) - MOVB AL, bmi2+1(FP) - RET diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode.go b/vendor/github.com/klauspost/compress/internal/snapref/decode.go deleted file mode 100644 index 40796a49d6..0000000000 --- a/vendor/github.com/klauspost/compress/internal/snapref/decode.go +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snapref - -import ( - "encoding/binary" - "errors" - "io" -) - -var ( - // ErrCorrupt reports that the input is invalid. 
- ErrCorrupt = errors.New("snappy: corrupt input") - // ErrTooLarge reports that the uncompressed length is too large. - ErrTooLarge = errors.New("snappy: decoded block is too large") - // ErrUnsupported reports that the input isn't supported. - ErrUnsupported = errors.New("snappy: unsupported input") - - errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") -) - -// DecodedLen returns the length of the decoded block. -func DecodedLen(src []byte) (int, error) { - v, _, err := decodedLen(src) - return v, err -} - -// decodedLen returns the length of the decoded block and the number of bytes -// that the length header occupied. -func decodedLen(src []byte) (blockLen, headerLen int, err error) { - v, n := binary.Uvarint(src) - if n <= 0 || v > 0xffffffff { - return 0, 0, ErrCorrupt - } - - const wordSize = 32 << (^uint(0) >> 32 & 1) - if wordSize == 32 && v > 0x7fffffff { - return 0, 0, ErrTooLarge - } - return int(v), n, nil -} - -const ( - decodeErrCodeCorrupt = 1 - decodeErrCodeUnsupportedLiteralLength = 2 -) - -// Decode returns the decoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire decoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -// -// Decode handles the Snappy block format, not the Snappy stream format. -func Decode(dst, src []byte) ([]byte, error) { - dLen, s, err := decodedLen(src) - if err != nil { - return nil, err - } - if dLen <= len(dst) { - dst = dst[:dLen] - } else { - dst = make([]byte, dLen) - } - switch decode(dst, src[s:]) { - case 0: - return dst, nil - case decodeErrCodeUnsupportedLiteralLength: - return nil, errUnsupportedLiteralLength - } - return nil, ErrCorrupt -} - -// NewReader returns a new Reader that decompresses from r, using the framing -// format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -func NewReader(r io.Reader) *Reader { - return &Reader{ - r: r, - decoded: make([]byte, maxBlockSize), - buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), - } -} - -// Reader is an io.Reader that can read Snappy-compressed bytes. -// -// Reader handles the Snappy stream format, not the Snappy block format. -type Reader struct { - r io.Reader - err error - decoded []byte - buf []byte - // decoded[i:j] contains decoded bytes that have not yet been passed on. - i, j int - readHeader bool -} - -// Reset discards any buffered data, resets all state, and switches the Snappy -// reader to read from r. This permits reusing a Reader rather than allocating -// a new one. 
-func (r *Reader) Reset(reader io.Reader) { - r.r = reader - r.err = nil - r.i = 0 - r.j = 0 - r.readHeader = false -} - -func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { - if _, r.err = io.ReadFull(r.r, p); r.err != nil { - if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { - r.err = ErrCorrupt - } - return false - } - return true -} - -func (r *Reader) fill() error { - for r.i >= r.j { - if !r.readFull(r.buf[:4], true) { - return r.err - } - chunkType := r.buf[0] - if !r.readHeader { - if chunkType != chunkTypeStreamIdentifier { - r.err = ErrCorrupt - return r.err - } - r.readHeader = true - } - chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 - if chunkLen > len(r.buf) { - r.err = ErrUnsupported - return r.err - } - - // The chunk types are specified at - // https://github.com/google/snappy/blob/master/framing_format.txt - switch chunkType { - case chunkTypeCompressedData: - // Section 4.2. Compressed data (chunk type 0x00). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return r.err - } - buf := r.buf[:chunkLen] - if !r.readFull(buf, false) { - return r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - buf = buf[checksumSize:] - - n, err := DecodedLen(buf) - if err != nil { - r.err = err - return r.err - } - if n > len(r.decoded) { - r.err = ErrCorrupt - return r.err - } - if _, err := Decode(r.decoded, buf); err != nil { - r.err = err - return r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeUncompressedData: - // Section 4.3. Uncompressed data (chunk type 0x01). - if chunkLen < checksumSize { - r.err = ErrCorrupt - return r.err - } - buf := r.buf[:checksumSize] - if !r.readFull(buf, false) { - return r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - // Read directly into r.decoded instead of via r.buf. - n := chunkLen - checksumSize - if n > len(r.decoded) { - r.err = ErrCorrupt - return r.err - } - if !r.readFull(r.decoded[:n], false) { - return r.err - } - if crc(r.decoded[:n]) != checksum { - r.err = ErrCorrupt - return r.err - } - r.i, r.j = 0, n - continue - - case chunkTypeStreamIdentifier: - // Section 4.1. Stream identifier (chunk type 0xff). - if chunkLen != len(magicBody) { - r.err = ErrCorrupt - return r.err - } - if !r.readFull(r.buf[:len(magicBody)], false) { - return r.err - } - for i := 0; i < len(magicBody); i++ { - if r.buf[i] != magicBody[i] { - r.err = ErrCorrupt - return r.err - } - } - continue - } - - if chunkType <= 0x7f { - // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). - r.err = ErrUnsupported - return r.err - } - // Section 4.4 Padding (chunk type 0xfe). - // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). - if !r.readFull(r.buf[:chunkLen], false) { - return r.err - } - } - - return nil -} - -// Read satisfies the io.Reader interface. -func (r *Reader) Read(p []byte) (int, error) { - if r.err != nil { - return 0, r.err - } - - if err := r.fill(); err != nil { - return 0, err - } - - n := copy(p, r.decoded[r.i:r.j]) - r.i += n - return n, nil -} - -// ReadByte satisfies the io.ByteReader interface. 
-func (r *Reader) ReadByte() (byte, error) { - if r.err != nil { - return 0, r.err - } - - if err := r.fill(); err != nil { - return 0, err - } - - c := r.decoded[r.i] - r.i++ - return c, nil -} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go deleted file mode 100644 index 77395a6b8b..0000000000 --- a/vendor/github.com/klauspost/compress/internal/snapref/decode_other.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snapref - -// decode writes the decoding of src to dst. It assumes that the varint-encoded -// length of the decompressed bytes has already been read, and that len(dst) -// equals that length. -// -// It returns 0 on success or a decodeErrCodeXxx error code on failure. -func decode(dst, src []byte) int { - var d, s, offset, length int - for s < len(src) { - switch src[s] & 0x03 { - case tagLiteral: - x := uint32(src[s] >> 2) - switch { - case x < 60: - s++ - case x == 60: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-1]) - case x == 61: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-2]) | uint32(src[s-1])<<8 - case x == 62: - s += 4 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - case x == 63: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - } - length = int(x) + 1 - if length <= 0 { - return decodeErrCodeUnsupportedLiteralLength - } - if length > len(dst)-d || length > len(src)-s { - return decodeErrCodeCorrupt - } - copy(dst[d:], src[s:s+length]) - d += length - s += length - continue - - case tagCopy1: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 4 + int(src[s-2])>>2&0x7 - offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) - - case tagCopy2: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-3])>>2 - offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) - - case tagCopy4: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - return decodeErrCodeCorrupt - } - length = 1 + int(src[s-5])>>2 - offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) - } - - if offset <= 0 || d < offset || length > len(dst)-d { - return decodeErrCodeCorrupt - } - // Copy from an earlier sub-slice of dst to a later sub-slice. - // If no overlap, use the built-in copy: - if offset >= length { - copy(dst[d:d+length], dst[d-offset:]) - d += length - continue - } - - // Unlike the built-in copy function, this byte-by-byte copy always runs - // forwards, even if the slices overlap. 
Conceptually, this is: - // - // d += forwardCopy(dst[d:d+length], dst[d-offset:]) - // - // We align the slices into a and b and show the compiler they are the same size. - // This allows the loop to run without bounds checks. - a := dst[d : d+length] - b := dst[d-offset:] - b = b[:len(a)] - for i := range a { - a[i] = b[i] - } - d += length - } - if d != len(dst) { - return decodeErrCodeCorrupt - } - return 0 -} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode.go b/vendor/github.com/klauspost/compress/internal/snapref/encode.go deleted file mode 100644 index 13c6040a5d..0000000000 --- a/vendor/github.com/klauspost/compress/internal/snapref/encode.go +++ /dev/null @@ -1,289 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snapref - -import ( - "encoding/binary" - "errors" - "io" -) - -// Encode returns the encoded form of src. The returned slice may be a sub- -// slice of dst if dst was large enough to hold the entire encoded block. -// Otherwise, a newly allocated slice will be returned. -// -// The dst and src must not overlap. It is valid to pass a nil dst. -// -// Encode handles the Snappy block format, not the Snappy stream format. -func Encode(dst, src []byte) []byte { - if n := MaxEncodedLen(len(src)); n < 0 { - panic(ErrTooLarge) - } else if len(dst) < n { - dst = make([]byte, n) - } - - // The block starts with the varint-encoded length of the decompressed bytes. - d := binary.PutUvarint(dst, uint64(len(src))) - - for len(src) > 0 { - p := src - src = nil - if len(p) > maxBlockSize { - p, src = p[:maxBlockSize], p[maxBlockSize:] - } - if len(p) < minNonLiteralBlockSize { - d += emitLiteral(dst[d:], p) - } else { - d += encodeBlock(dst[d:], p) - } - } - return dst[:d] -} - -// inputMargin is the minimum number of extra input bytes to keep, inside -// encodeBlock's inner loop. On some architectures, this margin lets us -// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) -// literals can be implemented as a single load to and store from a 16-byte -// register. That literal's actual length can be as short as 1 byte, so this -// can copy up to 15 bytes too much, but that's OK as subsequent iterations of -// the encoding loop will fix up the copy overrun, and this inputMargin ensures -// that we don't overrun the dst and src buffers. -const inputMargin = 16 - 1 - -// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that -// could be encoded with a copy tag. This is the minimum with respect to the -// algorithm used by encodeBlock, not a minimum enforced by the file format. -// -// The encoded output must start with at least a 1 byte literal, as there are -// no previous bytes to copy. A minimal (1 byte) copy after that, generated -// from an emitCopy call in encodeBlock's main loop, would require at least -// another inputMargin bytes, for the reason above: we want any emitLiteral -// calls inside encodeBlock's main loop to use the fast path if possible, which -// requires being able to overrun by inputMargin bytes. Thus, -// minNonLiteralBlockSize equals 1 + 1 + inputMargin. -// -// The C++ code doesn't use this exact threshold, but it could, as discussed at -// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion -// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an -// optimization. It should not affect the encoded form. 
This is tested by -// TestSameEncodingAsCppShortCopies. -const minNonLiteralBlockSize = 1 + 1 + inputMargin - -// MaxEncodedLen returns the maximum length of a snappy block, given its -// uncompressed length. -// -// It will return a negative value if srcLen is too large to encode. -func MaxEncodedLen(srcLen int) int { - n := uint64(srcLen) - if n > 0xffffffff { - return -1 - } - // Compressed data can be defined as: - // compressed := item* literal* - // item := literal* copy - // - // The trailing literal sequence has a space blowup of at most 62/60 - // since a literal of length 60 needs one tag byte + one extra byte - // for length information. - // - // Item blowup is trickier to measure. Suppose the "copy" op copies - // 4 bytes of data. Because of a special check in the encoding code, - // we produce a 4-byte copy only if the offset is < 65536. Therefore - // the copy op takes 3 bytes to encode, and this type of item leads - // to at most the 62/60 blowup for representing literals. - // - // Suppose the "copy" op copies 5 bytes of data. If the offset is big - // enough, it will take 5 bytes to encode the copy op. Therefore the - // worst case here is a one-byte literal followed by a five-byte copy. - // That is, 6 bytes of input turn into 7 bytes of "compressed" data. - // - // This last factor dominates the blowup, so the final estimate is: - n = 32 + n + n/6 - if n > 0xffffffff { - return -1 - } - return int(n) -} - -var errClosed = errors.New("snappy: Writer is closed") - -// NewWriter returns a new Writer that compresses to w. -// -// The Writer returned does not buffer writes. There is no need to Flush or -// Close such a Writer. -// -// Deprecated: the Writer returned is not suitable for many small writes, only -// for few large writes. Use NewBufferedWriter instead, which is efficient -// regardless of the frequency and shape of the writes, and remember to Close -// that Writer when done. -func NewWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - obuf: make([]byte, obufLen), - } -} - -// NewBufferedWriter returns a new Writer that compresses to w, using the -// framing format described at -// https://github.com/google/snappy/blob/master/framing_format.txt -// -// The Writer returned buffers writes. Users must call Close to guarantee all -// data has been forwarded to the underlying io.Writer. They may also call -// Flush zero or more times before calling Close. -func NewBufferedWriter(w io.Writer) *Writer { - return &Writer{ - w: w, - ibuf: make([]byte, 0, maxBlockSize), - obuf: make([]byte, obufLen), - } -} - -// Writer is an io.Writer that can write Snappy-compressed bytes. -// -// Writer handles the Snappy stream format, not the Snappy block format. -type Writer struct { - w io.Writer - err error - - // ibuf is a buffer for the incoming (uncompressed) bytes. - // - // Its use is optional. For backwards compatibility, Writers created by the - // NewWriter function have ibuf == nil, do not buffer incoming bytes, and - // therefore do not need to be Flush'ed or Close'd. - ibuf []byte - - // obuf is a buffer for the outgoing (compressed) bytes. - obuf []byte - - // wroteStreamHeader is whether we have written the stream header. - wroteStreamHeader bool -} - -// Reset discards the writer's state and switches the Snappy writer to write to -// w. This permits reusing a Writer rather than allocating a new one. 
-func (w *Writer) Reset(writer io.Writer) { - w.w = writer - w.err = nil - if w.ibuf != nil { - w.ibuf = w.ibuf[:0] - } - w.wroteStreamHeader = false -} - -// Write satisfies the io.Writer interface. -func (w *Writer) Write(p []byte) (nRet int, errRet error) { - if w.ibuf == nil { - // Do not buffer incoming bytes. This does not perform or compress well - // if the caller of Writer.Write writes many small slices. This - // behavior is therefore deprecated, but still supported for backwards - // compatibility with code that doesn't explicitly Flush or Close. - return w.write(p) - } - - // The remainder of this method is based on bufio.Writer.Write from the - // standard library. - - for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { - var n int - if len(w.ibuf) == 0 { - // Large write, empty buffer. - // Write directly from p to avoid copy. - n, _ = w.write(p) - } else { - n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - w.Flush() - } - nRet += n - p = p[n:] - } - if w.err != nil { - return nRet, w.err - } - n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) - w.ibuf = w.ibuf[:len(w.ibuf)+n] - nRet += n - return nRet, nil -} - -func (w *Writer) write(p []byte) (nRet int, errRet error) { - if w.err != nil { - return 0, w.err - } - for len(p) > 0 { - obufStart := len(magicChunk) - if !w.wroteStreamHeader { - w.wroteStreamHeader = true - copy(w.obuf, magicChunk) - obufStart = 0 - } - - var uncompressed []byte - if len(p) > maxBlockSize { - uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] - } else { - uncompressed, p = p, nil - } - checksum := crc(uncompressed) - - // Compress the buffer, discarding the result if the improvement - // isn't at least 12.5%. - compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) - chunkType := uint8(chunkTypeCompressedData) - chunkLen := 4 + len(compressed) - obufEnd := obufHeaderLen + len(compressed) - if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { - chunkType = chunkTypeUncompressedData - chunkLen = 4 + len(uncompressed) - obufEnd = obufHeaderLen - } - - // Fill in the per-chunk header that comes before the body. - w.obuf[len(magicChunk)+0] = chunkType - w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) - w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) - w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) - w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) - w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) - w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) - w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) - - if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { - w.err = err - return nRet, err - } - if chunkType == chunkTypeUncompressedData { - if _, err := w.w.Write(uncompressed); err != nil { - w.err = err - return nRet, err - } - } - nRet += len(uncompressed) - } - return nRet, nil -} - -// Flush flushes the Writer to its underlying io.Writer. -func (w *Writer) Flush() error { - if w.err != nil { - return w.err - } - if len(w.ibuf) == 0 { - return nil - } - w.write(w.ibuf) - w.ibuf = w.ibuf[:0] - return w.err -} - -// Close calls Flush and then closes the Writer. 
-func (w *Writer) Close() error { - w.Flush() - ret := w.err - if w.err == nil { - w.err = errClosed - } - return ret -} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go deleted file mode 100644 index 2754bac6f1..0000000000 --- a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go +++ /dev/null @@ -1,250 +0,0 @@ -// Copyright 2016 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package snapref - -func load32(b []byte, i int) uint32 { - b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 -} - -func load64(b []byte, i int) uint64 { - b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 -} - -// emitLiteral writes a literal chunk and returns the number of bytes written. -// -// It assumes that: -// -// dst is long enough to hold the encoded bytes -// 1 <= len(lit) && len(lit) <= 65536 -func emitLiteral(dst, lit []byte) int { - i, n := 0, uint(len(lit)-1) - switch { - case n < 60: - dst[0] = uint8(n)<<2 | tagLiteral - i = 1 - case n < 1<<8: - dst[0] = 60<<2 | tagLiteral - dst[1] = uint8(n) - i = 2 - default: - dst[0] = 61<<2 | tagLiteral - dst[1] = uint8(n) - dst[2] = uint8(n >> 8) - i = 3 - } - return i + copy(dst[i:], lit) -} - -// emitCopy writes a copy chunk and returns the number of bytes written. -// -// It assumes that: -// -// dst is long enough to hold the encoded bytes -// 1 <= offset && offset <= 65535 -// 4 <= length && length <= 65535 -func emitCopy(dst []byte, offset, length int) int { - i := 0 - // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The - // threshold for this loop is a little higher (at 68 = 64 + 4), and the - // length emitted down below is a little lower (at 60 = 64 - 4), because - // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed - // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as - // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as - // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a - // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an - // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. - for length >= 68 { - // Emit a length 64 copy, encoded as 3 bytes. - dst[i+0] = 63<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= 64 - } - if length > 64 { - // Emit a length 60 copy, encoded as 3 bytes. - dst[i+0] = 59<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - i += 3 - length -= 60 - } - if length >= 12 || offset >= 2048 { - // Emit the remaining copy, encoded as 3 bytes. - dst[i+0] = uint8(length-1)<<2 | tagCopy2 - dst[i+1] = uint8(offset) - dst[i+2] = uint8(offset >> 8) - return i + 3 - } - // Emit the remaining copy, encoded as 2 bytes. - dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 - dst[i+1] = uint8(offset) - return i + 2 -} - -func hash(u, shift uint32) uint32 { - return (u * 0x1e35a7bd) >> shift -} - -// EncodeBlockInto exposes encodeBlock but checks dst size. 
-func EncodeBlockInto(dst, src []byte) (d int) { - if MaxEncodedLen(len(src)) > len(dst) { - return 0 - } - - // encodeBlock breaks on too big blocks, so split. - for len(src) > 0 { - p := src - src = nil - if len(p) > maxBlockSize { - p, src = p[:maxBlockSize], p[maxBlockSize:] - } - if len(p) < minNonLiteralBlockSize { - d += emitLiteral(dst[d:], p) - } else { - d += encodeBlock(dst[d:], p) - } - } - return d -} - -// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It -// assumes that the varint-encoded length of the decompressed bytes has already -// been written. -// -// It also assumes that: -// -// len(dst) >= MaxEncodedLen(len(src)) && -// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize -func encodeBlock(dst, src []byte) (d int) { - // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. - // The table element type is uint16, as s < sLimit and sLimit < len(src) - // and len(src) <= maxBlockSize and maxBlockSize == 65536. - const ( - maxTableSize = 1 << 14 - // tableMask is redundant, but helps the compiler eliminate bounds - // checks. - tableMask = maxTableSize - 1 - ) - shift := uint32(32 - 8) - for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { - shift-- - } - // In Go, all array elements are zero-initialized, so there is no advantage - // to a smaller tableSize per se. However, it matches the C++ algorithm, - // and in the asm versions of this code, we can get away with zeroing only - // the first tableSize elements. - var table [maxTableSize]uint16 - - // sLimit is when to stop looking for offset/length copies. The inputMargin - // lets us use a fast path for emitLiteral in the main loop, while we are - // looking for copies. - sLimit := len(src) - inputMargin - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := 0 - - // The encoded form must start with a literal, as there are no previous - // bytes to copy, so we start looking for hash matches at s == 1. - s := 1 - nextHash := hash(load32(src, s), shift) - - for { - // Copied from the C++ snappy implementation: - // - // Heuristic match skipping: If 32 bytes are scanned with no matches - // found, start looking only at every other byte. If 32 more bytes are - // scanned (or skipped), look at every third byte, etc.. When a match - // is found, immediately go back to looking at every byte. This is a - // small loss (~5% performance, ~0.1% density) for compressible data - // due to more bookkeeping, but for non-compressible data (such as - // JPEG) it's a huge win since the compressor quickly "realizes" the - // data is incompressible and doesn't bother looking for matches - // everywhere. - // - // The "skip" variable keeps track of how many bytes there are since - // the last match; dividing it by 32 (ie. right-shifting by five) gives - // the number of bytes to move ahead for each iteration. - skip := 32 - - nextS := s - candidate := 0 - for { - s = nextS - bytesBetweenHashLookups := skip >> 5 - nextS = s + bytesBetweenHashLookups - skip += bytesBetweenHashLookups - if nextS > sLimit { - goto emitRemainder - } - candidate = int(table[nextHash&tableMask]) - table[nextHash&tableMask] = uint16(s) - nextHash = hash(load32(src, nextS), shift) - if load32(src, s) == load32(src, candidate) { - break - } - } - - // A 4-byte match has been found. We'll later see if more than 4 bytes - // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit - // them as literal bytes. 
- d += emitLiteral(dst[d:], src[nextEmit:s]) - - // Call emitCopy, and then see if another emitCopy could be our next - // move. Repeat until we find no match for the input immediately after - // what was consumed by the last emitCopy call. - // - // If we exit this loop normally then we need to call emitLiteral next, - // though we don't yet know how big the literal will be. We handle that - // by proceeding to the next iteration of the main loop. We also can - // exit this loop via goto if we get close to exhausting the input. - for { - // Invariant: we have a 4-byte match at s, and no need to emit any - // literal bytes prior to s. - base := s - - // Extend the 4-byte match as long as possible. - // - // This is an inlined version of: - // s = extendMatch(src, candidate+4, s+4) - s += 4 - for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { - } - - d += emitCopy(dst[d:], base-candidate, s-base) - nextEmit = s - if s >= sLimit { - goto emitRemainder - } - - // We could immediately start working at s now, but to improve - // compression we first update the hash table at s-1 and at s. If - // another emitCopy is not our next move, also calculate nextHash - // at s+1. At least on GOARCH=amd64, these three hash calculations - // are faster as one load64 call (with some shifts) instead of - // three load32 calls. - x := load64(src, s-1) - prevHash := hash(uint32(x>>0), shift) - table[prevHash&tableMask] = uint16(s - 1) - currHash := hash(uint32(x>>8), shift) - candidate = int(table[currHash&tableMask]) - table[currHash&tableMask] = uint16(s) - if uint32(x>>8) != load32(src, candidate) { - nextHash = hash(uint32(x>>16), shift) - s++ - break - } - } - } - -emitRemainder: - if nextEmit < len(src) { - d += emitLiteral(dst[d:], src[nextEmit:]) - } - return d -} diff --git a/vendor/github.com/klauspost/compress/internal/snapref/snappy.go b/vendor/github.com/klauspost/compress/internal/snapref/snappy.go deleted file mode 100644 index 34d01f4aa6..0000000000 --- a/vendor/github.com/klauspost/compress/internal/snapref/snappy.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2011 The Snappy-Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package snapref implements the Snappy compression format. It aims for very -// high speeds and reasonable compression. -// -// There are actually two Snappy formats: block and stream. They are related, -// but different: trying to decompress block-compressed data as a Snappy stream -// will fail, and vice versa. The block format is the Decode and Encode -// functions and the stream format is the Reader and Writer types. -// -// The block format, the more common case, is used when the complete size (the -// number of bytes) of the original data is known upfront, at the time -// compression starts. The stream format, also known as the framing format, is -// for when that isn't always true. -// -// The canonical, C++ implementation is at https://github.com/google/snappy and -// it only implements the block format. -package snapref - -import ( - "hash/crc32" -) - -/* -Each encoded block begins with the varint-encoded length of the decoded data, -followed by a sequence of chunks. Chunks begin and end on byte boundaries. The -first byte of each chunk is broken into its 2 least and 6 most significant bits -called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. -Zero means a literal tag. All other values mean a copy tag. 
- -For literal tags: - - If m < 60, the next 1 + m bytes are literal bytes. - - Otherwise, let n be the little-endian unsigned integer denoted by the next - m - 59 bytes. The next 1 + n bytes after that are literal bytes. - -For copy tags, length bytes are copied from offset bytes ago, in the style of -Lempel-Ziv compression algorithms. In particular: - - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). - The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 - of the offset. The next byte is bits 0-7 of the offset. - - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). - The length is 1 + m. The offset is the little-endian unsigned integer - denoted by the next 2 bytes. - - For l == 3, this tag is a legacy format that is no longer issued by most - encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in - [1, 65). The length is 1 + m. The offset is the little-endian unsigned - integer denoted by the next 4 bytes. -*/ -const ( - tagLiteral = 0x00 - tagCopy1 = 0x01 - tagCopy2 = 0x02 - tagCopy4 = 0x03 -) - -const ( - checksumSize = 4 - chunkHeaderSize = 4 - magicChunk = "\xff\x06\x00\x00" + magicBody - magicBody = "sNaPpY" - - // maxBlockSize is the maximum size of the input to encodeBlock. It is not - // part of the wire format per se, but some parts of the encoder assume - // that an offset fits into a uint16. - // - // Also, for the framing format (Writer type instead of Encode function), - // https://github.com/google/snappy/blob/master/framing_format.txt says - // that "the uncompressed data in a chunk must be no longer than 65536 - // bytes". - maxBlockSize = 65536 - - // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is - // hard coded to be a const instead of a variable, so that obufLen can also - // be a const. Their equivalence is confirmed by - // TestMaxEncodedLenOfMaxBlockSize. - maxEncodedLenOfMaxBlockSize = 76490 - - obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize - obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize -) - -const ( - chunkTypeCompressedData = 0x00 - chunkTypeUncompressedData = 0x01 - chunkTypePadding = 0xfe - chunkTypeStreamIdentifier = 0xff -) - -var crcTable = crc32.MakeTable(crc32.Castagnoli) - -// crc implements the checksum specified in section 3 of -// https://github.com/google/snappy/blob/master/framing_format.txt -func crc(b []byte) uint32 { - c := crc32.Update(0, crcTable, b) - return uint32(c>>15|c<<17) + 0xa282ead8 -} diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod deleted file mode 100644 index 5a4412f907..0000000000 --- a/vendor/github.com/klauspost/compress/s2sx.mod +++ /dev/null @@ -1,4 +0,0 @@ -module github.com/klauspost/compress - -go 1.19 - diff --git a/vendor/github.com/klauspost/compress/s2sx.sum b/vendor/github.com/klauspost/compress/s2sx.sum deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md deleted file mode 100644 index 92e2347bbc..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/README.md +++ /dev/null @@ -1,441 +0,0 @@ -# zstd - -[Zstandard](https://facebook.github.io/zstd/) is a real-time compression algorithm, providing high compression ratios. -It offers a very wide range of compression / speed trade-off, while being backed by a very fast decoder. -A high performance compression algorithm is implemented. 
For now, the focus is on speed.
-
-This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content.
-
-This package is pure Go and does not use "unsafe".
-
-The `zstd` package is provided as open source software using a Go standard license.
-
-Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors.
-
-For seekable zstd streams, see [this excellent package](https://github.com/SaveTheRbtz/zstd-seekable-format-go).
-
-## Installation
-
-Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`.
-
-[![Go Reference](https://pkg.go.dev/badge/github.com/klauspost/compress/zstd.svg)](https://pkg.go.dev/github.com/klauspost/compress/zstd)
-
-## Compressor
-
-### Status:
-
-STABLE - there may always be subtle bugs, but a wide variety of content has been tested and the library is actively
-used by several projects. This library is being [fuzz-tested](https://github.com/klauspost/compress-fuzz) for all updates.
-
-There may still be specific combinations of data types/sizes/settings that could lead to edge cases,
-so as always, testing is recommended.
-
-For now, the following compressor strengths have been implemented:
-
-* The "Fastest" compression ratio is roughly equivalent to zstd level 1.
-* The "Default" compression ratio is roughly equivalent to zstd level 3 (default).
-* The "Better" compression ratio is roughly equivalent to zstd level 7.
-* The "Best" compression ratio is roughly equivalent to zstd level 11.
-
-In terms of speed, it is typically 2x as fast as the stdlib deflate/gzip in its fastest mode,
-and at a compression ratio around stdlib level 3 it is usually 3x as fast.
-
-### Usage
-
-An Encoder can be used either for compressing a stream via the
-`io.WriteCloser` interface it supports, or for multiple independent
-tasks via the `EncodeAll` function.
-Smaller encodes should use the `EncodeAll` function.
-Use `NewWriter` to create a new instance that can be used for both.
-
-To create a writer with default options, do the following:
-
-```Go
-// Compress input to output.
-func Compress(in io.Reader, out io.Writer) error {
-	enc, err := zstd.NewWriter(out)
-	if err != nil {
-		return err
-	}
-	_, err = io.Copy(enc, in)
-	if err != nil {
-		enc.Close()
-		return err
-	}
-	return enc.Close()
-}
-```
-
-Now you can encode by writing data to `enc`. The output is finished when `Close()` is called.
-Even if your encode fails, you should still call `Close()` to release any resources that may be held.
-
-The above is fine for big encodes. However, whenever possible try to *reuse* the writer.
-
-To reuse the encoder, you can use the `Reset(io.Writer)` function to change to another output.
-This allows the encoder to reuse all resources and avoid wasteful allocations.
-
-Currently stream encoding has 'light' concurrency, meaning up to 2 goroutines can be working on parts
-of a stream. This is independent of `WithEncoderConcurrency(n)`, but that is likely to change
-in the future, so if you want to limit concurrency for future updates, specify the concurrency
-you would like.
-
-If you would like stream encoding to be done without spawning async goroutines, use `WithEncoderConcurrency(1)`,
-which will compress input as each block is completed, blocking on writes until each has completed; see the sketch below.
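-
-Below is a minimal sketch of the reuse pattern described above, combining `Reset` with `WithEncoderConcurrency(1)`. It is illustrative only: the helper name `compressTo` is not part of the package, and the shared encoder must not be used from multiple goroutines at once.
-
-```Go
-import (
-	"io"
-
-	"github.com/klauspost/compress/zstd"
-)
-
-// One encoder, reused sequentially for many streams.
-// WithEncoderConcurrency(1) keeps encoding synchronous.
-var enc, _ = zstd.NewWriter(nil, zstd.WithEncoderConcurrency(1))
-
-// compressTo switches the shared encoder to a new output with Reset,
-// copies the input, and finishes the stream with Close.
-func compressTo(dst io.Writer, src io.Reader) error {
-	enc.Reset(dst)
-	if _, err := io.Copy(enc, src); err != nil {
-		enc.Close()
-		return err
-	}
-	return enc.Close()
-}
-```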
-
-You can specify your desired compression level using the `WithEncoderLevel()` option. Currently only pre-defined
-compression settings can be specified.
-
-#### Future Compatibility Guarantees
-
-This will be an evolving project. When using this package it is important to note that both the compression efficiency and speed may change.
-
-The goal will be to keep the default efficiency at the default zstd (level 3).
-However, the encoding should never be assumed to remain the same,
-and you should not use hashes of compressed output for similarity checks.
-
-The Encoder can be assumed to produce the same output from the exact same code version.
-However, there may be modes in the future that break this,
-although they will not be enabled without an explicit option.
-
-This encoder is not designed to (and probably never will) output the exact same bitstream as the reference encoder.
-
-Also note that the cgo decompressor currently does not [report all errors on invalid input](https://github.com/DataDog/zstd/issues/59),
-[omits error checks](https://github.com/DataDog/zstd/issues/61), [ignores checksums](https://github.com/DataDog/zstd/issues/43)
-and seems to ignore concatenated streams, even though [it is part of the spec](https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frames).
-
-#### Blocks
-
-For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`.
-
-`EncodeAll` will encode all input in src and append it to dst.
-This function can be called concurrently.
-Each call will run on the same goroutine as the caller.
-
-Encoded blocks can be concatenated and the result will be the combined input stream.
-Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`.
-
-Especially when encoding blocks, take special care to reuse the encoder.
-This will effectively make it run without allocations after a warmup period.
-To make it run completely without allocations, supply a destination buffer with space for all content.
-
-```Go
-import "github.com/klauspost/compress/zstd"
-
-// Create a writer that caches compressors.
-// For this operation type we supply a nil Writer.
-var encoder, _ = zstd.NewWriter(nil)
-
-// Compress a buffer.
-// If you have a destination buffer, the allocation in the call can also be eliminated.
-func Compress(src []byte) []byte {
-	return encoder.EncodeAll(src, make([]byte, 0, len(src)))
-}
-```
-
-You can control the maximum number of concurrent encodes using the `WithEncoderConcurrency(n)`
-option when creating the writer.
-
-Using the Encoder for both a stream and individual blocks concurrently is safe.
-
-### Performance
-
-I have collected some speed examples to compare speed and compression against other compressors.
-
-* `file` is the input file.
-* `out` is the compressor used. `zskp` is this package. `zstd` is the Datadog cgo library. `gzstd/gzkp` is gzip standard and this library.
-* `level` is the compression level used. For `zskp`, level 1 is "fastest", level 2 is "default", level 3 is "better", and level 4 is "best".
-* `insize`/`outsize` are the input/output sizes.
-* `millis` is the number of milliseconds used for compression.
-* `mb/s` is megabytes (2^20 bytes) per second.
- -``` -Silesia Corpus: -http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip - -This package: -file out level insize outsize millis mb/s -silesia.tar zskp 1 211947520 73821326 634 318.47 -silesia.tar zskp 2 211947520 67655404 1508 133.96 -silesia.tar zskp 3 211947520 64746933 3000 67.37 -silesia.tar zskp 4 211947520 60073508 16926 11.94 - -cgo zstd: -silesia.tar zstd 1 211947520 73605392 543 371.56 -silesia.tar zstd 3 211947520 66793289 864 233.68 -silesia.tar zstd 6 211947520 62916450 1913 105.66 -silesia.tar zstd 9 211947520 60212393 5063 39.92 - -gzip, stdlib/this package: -silesia.tar gzstd 1 211947520 80007735 1498 134.87 -silesia.tar gzkp 1 211947520 80088272 1009 200.31 - -GOB stream of binary data. Highly compressible. -https://files.klauspost.com/compress/gob-stream.7z - -file out level insize outsize millis mb/s -gob-stream zskp 1 1911399616 233948096 3230 564.34 -gob-stream zskp 2 1911399616 203997694 4997 364.73 -gob-stream zskp 3 1911399616 173526523 13435 135.68 -gob-stream zskp 4 1911399616 162195235 47559 38.33 - -gob-stream zstd 1 1911399616 249810424 2637 691.26 -gob-stream zstd 3 1911399616 208192146 3490 522.31 -gob-stream zstd 6 1911399616 193632038 6687 272.56 -gob-stream zstd 9 1911399616 177620386 16175 112.70 - -gob-stream gzstd 1 1911399616 357382013 9046 201.49 -gob-stream gzkp 1 1911399616 359136669 4885 373.08 - -The test data for the Large Text Compression Benchmark is the first -10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. -http://mattmahoney.net/dc/textdata.html - -file out level insize outsize millis mb/s -enwik9 zskp 1 1000000000 343833605 3687 258.64 -enwik9 zskp 2 1000000000 317001237 7672 124.29 -enwik9 zskp 3 1000000000 291915823 15923 59.89 -enwik9 zskp 4 1000000000 261710291 77697 12.27 - -enwik9 zstd 1 1000000000 358072021 3110 306.65 -enwik9 zstd 3 1000000000 313734672 4784 199.35 -enwik9 zstd 6 1000000000 295138875 10290 92.68 -enwik9 zstd 9 1000000000 278348700 28549 33.40 - -enwik9 gzstd 1 1000000000 382578136 8608 110.78 -enwik9 gzkp 1 1000000000 382781160 5628 169.45 - -Highly compressible JSON file. 
-
-https://files.klauspost.com/compress/github-june-2days-2019.json.zst
-
-file out level insize outsize millis mb/s
-github-june-2days-2019.json zskp 1 6273951764 697439532 9789 611.17
-github-june-2days-2019.json zskp 2 6273951764 610876538 18553 322.49
-github-june-2days-2019.json zskp 3 6273951764 517662858 44186 135.41
-github-june-2days-2019.json zskp 4 6273951764 464617114 165373 36.18
-
-github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00
-github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57
-github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18
-github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16
-
-github-june-2days-2019.json gzstd 1 6273951764 1164397768 26793 223.32
-github-june-2days-2019.json gzkp 1 6273951764 1120631856 17693 338.16
-
-VM Image, Linux mint with a few installed applications:
-https://files.klauspost.com/compress/rawstudio-mint14.7z
-
-file out level insize outsize millis mb/s
-rawstudio-mint14.tar zskp 1 8558382592 3718400221 18206 448.29
-rawstudio-mint14.tar zskp 2 8558382592 3326118337 37074 220.15
-rawstudio-mint14.tar zskp 3 8558382592 3163842361 87306 93.49
-rawstudio-mint14.tar zskp 4 8558382592 2970480650 783862 10.41
-
-rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27
-rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92
-rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77
-rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91
-
-rawstudio-mint14.tar gzstd 1 8558382592 3926234992 51345 158.96
-rawstudio-mint14.tar gzkp 1 8558382592 3960117298 36722 222.26
-
-CSV data:
-https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst
-
-file out level insize outsize millis mb/s
-nyc-taxi-data-10M.csv zskp 1 3325605752 641319332 9462 335.17
-nyc-taxi-data-10M.csv zskp 2 3325605752 588976126 17570 180.50
-nyc-taxi-data-10M.csv zskp 3 3325605752 529329260 32432 97.79
-nyc-taxi-data-10M.csv zskp 4 3325605752 474949772 138025 22.98
-
-nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18
-nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07
-nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27
-nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12
-
-nyc-taxi-data-10M.csv gzstd 1 3325605752 928654908 21270 149.11
-nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68
-```
-
-## Decompressor
-
-Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested.
-
-This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz),
-kindly supplied by [fuzzit.dev](https://fuzzit.dev/).
-The main purpose of the fuzz testing is to ensure that it is not possible to crash the decoder,
-or run it past its limits with ANY input provided.
-
-### Usage
-
-The package has been designed for two main usages: big streams of data and smaller in-memory buffers.
-Both are accessed by creating a `Decoder`.
-
-For streaming use, a simple setup could look like this:
-
-```Go
-import "github.com/klauspost/compress/zstd"
-
-func Decompress(in io.Reader, out io.Writer) error {
-	d, err := zstd.NewReader(in)
-	if err != nil {
-		return err
-	}
-	defer d.Close()
-
-	// Copy content...
-	_, err = io.Copy(out, d)
-	return err
-}
-```
-
-When running with default settings, it is important to call the "Close" function when you no longer need the Reader,
-so that its goroutines are stopped.
-
-Goroutines will exit once an error has been returned, including `io.EOF` at the end of a stream.
-
-Streams are decoded concurrently in 4 asynchronous stages to give the best possible throughput.
-However, if you prefer synchronous decompression, use `WithDecoderConcurrency(1)`, which will decompress data
-only as it is requested.
-
-For decoding buffers, it could look something like this:
-
-```Go
-import "github.com/klauspost/compress/zstd"
-
-// Create a reader that caches decompressors.
-// For this operation type we supply a nil Reader.
-var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))
-
-// Decompress a buffer. We don't supply a destination buffer,
-// so it will be allocated by the decoder.
-func Decompress(src []byte) ([]byte, error) {
-	return decoder.DecodeAll(src, nil)
-}
-```
-
-Both of these cases should provide the functionality needed.
-The decoder can be used for *concurrent* decompression of multiple buffers.
-By default 4 decompressors will be created.
-
-It will only allow a certain number of concurrent operations to run.
-To tweak that yourself, use the `WithDecoderConcurrency(n)` option when creating the decoder.
-It is possible to use `WithDecoderConcurrency(0)` to create GOMAXPROCS decoders.
-
-### Dictionaries
-
-Data compressed with [dictionaries](https://github.com/facebook/zstd#the-case-for-small-data-compression) can be decompressed.
-
-Dictionaries are added individually to Decoders.
-Dictionaries are generated by the `zstd --train` command and contain an initial state for the decoder.
-To add a dictionary, use the `WithDecoderDicts(dicts ...[]byte)` option with the dictionary data.
-Several dictionaries can be added at once.
-
-A dictionary will be used automatically for the data that specifies it.
-A re-used Decoder will still contain the dictionaries registered.
-
-When registering multiple dictionaries with the same ID, the last one will be used.
-
-It is possible to use dictionaries when compressing data.
-
-To enable a dictionary, use `WithEncoderDict(dict []byte)`. Here only one dictionary will be used,
-and it will likely be used even if it doesn't improve compression.
-
-The same dictionary must then be used to decompress the content.
-
-For any real gains, the dictionary should be built with similar data.
-If an unsuitable dictionary is used, the output may be slightly larger than with no dictionary.
-Use the [zstd commandline tool](https://github.com/facebook/zstd/releases) to build a dictionary from sample data.
-For information see [zstd dictionary information](https://github.com/facebook/zstd#the-case-for-small-data-compression).
-
-For now there is a fixed startup performance penalty for compressing content with dictionaries.
-This will likely be improved over time, so be sure to test performance when implementing.
-
-### Allocation-less operation
-
-The decoder has been designed to operate without allocations after a warmup.
-
-This means that you should *store* the decoder for best performance.
-To re-use a stream decoder, use the `Reset(r io.Reader) error` function to switch to another stream.
-A decoder can safely be re-used even if the previous stream failed.
-
-To release the resources, you must call the `Close()` function on a decoder.
-After this it can *no longer be reused*, but all running goroutines will be stopped.
-So you *must* call it when you will no longer need the Reader.
-
-For decompressing smaller buffers, a single decoder can be used, as in the sketch below.
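-
-As a minimal sketch of the buffer path (assuming `dict` is a dictionary supplied by the caller and `expectedSize` is the caller's estimate of the decompressed size), combining the dictionary and allocation-less patterns above:
-
-```Go
-import "github.com/klauspost/compress/zstd"
-
-// decompressWithDict is an illustrative helper, not part of the package.
-func decompressWithDict(src, dict []byte, expectedSize int) ([]byte, error) {
-	dec, err := zstd.NewReader(nil,
-		zstd.WithDecoderConcurrency(0),
-		zstd.WithDecoderDicts(dict))
-	if err != nil {
-		return nil, err
-	}
-	defer dec.Close()
-	// A zero-length destination with pre-sized capacity lets DecodeAll
-	// append its output without further allocations.
-	return dec.DecodeAll(src, make([]byte, 0, expectedSize))
-}
-```
-
-For best performance the decoder would be created once and stored, as described above; it is created inline here only to keep the sketch self-contained.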
-
-When decoding buffers, you can supply a destination slice with length 0 and your expected capacity.
-In this case no unneeded allocations should be made.
-
-### Concurrency
-
-The buffer decoder does everything on the same goroutine and does nothing concurrently.
-It can, however, decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that.
-
-The stream decoder will create goroutines that:
-
-1) Read input and split it into blocks.
-2) Decompress literals.
-3) Decompress sequences.
-4) Reconstruct the output stream.
-
-So effectively this also means the decoder will "read ahead" and prepare data to always be available for output.
-
-The concurrency level will, for streams, determine how many blocks ahead the decompression will start.
-
-Since "blocks" are quite dependent on the output of the previous block, stream decoding will only have limited concurrency.
-
-In practice this means that concurrency is often limited to utilizing about 3 cores effectively.
-
-### Benchmarks
-
-The first two benchmarks are streaming decodes; the rest are smaller inputs.
-
-Running on an AMD Ryzen 9 3950X 16-Core Processor. AMD64 assembly used.
-
-```
-BenchmarkDecoderSilesia-32 5 206878840 ns/op 1024.50 MB/s 49808 B/op 43 allocs/op
-BenchmarkDecoderEnwik9-32 1 1271809000 ns/op 786.28 MB/s 72048 B/op 52 allocs/op
-
-Concurrent blocks, performance:
-
-BenchmarkDecoder_DecodeAllParallel/kppkn.gtb.zst-32 67356 17857 ns/op 10321.96 MB/s 22.48 pct 102 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/geo.protodata.zst-32 266656 4421 ns/op 26823.21 MB/s 11.89 pct 19 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/plrabn12.txt.zst-32 20992 56842 ns/op 8477.17 MB/s 39.90 pct 754 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/lcet10.txt.zst-32 27456 43932 ns/op 9714.01 MB/s 33.27 pct 524 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/asyoulik.txt.zst-32 78432 15047 ns/op 8319.15 MB/s 40.34 pct 66 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/alice29.txt.zst-32 65800 18436 ns/op 8249.63 MB/s 37.75 pct 88 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/html_x_4.zst-32 102993 11523 ns/op 35546.09 MB/s 3.637 pct 143 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/paper-100k.pdf.zst-32 1000000 1070 ns/op 95720.98 MB/s 80.53 pct 3 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/fireworks.jpeg.zst-32 749802 1752 ns/op 70272.35 MB/s 100.0 pct 5 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/urls.10K.zst-32 22640 52934 ns/op 13263.37 MB/s 26.25 pct 1014 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/html.zst-32 226412 5232 ns/op 19572.27 MB/s 14.49 pct 20 B/op 0 allocs/op
-BenchmarkDecoder_DecodeAllParallel/comp-data.bin.zst-32 923041 1276 ns/op 3194.71 MB/s 31.26 pct 0 B/op 0 allocs/op
-```
-
-This reflects the performance around May 2022, but it may be out of date.
-
-## Zstd inside ZIP files
-
-It is possible to use zstandard to compress individual files inside zip archives.
-While this isn't widely supported, it can be useful for internal files.
-
-To support the compression and decompression of these files, you must register a compressor and decompressor.
-
-It is highly recommended to register the (de)compressors on individual zip Readers/Writers and NOT
-to use the global registration functions. The main reason for this is that two registrations from
-different packages will result in a panic.
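-
-As a minimal sketch of per-writer registration (the helper name `writeZstdZip` is illustrative), using this package's `ZipCompressor` and the `ZipMethodWinZip` method ID:
-
-```Go
-import (
-	"archive/zip"
-	"io"
-
-	"github.com/klauspost/compress/zstd"
-)
-
-// writeZstdZip writes one zstd-compressed entry to a zip archive.
-// The compressor is registered on this zip.Writer only, not globally.
-func writeZstdZip(w io.Writer, name string, data []byte) error {
-	zw := zip.NewWriter(w)
-	zw.RegisterCompressor(zstd.ZipMethodWinZip, zstd.ZipCompressor())
-	f, err := zw.CreateHeader(&zip.FileHeader{Name: name, Method: zstd.ZipMethodWinZip})
-	if err != nil {
-		return err
-	}
-	if _, err := f.Write(data); err != nil {
-		return err
-	}
-	return zw.Close()
-}
-```
-
-Reading works the same way: call `RegisterDecompressor(zstd.ZipMethodWinZip, zstd.ZipDecompressor())` on the individual `zip.Reader` before opening the entry.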
- -It is a good idea to only have a single compressor and decompressor, since they can be used for multiple zip -files concurrently, and using a single instance will allow reusing some resources. - -See [this example](https://pkg.go.dev/github.com/klauspost/compress/zstd#example-ZipCompressor) for -how to compress and decompress files inside zip archives. - -# Contributions - -Contributions are always welcome. -For new features/fixes, remember to add tests and for performance enhancements include benchmarks. - -For general feedback and experience reports, feel free to open an issue or write me on [Twitter](https://twitter.com/sh0dan). - -This package includes the excellent [`github.com/cespare/xxhash`](https://github.com/cespare/xxhash) package Copyright (c) 2016 Caleb Spare. diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go deleted file mode 100644 index 25ca983941..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/bitreader.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - "math/bits" -) - -// bitReader reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. -type bitReader struct { - in []byte - value uint64 // Maybe use [16]byte, but shifting is awkward. - bitsRead uint8 -} - -// init initializes and resets the bit reader. -func (b *bitReader) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.bitsRead += 8 - uint8(highBits(uint32(v))) - return nil -} - -// getBits will return n bits. n can be 0. -func (b *bitReader) getBits(n uint8) int { - if n == 0 /*|| b.bitsRead >= 64 */ { - return 0 - } - return int(b.get32BitsFast(n)) -} - -// get32BitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReader) get32BitsFast(n uint8) uint32 { - const regMask = 64 - 1 - v := uint32((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) - b.bitsRead += n - return v -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReader) fillFast() { - if b.bitsRead < 32 { - return - } - v := b.in[len(b.in)-4:] - b.in = b.in[:len(b.in)-4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 -} - -// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. -func (b *bitReader) fillFastStart() { - v := b.in[len(b.in)-8:] - b.in = b.in[:len(b.in)-8] - b.value = binary.LittleEndian.Uint64(v) - b.bitsRead = 0 -} - -// fill() will make sure at least 32 bits are available. 
-func (b *bitReader) fill() { - if b.bitsRead < 32 { - return - } - if len(b.in) >= 4 { - v := b.in[len(b.in)-4:] - b.in = b.in[:len(b.in)-4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - return - } - - b.bitsRead -= uint8(8 * len(b.in)) - for len(b.in) > 0 { - b.value = (b.value << 8) | uint64(b.in[len(b.in)-1]) - b.in = b.in[:len(b.in)-1] - } -} - -// finished returns true if all bits have been read from the bit stream. -func (b *bitReader) finished() bool { - return len(b.in) == 0 && b.bitsRead >= 64 -} - -// overread returns true if more bits have been requested than is on the stream. -func (b *bitReader) overread() bool { - return b.bitsRead > 64 -} - -// remain returns the number of bits remaining. -func (b *bitReader) remain() uint { - return 8*uint(len(b.in)) + 64 - uint(b.bitsRead) -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReader) close() error { - // Release reference. - b.in = nil - if !b.finished() { - return fmt.Errorf("%d extra bits on block, should be 0", b.remain()) - } - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} - -func highBits(val uint32) (n uint32) { - return uint32(bits.Len32(val) - 1) -} diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go deleted file mode 100644 index 1952f175b0..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/bitwriter.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2018 Klaus Post. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -// Based on work Copyright (c) 2013, Yann Collet, released under BSD License. - -package zstd - -// bitWriter will write bits. -// First bit will be LSB of the first byte of output. -type bitWriter struct { - bitContainer uint64 - nBits uint8 - out []byte -} - -// bitMask16 is bitmasks. Has extra to avoid bounds check. -var bitMask16 = [32]uint16{ - 0, 1, 3, 7, 0xF, 0x1F, - 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF, - 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, - 0xFFFF, 0xFFFF} /* up to 16 bits */ - -var bitMask32 = [32]uint32{ - 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF, - 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, - 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF, - 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF, -} // up to 32 bits - -// addBits16NC will add up to 16 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16NC(value uint16, bits uint8) { - b.bitContainer |= uint64(value&bitMask16[bits&31]) << (b.nBits & 63) - b.nBits += bits -} - -// addBits32NC will add up to 31 bits. -// It will not check if there is space for them, -// so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits32NC(value uint32, bits uint8) { - b.bitContainer |= uint64(value&bitMask32[bits&31]) << (b.nBits & 63) - b.nBits += bits -} - -// addBits64NC will add up to 64 bits. -// There must be space for 32 bits. 
-func (b *bitWriter) addBits64NC(value uint64, bits uint8) { - if bits <= 31 { - b.addBits32Clean(uint32(value), bits) - return - } - b.addBits32Clean(uint32(value), 32) - b.flush32() - b.addBits32Clean(uint32(value>>32), bits-32) -} - -// addBits32Clean will add up to 32 bits. -// It will not check if there is space for them. -// The input must not contain more bits than specified. -func (b *bitWriter) addBits32Clean(value uint32, bits uint8) { - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated. -// It will not check if there is space for them, so the caller must ensure that it has flushed recently. -func (b *bitWriter) addBits16Clean(value uint16, bits uint8) { - b.bitContainer |= uint64(value) << (b.nBits & 63) - b.nBits += bits -} - -// flush32 will flush out, so there are at least 32 bits available for writing. -func (b *bitWriter) flush32() { - if b.nBits < 32 { - return - } - b.out = append(b.out, - byte(b.bitContainer), - byte(b.bitContainer>>8), - byte(b.bitContainer>>16), - byte(b.bitContainer>>24)) - b.nBits -= 32 - b.bitContainer >>= 32 -} - -// flushAlign will flush remaining full bytes and align to next byte boundary. -func (b *bitWriter) flushAlign() { - nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { - b.out = append(b.out, byte(b.bitContainer>>(i*8))) - } - b.nBits = 0 - b.bitContainer = 0 -} - -// close will write the alignment bit and write the final byte(s) -// to the output. -func (b *bitWriter) close() { - // End mark - b.addBits16Clean(1, 1) - // flush until next byte. - b.flushAlign() -} - -// reset and continue writing by appending to out. -func (b *bitWriter) reset(out []byte) { - b.bitContainer = 0 - b.nBits = 0 - b.out = out -} diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go deleted file mode 100644 index 9c28840c3b..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ /dev/null @@ -1,731 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "hash/crc32" - "io" - "os" - "path/filepath" - "sync" - - "github.com/klauspost/compress/huff0" - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -type blockType uint8 - -//go:generate stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex - -const ( - blockTypeRaw blockType = iota - blockTypeRLE - blockTypeCompressed - blockTypeReserved -) - -type literalsBlockType uint8 - -const ( - literalsBlockRaw literalsBlockType = iota - literalsBlockRLE - literalsBlockCompressed - literalsBlockTreeless -) - -const ( - // maxCompressedBlockSize is the biggest allowed compressed block size (128KB) - maxCompressedBlockSize = 128 << 10 - - compressedBlockOverAlloc = 16 - maxCompressedBlockSizeAlloc = 128<<10 + compressedBlockOverAlloc - - // Maximum possible block size (all Raw+Uncompressed). - maxBlockSize = (1 << 21) - 1 - - maxMatchLen = 131074 - maxSequences = 0x7f00 + 0xffff - - // We support slightly less than the reference decoder to be able to - // use ints on 32 bit archs. 
- maxOffsetBits = 30 -) - -var ( - huffDecoderPool = sync.Pool{New: func() interface{} { - return &huff0.Scratch{} - }} - - fseDecoderPool = sync.Pool{New: func() interface{} { - return &fseDecoder{} - }} -) - -type blockDec struct { - // Raw source data of the block. - data []byte - dataStorage []byte - - // Destination of the decoded data. - dst []byte - - // Buffer for literals data. - literalBuf []byte - - // Window size of the block. - WindowSize uint64 - - err error - - // Check against this crc, if hasCRC is true. - checkCRC uint32 - hasCRC bool - - // Frame to use for singlethreaded decoding. - // Should not be used by the decoder itself since parent may be another frame. - localFrame *frameDec - - sequence []seqVals - - async struct { - newHist *history - literals []byte - seqData []byte - seqSize int // Size of uncompressed sequences - fcs uint64 - } - - // Block is RLE, this is the size. - RLESize uint32 - - Type blockType - - // Is this the last block of a frame? - Last bool - - // Use less memory - lowMem bool -} - -func (b *blockDec) String() string { - if b == nil { - return "" - } - return fmt.Sprintf("Steam Size: %d, Type: %v, Last: %t, Window: %d", len(b.data), b.Type, b.Last, b.WindowSize) -} - -func newBlockDec(lowMem bool) *blockDec { - b := blockDec{ - lowMem: lowMem, - } - return &b -} - -// reset will reset the block. -// Input must be a start of a block and will be at the end of the block when returned. -func (b *blockDec) reset(br byteBuffer, windowSize uint64) error { - b.WindowSize = windowSize - tmp, err := br.readSmall(3) - if err != nil { - println("Reading block header:", err) - return err - } - bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) - b.Last = bh&1 != 0 - b.Type = blockType((bh >> 1) & 3) - // find size. - cSize := int(bh >> 3) - maxSize := maxCompressedBlockSizeAlloc - switch b.Type { - case blockTypeReserved: - return ErrReservedBlockType - case blockTypeRLE: - if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { - if debugDecoder { - printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) - } - return ErrWindowSizeExceeded - } - b.RLESize = uint32(cSize) - if b.lowMem { - maxSize = cSize - } - cSize = 1 - case blockTypeCompressed: - if debugDecoder { - println("Data size on stream:", cSize) - } - b.RLESize = 0 - maxSize = maxCompressedBlockSizeAlloc - if windowSize < maxCompressedBlockSize && b.lowMem { - maxSize = int(windowSize) + compressedBlockOverAlloc - } - if cSize > maxCompressedBlockSize || uint64(cSize) > b.WindowSize { - if debugDecoder { - printf("compressed block too big: csize:%d block: %+v\n", uint64(cSize), b) - } - return ErrCompressedSizeTooBig - } - // Empty compressed blocks must at least be 2 bytes - // for Literals_Block_Type and one for Sequences_Section_Header. - if cSize < 2 { - return ErrBlockTooSmall - } - case blockTypeRaw: - if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) { - if debugDecoder { - printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b) - } - return ErrWindowSizeExceeded - } - - b.RLESize = 0 - // We do not need a destination for raw blocks. - maxSize = -1 - default: - panic("Invalid block type") - } - - // Read block data. - if _, ok := br.(*byteBuf); !ok && cap(b.dataStorage) < cSize { - // byteBuf doesn't need a destination buffer. 
- if b.lowMem || cSize > maxCompressedBlockSize { - b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc) - } else { - b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc) - } - } - b.data, err = br.readBig(cSize, b.dataStorage) - if err != nil { - if debugDecoder { - println("Reading block:", err, "(", cSize, ")", len(b.data)) - printf("%T", br) - } - return err - } - if cap(b.dst) <= maxSize { - b.dst = make([]byte, 0, maxSize+1) - } - return nil -} - -// sendEOF will make the decoder send EOF on this frame. -func (b *blockDec) sendErr(err error) { - b.Last = true - b.Type = blockTypeReserved - b.err = err -} - -// Close will release resources. -// Closed blockDec cannot be reset. -func (b *blockDec) Close() { -} - -// decodeBuf -func (b *blockDec) decodeBuf(hist *history) error { - switch b.Type { - case blockTypeRLE: - if cap(b.dst) < int(b.RLESize) { - if b.lowMem { - b.dst = make([]byte, b.RLESize) - } else { - b.dst = make([]byte, maxCompressedBlockSize) - } - } - b.dst = b.dst[:b.RLESize] - v := b.data[0] - for i := range b.dst { - b.dst[i] = v - } - hist.appendKeep(b.dst) - return nil - case blockTypeRaw: - hist.appendKeep(b.data) - return nil - case blockTypeCompressed: - saved := b.dst - // Append directly to history - if hist.ignoreBuffer == 0 { - b.dst = hist.b - hist.b = nil - } else { - b.dst = b.dst[:0] - } - err := b.decodeCompressed(hist) - if debugDecoder { - println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err) - } - if hist.ignoreBuffer == 0 { - hist.b = b.dst - b.dst = saved - } else { - hist.appendKeep(b.dst) - } - return err - case blockTypeReserved: - // Used for returning errors. - return b.err - default: - panic("Invalid block type") - } -} - -func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) { - // There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header - if len(in) < 2 { - return in, ErrBlockTooSmall - } - - litType := literalsBlockType(in[0] & 3) - var litRegenSize int - var litCompSize int - sizeFormat := (in[0] >> 2) & 3 - var fourStreams bool - var literals []byte - switch litType { - case literalsBlockRaw, literalsBlockRLE: - switch sizeFormat { - case 0, 2: - // Regenerated_Size uses 5 bits (0-31). Literals_Section_Header uses 1 byte. - litRegenSize = int(in[0] >> 3) - in = in[1:] - case 1: - // Regenerated_Size uses 12 bits (0-4095). Literals_Section_Header uses 2 bytes. - litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) - in = in[2:] - case 3: - // Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes. - if len(in) < 3 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return in, ErrBlockTooSmall - } - litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12) - in = in[3:] - } - case literalsBlockCompressed, literalsBlockTreeless: - switch sizeFormat { - case 0, 1: - // Both Regenerated_Size and Compressed_Size use 10 bits (0-1023). 
- if len(in) < 3 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return in, ErrBlockTooSmall - } - n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) - litRegenSize = int(n & 1023) - litCompSize = int(n >> 10) - fourStreams = sizeFormat == 1 - in = in[3:] - case 2: - fourStreams = true - if len(in) < 4 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return in, ErrBlockTooSmall - } - n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) - litRegenSize = int(n & 16383) - litCompSize = int(n >> 14) - in = in[4:] - case 3: - fourStreams = true - if len(in) < 5 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return in, ErrBlockTooSmall - } - n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28) - litRegenSize = int(n & 262143) - litCompSize = int(n >> 18) - in = in[5:] - } - } - if debugDecoder { - println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams) - } - if litRegenSize > int(b.WindowSize) || litRegenSize > maxCompressedBlockSize { - return in, ErrWindowSizeExceeded - } - - switch litType { - case literalsBlockRaw: - if len(in) < litRegenSize { - println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize) - return in, ErrBlockTooSmall - } - literals = in[:litRegenSize] - in = in[litRegenSize:] - //printf("Found %d uncompressed literals\n", litRegenSize) - case literalsBlockRLE: - if len(in) < 1 { - println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1) - return in, ErrBlockTooSmall - } - if cap(b.literalBuf) < litRegenSize { - if b.lowMem { - b.literalBuf = make([]byte, litRegenSize, litRegenSize+compressedBlockOverAlloc) - } else { - b.literalBuf = make([]byte, litRegenSize, maxCompressedBlockSize+compressedBlockOverAlloc) - } - } - literals = b.literalBuf[:litRegenSize] - v := in[0] - for i := range literals { - literals[i] = v - } - in = in[1:] - if debugDecoder { - printf("Found %d RLE compressed literals\n", litRegenSize) - } - case literalsBlockTreeless: - if len(in) < litCompSize { - println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) - return in, ErrBlockTooSmall - } - // Store compressed literals, so we defer decoding until we get history. - literals = in[:litCompSize] - in = in[litCompSize:] - if debugDecoder { - printf("Found %d compressed literals\n", litCompSize) - } - huff := hist.huffTree - if huff == nil { - return in, errors.New("literal block was treeless, but no history was defined") - } - // Ensure we have space to store it. - if cap(b.literalBuf) < litRegenSize { - if b.lowMem { - b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) - } else { - b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) - } - } - var err error - // Use our out buffer. 
- huff.MaxDecodedSize = litRegenSize - if fourStreams { - literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) - } else { - literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) - } - // Make sure we don't leak our literals buffer - if err != nil { - println("decompressing literals:", err) - return in, err - } - if len(literals) != litRegenSize { - return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) - } - - case literalsBlockCompressed: - if len(in) < litCompSize { - println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) - return in, ErrBlockTooSmall - } - literals = in[:litCompSize] - in = in[litCompSize:] - // Ensure we have space to store it. - if cap(b.literalBuf) < litRegenSize { - if b.lowMem { - b.literalBuf = make([]byte, 0, litRegenSize+compressedBlockOverAlloc) - } else { - b.literalBuf = make([]byte, 0, maxCompressedBlockSize+compressedBlockOverAlloc) - } - } - huff := hist.huffTree - if huff == nil || (hist.dict != nil && huff == hist.dict.litEnc) { - huff = huffDecoderPool.Get().(*huff0.Scratch) - if huff == nil { - huff = &huff0.Scratch{} - } - } - var err error - if debugDecoder { - println("huff table input:", len(literals), "CRC:", crc32.ChecksumIEEE(literals)) - } - huff, literals, err = huff0.ReadTable(literals, huff) - if err != nil { - println("reading huffman table:", err) - return in, err - } - hist.huffTree = huff - huff.MaxDecodedSize = litRegenSize - // Use our out buffer. - if fourStreams { - literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) - } else { - literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) - } - if err != nil { - println("decoding compressed literals:", err) - return in, err - } - // Make sure we don't leak our literals buffer - if len(literals) != litRegenSize { - return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) - } - // Re-cap to get extra size. - literals = b.literalBuf[:len(literals)] - if debugDecoder { - printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) - } - } - hist.decoders.literals = literals - return in, nil -} - -// decodeCompressed will start decompressing a block. -func (b *blockDec) decodeCompressed(hist *history) error { - in := b.data - in, err := b.decodeLiterals(in, hist) - if err != nil { - return err - } - err = b.prepareSequences(in, hist) - if err != nil { - return err - } - if hist.decoders.nSeqs == 0 { - b.dst = append(b.dst, hist.decoders.literals...) 
- return nil - } - before := len(hist.decoders.out) - err = hist.decoders.decodeSync(hist.b[hist.ignoreBuffer:]) - if err != nil { - return err - } - if hist.decoders.maxSyncLen > 0 { - hist.decoders.maxSyncLen += uint64(before) - hist.decoders.maxSyncLen -= uint64(len(hist.decoders.out)) - } - b.dst = hist.decoders.out - hist.recentOffsets = hist.decoders.prevOffset - return nil -} - -func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { - if debugDecoder { - printf("prepareSequences: %d byte(s) input\n", len(in)) - } - // Decode Sequences - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section - if len(in) < 1 { - return ErrBlockTooSmall - } - var nSeqs int - seqHeader := in[0] - switch { - case seqHeader < 128: - nSeqs = int(seqHeader) - in = in[1:] - case seqHeader < 255: - if len(in) < 2 { - return ErrBlockTooSmall - } - nSeqs = int(seqHeader-128)<<8 | int(in[1]) - in = in[2:] - case seqHeader == 255: - if len(in) < 3 { - return ErrBlockTooSmall - } - nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8) - in = in[3:] - } - if nSeqs == 0 && len(in) != 0 { - // When no sequences, there should not be any more data... - if debugDecoder { - printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in)) - } - return ErrUnexpectedBlockSize - } - - var seqs = &hist.decoders - seqs.nSeqs = nSeqs - if nSeqs > 0 { - if len(in) < 1 { - return ErrBlockTooSmall - } - br := byteReader{b: in, off: 0} - compMode := br.Uint8() - br.advance(1) - if debugDecoder { - printf("Compression modes: 0b%b", compMode) - } - if compMode&3 != 0 { - return errors.New("corrupt block: reserved bits not zero") - } - for i := uint(0); i < 3; i++ { - mode := seqCompMode((compMode >> (6 - i*2)) & 3) - if debugDecoder { - println("Table", tableIndex(i), "is", mode) - } - var seq *sequenceDec - switch tableIndex(i) { - case tableLiteralLengths: - seq = &seqs.litLengths - case tableOffsets: - seq = &seqs.offsets - case tableMatchLengths: - seq = &seqs.matchLengths - default: - panic("unknown table") - } - switch mode { - case compModePredefined: - if seq.fse != nil && !seq.fse.preDefined { - fseDecoderPool.Put(seq.fse) - } - seq.fse = &fsePredef[i] - case compModeRLE: - if br.remain() < 1 { - return ErrBlockTooSmall - } - v := br.Uint8() - br.advance(1) - if seq.fse == nil || seq.fse.preDefined { - seq.fse = fseDecoderPool.Get().(*fseDecoder) - } - symb, err := decSymbolValue(v, symbolTableX[i]) - if err != nil { - printf("RLE Transform table (%v) error: %v", tableIndex(i), err) - return err - } - seq.fse.setRLE(symb) - if debugDecoder { - printf("RLE set to 0x%x, code: %v", symb, v) - } - case compModeFSE: - if debugDecoder { - println("Reading table for", tableIndex(i)) - } - if seq.fse == nil || seq.fse.preDefined { - seq.fse = fseDecoderPool.Get().(*fseDecoder) - } - err := seq.fse.readNCount(&br, uint16(maxTableSymbol[i])) - if err != nil { - println("Read table error:", err) - return err - } - err = seq.fse.transform(symbolTableX[i]) - if err != nil { - println("Transform table error:", err) - return err - } - if debugDecoder { - println("Read table ok", "symbolLen:", seq.fse.symbolLen) - } - case compModeRepeat: - seq.repeat = true - } - if br.overread() { - return io.ErrUnexpectedEOF - } - } - in = br.unread() - } - if debugDecoder { - println("Literals:", len(seqs.literals), "hash:", xxhash.Sum64(seqs.literals), "and", seqs.nSeqs, "sequences.") - } - - if nSeqs == 0 { - if len(b.sequence) > 0 { - b.sequence = b.sequence[:0] - } - return nil 
- } - br := seqs.br - if br == nil { - br = &bitReader{} - } - if err := br.init(in); err != nil { - return err - } - - if err := seqs.initialize(br, hist, b.dst); err != nil { - println("initializing sequences:", err) - return err - } - // Extract blocks... - if false && hist.dict == nil { - fatalErr := func(err error) { - if err != nil { - panic(err) - } - } - fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize) - var buf bytes.Buffer - fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse)) - fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse)) - fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse)) - buf.Write(in) - os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm) - } - - return nil -} - -func (b *blockDec) decodeSequences(hist *history) error { - if cap(b.sequence) < hist.decoders.nSeqs { - if b.lowMem { - b.sequence = make([]seqVals, 0, hist.decoders.nSeqs) - } else { - b.sequence = make([]seqVals, 0, 0x7F00+0xffff) - } - } - b.sequence = b.sequence[:hist.decoders.nSeqs] - if hist.decoders.nSeqs == 0 { - hist.decoders.seqSize = len(hist.decoders.literals) - return nil - } - hist.decoders.windowSize = hist.windowSize - hist.decoders.prevOffset = hist.recentOffsets - - err := hist.decoders.decode(b.sequence) - hist.recentOffsets = hist.decoders.prevOffset - return err -} - -func (b *blockDec) executeSequences(hist *history) error { - hbytes := hist.b - if len(hbytes) > hist.windowSize { - hbytes = hbytes[len(hbytes)-hist.windowSize:] - // We do not need history anymore. - if hist.dict != nil { - hist.dict.content = nil - } - } - hist.decoders.windowSize = hist.windowSize - hist.decoders.out = b.dst[:0] - err := hist.decoders.execute(b.sequence, hbytes) - if err != nil { - return err - } - return b.updateHistory(hist) -} - -func (b *blockDec) updateHistory(hist *history) error { - if len(b.data) > maxCompressedBlockSize { - return fmt.Errorf("compressed block size too large (%d)", len(b.data)) - } - // Set output and release references. - b.dst = hist.decoders.out - hist.recentOffsets = hist.decoders.prevOffset - - if b.Last { - // if last block we don't care about history. - println("Last block, no history returned") - hist.b = hist.b[:0] - return nil - } else { - hist.append(b.dst) - if debugDecoder { - println("Finished block with ", len(b.sequence), "sequences. Added", len(b.dst), "to history, now length", len(hist.b)) - } - } - hist.decoders.out, hist.decoders.literals = nil, nil - - return nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go deleted file mode 100644 index 32a7f401d5..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/blockenc.go +++ /dev/null @@ -1,909 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. 
- -package zstd - -import ( - "errors" - "fmt" - "math" - "math/bits" - - "github.com/klauspost/compress/huff0" -) - -type blockEnc struct { - size int - literals []byte - sequences []seq - coders seqCoders - litEnc *huff0.Scratch - dictLitEnc *huff0.Scratch - wr bitWriter - - extraLits int - output []byte - recentOffsets [3]uint32 - prevRecentOffsets [3]uint32 - - last bool - lowMem bool -} - -// init should be used once the block has been created. -// If called more than once, the effect is the same as calling reset. -func (b *blockEnc) init() { - if b.lowMem { - // 1K literals - if cap(b.literals) < 1<<10 { - b.literals = make([]byte, 0, 1<<10) - } - const defSeqs = 20 - if cap(b.sequences) < defSeqs { - b.sequences = make([]seq, 0, defSeqs) - } - // 1K - if cap(b.output) < 1<<10 { - b.output = make([]byte, 0, 1<<10) - } - } else { - if cap(b.literals) < maxCompressedBlockSize { - b.literals = make([]byte, 0, maxCompressedBlockSize) - } - const defSeqs = 2000 - if cap(b.sequences) < defSeqs { - b.sequences = make([]seq, 0, defSeqs) - } - if cap(b.output) < maxCompressedBlockSize { - b.output = make([]byte, 0, maxCompressedBlockSize) - } - } - - if b.coders.mlEnc == nil { - b.coders.mlEnc = &fseEncoder{} - b.coders.mlPrev = &fseEncoder{} - b.coders.ofEnc = &fseEncoder{} - b.coders.ofPrev = &fseEncoder{} - b.coders.llEnc = &fseEncoder{} - b.coders.llPrev = &fseEncoder{} - } - b.litEnc = &huff0.Scratch{WantLogLess: 4} - b.reset(nil) -} - -// initNewEncode can be used to reset offsets and encoders to the initial state. -func (b *blockEnc) initNewEncode() { - b.recentOffsets = [3]uint32{1, 4, 8} - b.litEnc.Reuse = huff0.ReusePolicyNone - b.coders.setPrev(nil, nil, nil) -} - -// reset will reset the block for a new encode, but in the same stream, -// meaning that state will be carried over, but the block content is reset. -// If a previous block is provided, the recent offsets are carried over. -func (b *blockEnc) reset(prev *blockEnc) { - b.extraLits = 0 - b.literals = b.literals[:0] - b.size = 0 - b.sequences = b.sequences[:0] - b.output = b.output[:0] - b.last = false - if prev != nil { - b.recentOffsets = prev.prevRecentOffsets - } - b.dictLitEnc = nil -} - -// reset will reset the block for a new encode, but in the same stream, -// meaning that state will be carried over, but the block content is reset. -// If a previous block is provided, the recent offsets are carried over. -func (b *blockEnc) swapEncoders(prev *blockEnc) { - b.coders.swap(&prev.coders) - b.litEnc, prev.litEnc = prev.litEnc, b.litEnc -} - -// blockHeader contains the information for a block header. -type blockHeader uint32 - -// setLast sets the 'last' indicator on a block. -func (h *blockHeader) setLast(b bool) { - if b { - *h = *h | 1 - } else { - const mask = (1 << 24) - 2 - *h = *h & mask - } -} - -// setSize will store the compressed size of a block. -func (h *blockHeader) setSize(v uint32) { - const mask = 7 - *h = (*h)&mask | blockHeader(v<<3) -} - -// setType sets the block type. -func (h *blockHeader) setType(t blockType) { - const mask = 1 | (((1 << 24) - 1) ^ 7) - *h = (*h & mask) | blockHeader(t<<1) -} - -// appendTo will append the block header to a slice. -func (h blockHeader) appendTo(b []byte) []byte { - return append(b, uint8(h), uint8(h>>8), uint8(h>>16)) -} - -// String returns a string representation of the block. -func (h blockHeader) String() string { - return fmt.Sprintf("Type: %d, Size: %d, Last:%t", (h>>1)&3, h>>3, h&1 == 1) -} - -// literalsHeader contains literals header information. 
-type literalsHeader uint64 - -// setType can be used to set the type of literal block. -func (h *literalsHeader) setType(t literalsBlockType) { - const mask = math.MaxUint64 - 3 - *h = (*h & mask) | literalsHeader(t) -} - -// setSize can be used to set a single size, for uncompressed and RLE content. -func (h *literalsHeader) setSize(regenLen int) { - inBits := bits.Len32(uint32(regenLen)) - // Only retain 2 bits - const mask = 3 - lh := uint64(*h & mask) - switch { - case inBits < 5: - lh |= (uint64(regenLen) << 3) | (1 << 60) - if debugEncoder { - got := int(lh>>3) & 0xff - if got != regenLen { - panic(fmt.Sprint("litRegenSize = ", regenLen, "(want) != ", got, "(got)")) - } - } - case inBits < 12: - lh |= (1 << 2) | (uint64(regenLen) << 4) | (2 << 60) - case inBits < 20: - lh |= (3 << 2) | (uint64(regenLen) << 4) | (3 << 60) - default: - panic(fmt.Errorf("internal error: block too big (%d)", regenLen)) - } - *h = literalsHeader(lh) -} - -// setSizes will set the size of a compressed literals section and the input length. -func (h *literalsHeader) setSizes(compLen, inLen int, single bool) { - compBits, inBits := bits.Len32(uint32(compLen)), bits.Len32(uint32(inLen)) - // Only retain 2 bits - const mask = 3 - lh := uint64(*h & mask) - switch { - case compBits <= 10 && inBits <= 10: - if !single { - lh |= 1 << 2 - } - lh |= (uint64(inLen) << 4) | (uint64(compLen) << (10 + 4)) | (3 << 60) - if debugEncoder { - const mmask = (1 << 24) - 1 - n := (lh >> 4) & mmask - if int(n&1023) != inLen { - panic(fmt.Sprint("regensize:", int(n&1023), "!=", inLen, inBits)) - } - if int(n>>10) != compLen { - panic(fmt.Sprint("compsize:", int(n>>10), "!=", compLen, compBits)) - } - } - case compBits <= 14 && inBits <= 14: - lh |= (2 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (14 + 4)) | (4 << 60) - if single { - panic("single stream used with more than 10 bits length.") - } - case compBits <= 18 && inBits <= 18: - lh |= (3 << 2) | (uint64(inLen) << 4) | (uint64(compLen) << (18 + 4)) | (5 << 60) - if single { - panic("single stream used with more than 10 bits length.") - } - default: - panic("internal error: block too big") - } - *h = literalsHeader(lh) -} - -// appendTo will append the literals header to a byte slice. -func (h literalsHeader) appendTo(b []byte) []byte { - size := uint8(h >> 60) - switch size { - case 1: - b = append(b, uint8(h)) - case 2: - b = append(b, uint8(h), uint8(h>>8)) - case 3: - b = append(b, uint8(h), uint8(h>>8), uint8(h>>16)) - case 4: - b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24)) - case 5: - b = append(b, uint8(h), uint8(h>>8), uint8(h>>16), uint8(h>>24), uint8(h>>32)) - default: - panic(fmt.Errorf("internal error: literalsHeader has invalid size (%d)", size)) - } - return b -} - -// size returns the output size with currently set values. -func (h literalsHeader) size() int { - return int(h >> 60) -} - -func (h literalsHeader) String() string { - return fmt.Sprintf("Type: %d, SizeFormat: %d, Size: 0x%d, Bytes:%d", literalsBlockType(h&3), (h>>2)&3, h&((1<<60)-1)>>4, h>>60) -} - -// pushOffsets will push the recent offsets to the backup store. -func (b *blockEnc) pushOffsets() { - b.prevRecentOffsets = b.recentOffsets -} - -// pushOffsets will push the recent offsets to the backup store. -func (b *blockEnc) popOffsets() { - b.recentOffsets = b.prevRecentOffsets -} - -// matchOffset will adjust recent offsets and return the adjusted one, -// if it matches a previous offset. 
-func (b *blockEnc) matchOffset(offset, lits uint32) uint32 { - // Check if offset is one of the recent offsets. - // Adjusts the output offset accordingly. - // Gives a tiny bit of compression, typically around 1%. - if true { - if lits > 0 { - switch offset { - case b.recentOffsets[0]: - offset = 1 - case b.recentOffsets[1]: - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 2 - case b.recentOffsets[2]: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 3 - default: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset += 3 - } - } else { - switch offset { - case b.recentOffsets[1]: - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 1 - case b.recentOffsets[2]: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 2 - case b.recentOffsets[0] - 1: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset = 3 - default: - b.recentOffsets[2] = b.recentOffsets[1] - b.recentOffsets[1] = b.recentOffsets[0] - b.recentOffsets[0] = offset - offset += 3 - } - } - } else { - offset += 3 - } - return offset -} - -// encodeRaw can be used to set the output to a raw representation of supplied bytes. -func (b *blockEnc) encodeRaw(a []byte) { - var bh blockHeader - bh.setLast(b.last) - bh.setSize(uint32(len(a))) - bh.setType(blockTypeRaw) - b.output = bh.appendTo(b.output[:0]) - b.output = append(b.output, a...) - if debugEncoder { - println("Adding RAW block, length", len(a), "last:", b.last) - } -} - -// encodeRaw can be used to set the output to a raw representation of supplied bytes. -func (b *blockEnc) encodeRawTo(dst, src []byte) []byte { - var bh blockHeader - bh.setLast(b.last) - bh.setSize(uint32(len(src))) - bh.setType(blockTypeRaw) - dst = bh.appendTo(dst) - dst = append(dst, src...) - if debugEncoder { - println("Adding RAW block, length", len(src), "last:", b.last) - } - return dst -} - -// encodeLits can be used if the block is only litLen. -func (b *blockEnc) encodeLits(lits []byte, raw bool) error { - var bh blockHeader - bh.setLast(b.last) - bh.setSize(uint32(len(lits))) - - // Don't compress extremely small blocks - if len(lits) < 8 || (len(lits) < 32 && b.dictLitEnc == nil) || raw { - if debugEncoder { - println("Adding RAW block, length", len(lits), "last:", b.last) - } - bh.setType(blockTypeRaw) - b.output = bh.appendTo(b.output) - b.output = append(b.output, lits...) - return nil - } - - var ( - out []byte - reUsed, single bool - err error - ) - if b.dictLitEnc != nil { - b.litEnc.TransferCTable(b.dictLitEnc) - b.litEnc.Reuse = huff0.ReusePolicyAllow - b.dictLitEnc = nil - } - if len(lits) >= 1024 { - // Use 4 Streams. - out, reUsed, err = huff0.Compress4X(lits, b.litEnc) - } else if len(lits) > 16 { - // Use 1 stream - single = true - out, reUsed, err = huff0.Compress1X(lits, b.litEnc) - } else { - err = huff0.ErrIncompressible - } - if err == nil && len(out)+5 > len(lits) { - // If we are close, we may still be worse or equal to raw. 
- var lh literalsHeader - lh.setSizes(len(out), len(lits), single) - if len(out)+lh.size() >= len(lits) { - err = huff0.ErrIncompressible - } - } - switch err { - case huff0.ErrIncompressible: - if debugEncoder { - println("Adding RAW block, length", len(lits), "last:", b.last) - } - bh.setType(blockTypeRaw) - b.output = bh.appendTo(b.output) - b.output = append(b.output, lits...) - return nil - case huff0.ErrUseRLE: - if debugEncoder { - println("Adding RLE block, length", len(lits)) - } - bh.setType(blockTypeRLE) - b.output = bh.appendTo(b.output) - b.output = append(b.output, lits[0]) - return nil - case nil: - default: - return err - } - // Compressed... - // Now, allow reuse - b.litEnc.Reuse = huff0.ReusePolicyAllow - bh.setType(blockTypeCompressed) - var lh literalsHeader - if reUsed { - if debugEncoder { - println("Reused tree, compressed to", len(out)) - } - lh.setType(literalsBlockTreeless) - } else { - if debugEncoder { - println("New tree, compressed to", len(out), "tree size:", len(b.litEnc.OutTable)) - } - lh.setType(literalsBlockCompressed) - } - // Set sizes - lh.setSizes(len(out), len(lits), single) - bh.setSize(uint32(len(out) + lh.size() + 1)) - - // Write block headers. - b.output = bh.appendTo(b.output) - b.output = lh.appendTo(b.output) - // Add compressed data. - b.output = append(b.output, out...) - // No sequences. - b.output = append(b.output, 0) - return nil -} - -// encodeRLE will encode an RLE block. -func (b *blockEnc) encodeRLE(val byte, length uint32) { - var bh blockHeader - bh.setLast(b.last) - bh.setSize(length) - bh.setType(blockTypeRLE) - b.output = bh.appendTo(b.output) - b.output = append(b.output, val) -} - -// fuzzFseEncoder can be used to fuzz the FSE encoder. -func fuzzFseEncoder(data []byte) int { - if len(data) > maxSequences || len(data) < 2 { - return 0 - } - enc := fseEncoder{} - hist := enc.Histogram() - maxSym := uint8(0) - for i, v := range data { - v = v & 63 - data[i] = v - hist[v]++ - if v > maxSym { - maxSym = v - } - } - if maxSym == 0 { - // All 0 - return 0 - } - maxCount := func(a []uint32) int { - var max uint32 - for _, v := range a { - if v > max { - max = v - } - } - return int(max) - } - cnt := maxCount(hist[:maxSym]) - if cnt == len(data) { - // RLE - return 0 - } - enc.HistogramFinished(maxSym, cnt) - err := enc.normalizeCount(len(data)) - if err != nil { - return 0 - } - _, err = enc.writeCount(nil) - if err != nil { - panic(err) - } - return 1 -} - -// encode will encode the block and append the output in b.output. -// Previous offset codes must be pushed if more blocks are expected. -func (b *blockEnc) encode(org []byte, raw, rawAllLits bool) error { - if len(b.sequences) == 0 { - return b.encodeLits(b.literals, rawAllLits) - } - if len(b.sequences) == 1 && len(org) > 0 && len(b.literals) <= 1 { - // Check common RLE cases. - seq := b.sequences[0] - if seq.litLen == uint32(len(b.literals)) && seq.offset-3 == 1 { - // Offset == 1 and 0 or 1 literals. - b.encodeRLE(org[0], b.sequences[0].matchLen+zstdMinMatch+seq.litLen) - return nil - } - } - - // We want some difference to at least account for the headers. - saved := b.size - len(b.literals) - (b.size >> 6) - if saved < 16 { - if org == nil { - return errIncompressible - } - b.popOffsets() - return b.encodeLits(org, rawAllLits) - } - - var bh blockHeader - var lh literalsHeader - bh.setLast(b.last) - bh.setType(blockTypeCompressed) - // Store offset of the block header. Needed when we know the size. 
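encodeLits above picks huff0's four-stream mode for inputs of at least 1024 bytes and the single-stream mode for smaller-but-still-compressible inputs, falling back to raw or RLE blocks on ErrIncompressible/ErrUseRLE. The same decision can be exercised directly against the public huff0 package; a rough, self-contained sketch:

    package main

    import (
        "bytes"
        "fmt"

        "github.com/klauspost/compress/huff0"
    )

    func main() {
        lits := bytes.Repeat([]byte("abcdefgh"), 200) // 1600 bytes, compressible
        var s huff0.Scratch

        // Mirrors the choice above: >= 1024 bytes uses four interleaved
        // streams, smaller (but > 16 byte) inputs use a single stream.
        var out []byte
        var err error
        if len(lits) >= 1024 {
            out, _, err = huff0.Compress4X(lits, &s)
        } else {
            out, _, err = huff0.Compress1X(lits, &s)
        }
        if err != nil {
            // huff0.ErrIncompressible / huff0.ErrUseRLE map to raw / RLE blocks.
            fmt.Println("fall back:", err)
            return
        }
        fmt.Printf("compressed %d -> %d bytes\n", len(lits), len(out))
    }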
- bhOffset := len(b.output) - b.output = bh.appendTo(b.output) - - var ( - out []byte - reUsed, single bool - err error - ) - if b.dictLitEnc != nil { - b.litEnc.TransferCTable(b.dictLitEnc) - b.litEnc.Reuse = huff0.ReusePolicyAllow - b.dictLitEnc = nil - } - if len(b.literals) >= 1024 && !raw { - // Use 4 Streams. - out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc) - } else if len(b.literals) > 16 && !raw { - // Use 1 stream - single = true - out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc) - } else { - err = huff0.ErrIncompressible - } - - if err == nil && len(out)+5 > len(b.literals) { - // If we are close, we may still be worse or equal to raw. - var lh literalsHeader - lh.setSize(len(b.literals)) - szRaw := lh.size() - lh.setSizes(len(out), len(b.literals), single) - szComp := lh.size() - if len(out)+szComp >= len(b.literals)+szRaw { - err = huff0.ErrIncompressible - } - } - switch err { - case huff0.ErrIncompressible: - lh.setType(literalsBlockRaw) - lh.setSize(len(b.literals)) - b.output = lh.appendTo(b.output) - b.output = append(b.output, b.literals...) - if debugEncoder { - println("Adding literals RAW, length", len(b.literals)) - } - case huff0.ErrUseRLE: - lh.setType(literalsBlockRLE) - lh.setSize(len(b.literals)) - b.output = lh.appendTo(b.output) - b.output = append(b.output, b.literals[0]) - if debugEncoder { - println("Adding literals RLE") - } - case nil: - // Compressed litLen... - if reUsed { - if debugEncoder { - println("reused tree") - } - lh.setType(literalsBlockTreeless) - } else { - if debugEncoder { - println("new tree, size:", len(b.litEnc.OutTable)) - } - lh.setType(literalsBlockCompressed) - if debugEncoder { - _, _, err := huff0.ReadTable(out, nil) - if err != nil { - panic(err) - } - } - } - lh.setSizes(len(out), len(b.literals), single) - if debugEncoder { - printf("Compressed %d literals to %d bytes", len(b.literals), len(out)) - println("Adding literal header:", lh) - } - b.output = lh.appendTo(b.output) - b.output = append(b.output, out...) - b.litEnc.Reuse = huff0.ReusePolicyAllow - if debugEncoder { - println("Adding literals compressed") - } - default: - if debugEncoder { - println("Adding literals ERROR:", err) - } - return err - } - // Sequence compression - - // Write the number of sequences - switch { - case len(b.sequences) < 128: - b.output = append(b.output, uint8(len(b.sequences))) - case len(b.sequences) < 0x7f00: // TODO: this could be wrong - n := len(b.sequences) - b.output = append(b.output, 128+uint8(n>>8), uint8(n)) - default: - n := len(b.sequences) - 0x7f00 - b.output = append(b.output, 255, uint8(n), uint8(n>>8)) - } - if debugEncoder { - println("Encoding", len(b.sequences), "sequences") - } - b.genCodes() - llEnc := b.coders.llEnc - ofEnc := b.coders.ofEnc - mlEnc := b.coders.mlEnc - err = llEnc.normalizeCount(len(b.sequences)) - if err != nil { - return err - } - err = ofEnc.normalizeCount(len(b.sequences)) - if err != nil { - return err - } - err = mlEnc.normalizeCount(len(b.sequences)) - if err != nil { - return err - } - - // Choose the best compression mode for each type. - // Will evaluate the new vs predefined and previous. - chooseComp := func(cur, prev, preDef *fseEncoder) (*fseEncoder, seqCompMode) { - // See if predefined/previous is better - hist := cur.count[:cur.symbolLen] - nSize := cur.approxSize(hist) + cur.maxHeaderSize() - predefSize := preDef.approxSize(hist) - prevSize := prev.approxSize(hist) - - // Add a small penalty for new encoders. - // Don't bother with extremely small (<2 byte gains). 
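The three-way switch just above writes the Number_of_Sequences field from the zstd format: one byte below 128, two bytes with the high bit set below 0x7F00, otherwise a 0xFF marker plus the little-endian remainder. (The 0x7F00 bound flagged with a TODO does match the format's two-byte maximum of 0x7EFF.) A compact sketch of the same encoding:

    package main

    import "fmt"

    // appendSeqCount mirrors the sequence-count switch above.
    func appendSeqCount(dst []byte, n int) []byte {
        switch {
        case n < 128:
            return append(dst, uint8(n))
        case n < 0x7f00:
            return append(dst, 128+uint8(n>>8), uint8(n))
        default:
            n -= 0x7f00
            return append(dst, 255, uint8(n), uint8(n>>8))
        }
    }

    func main() {
        for _, n := range []int{100, 1000, 40000} {
            fmt.Printf("%5d -> % x\n", n, appendSeqCount(nil, n))
        }
    }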
- nSize = nSize + (nSize+2*8*16)>>4 - switch { - case predefSize <= prevSize && predefSize <= nSize || forcePreDef: - if debugEncoder { - println("Using predefined", predefSize>>3, "<=", nSize>>3) - } - return preDef, compModePredefined - case prevSize <= nSize: - if debugEncoder { - println("Using previous", prevSize>>3, "<=", nSize>>3) - } - return prev, compModeRepeat - default: - if debugEncoder { - println("Using new, predef", predefSize>>3, ". previous:", prevSize>>3, ">", nSize>>3, "header max:", cur.maxHeaderSize()>>3, "bytes") - println("tl:", cur.actualTableLog, "symbolLen:", cur.symbolLen, "norm:", cur.norm[:cur.symbolLen], "hist", cur.count[:cur.symbolLen]) - } - return cur, compModeFSE - } - } - - // Write compression mode - var mode uint8 - if llEnc.useRLE { - mode |= uint8(compModeRLE) << 6 - llEnc.setRLE(b.sequences[0].llCode) - if debugEncoder { - println("llEnc.useRLE") - } - } else { - var m seqCompMode - llEnc, m = chooseComp(llEnc, b.coders.llPrev, &fsePredefEnc[tableLiteralLengths]) - mode |= uint8(m) << 6 - } - if ofEnc.useRLE { - mode |= uint8(compModeRLE) << 4 - ofEnc.setRLE(b.sequences[0].ofCode) - if debugEncoder { - println("ofEnc.useRLE") - } - } else { - var m seqCompMode - ofEnc, m = chooseComp(ofEnc, b.coders.ofPrev, &fsePredefEnc[tableOffsets]) - mode |= uint8(m) << 4 - } - - if mlEnc.useRLE { - mode |= uint8(compModeRLE) << 2 - mlEnc.setRLE(b.sequences[0].mlCode) - if debugEncoder { - println("mlEnc.useRLE, code: ", b.sequences[0].mlCode, "value", b.sequences[0].matchLen) - } - } else { - var m seqCompMode - mlEnc, m = chooseComp(mlEnc, b.coders.mlPrev, &fsePredefEnc[tableMatchLengths]) - mode |= uint8(m) << 2 - } - b.output = append(b.output, mode) - if debugEncoder { - printf("Compression modes: 0b%b", mode) - } - b.output, err = llEnc.writeCount(b.output) - if err != nil { - return err - } - start := len(b.output) - b.output, err = ofEnc.writeCount(b.output) - if err != nil { - return err - } - if false { - println("block:", b.output[start:], "tablelog", ofEnc.actualTableLog, "maxcount:", ofEnc.maxCount) - fmt.Printf("selected TableLog: %d, Symbol length: %d\n", ofEnc.actualTableLog, ofEnc.symbolLen) - for i, v := range ofEnc.norm[:ofEnc.symbolLen] { - fmt.Printf("%3d: %5d -> %4d \n", i, ofEnc.count[i], v) - } - } - b.output, err = mlEnc.writeCount(b.output) - if err != nil { - return err - } - - // Maybe in block? - wr := &b.wr - wr.reset(b.output) - - var ll, of, ml cState - - // Current sequence - seq := len(b.sequences) - 1 - s := b.sequences[seq] - llEnc.setBits(llBitsTable[:]) - mlEnc.setBits(mlBitsTable[:]) - ofEnc.setBits(nil) - - llTT, ofTT, mlTT := llEnc.ct.symbolTT[:256], ofEnc.ct.symbolTT[:256], mlEnc.ct.symbolTT[:256] - - // We have 3 bounds checks here (and in the loop). - // Since we are iterating backwards it is kinda hard to avoid. - llB, ofB, mlB := llTT[s.llCode], ofTT[s.ofCode], mlTT[s.mlCode] - ll.init(wr, &llEnc.ct, llB) - of.init(wr, &ofEnc.ct, ofB) - wr.flush32() - ml.init(wr, &mlEnc.ct, mlB) - - // Each of these lookups also generates a bounds check. - wr.addBits32NC(s.litLen, llB.outBits) - wr.addBits32NC(s.matchLen, mlB.outBits) - wr.flush32() - wr.addBits32NC(s.offset, ofB.outBits) - if debugSequences { - println("Encoded seq", seq, s, "codes:", s.llCode, s.mlCode, s.ofCode, "states:", ll.state, ml.state, of.state, "bits:", llB, mlB, ofB) - } - seq-- - // Store sequences in reverse... 
-	for seq >= 0 {
-		s = b.sequences[seq]
-
-		ofB := ofTT[s.ofCode]
-		wr.flush32() // tablelog max is below 8 for each, so it will fill max 24 bits.
-		//of.encode(ofB)
-		nbBitsOut := (uint32(of.state) + ofB.deltaNbBits) >> 16
-		dstState := int32(of.state>>(nbBitsOut&15)) + int32(ofB.deltaFindState)
-		wr.addBits16NC(of.state, uint8(nbBitsOut))
-		of.state = of.stateTable[dstState]
-
-		// Accumulate extra bits.
-		outBits := ofB.outBits & 31
-		extraBits := uint64(s.offset & bitMask32[outBits])
-		extraBitsN := outBits
-
-		mlB := mlTT[s.mlCode]
-		//ml.encode(mlB)
-		nbBitsOut = (uint32(ml.state) + mlB.deltaNbBits) >> 16
-		dstState = int32(ml.state>>(nbBitsOut&15)) + int32(mlB.deltaFindState)
-		wr.addBits16NC(ml.state, uint8(nbBitsOut))
-		ml.state = ml.stateTable[dstState]
-
-		outBits = mlB.outBits & 31
-		extraBits = extraBits<<outBits | uint64(s.matchLen&bitMask32[outBits])
-		extraBitsN += outBits
-
-		llB := llTT[s.llCode]
-		//ll.encode(llB)
-		nbBitsOut = (uint32(ll.state) + llB.deltaNbBits) >> 16
-		dstState = int32(ll.state>>(nbBitsOut&15)) + int32(llB.deltaFindState)
-		wr.addBits16NC(ll.state, uint8(nbBitsOut))
-		ll.state = ll.stateTable[dstState]
-
-		outBits = llB.outBits & 31
-		extraBits = extraBits<<outBits | uint64(s.litLen&bitMask32[outBits])
-		extraBitsN += outBits
-
-		wr.flush32()
-		wr.addBits64NC(extraBits, extraBitsN)
-
-		if debugSequences {
-			println("Encoded seq", seq, s)
-		}
-
-		seq--
-	}
-	ml.flush(mlEnc.actualTableLog)
-	of.flush(ofEnc.actualTableLog)
-	ll.flush(llEnc.actualTableLog)
-	wr.close()
-	b.output = wr.out
-
-	// Maybe even add a bigger margin.
-	if len(b.output)-3-bhOffset >= b.size {
-		// Discard and encode as raw block.
-		b.output = b.encodeRawTo(b.output[:bhOffset], org)
-		b.popOffsets()
-		b.litEnc.Reuse = huff0.ReusePolicyNone
-		return nil
-	}
-
-	// Size is output minus block header.
-	bh.setSize(uint32(len(b.output)-bhOffset) - 3)
-	if debugEncoder {
-		println("Rewriting block header", bh)
-	}
-	_ = bh.appendTo(b.output[bhOffset:bhOffset])
-	b.coders.setPrev(llEnc, mlEnc, ofEnc)
-	return nil
-}
-
-var errIncompressible = errors.New("incompressible")
-
-func (b *blockEnc) genCodes() {
-	if len(b.sequences) == 0 {
-		// nothing to do
-		return
-	}
-	if len(b.sequences) > math.MaxUint16 {
-		panic("can only encode up to 64K sequences")
-	}
-	// No bounds checks after here:
-	llH := b.coders.llEnc.Histogram()
-	ofH := b.coders.ofEnc.Histogram()
-	mlH := b.coders.mlEnc.Histogram()
-	for i := range llH {
-		llH[i] = 0
-	}
-	for i := range ofH {
-		ofH[i] = 0
-	}
-	for i := range mlH {
-		mlH[i] = 0
-	}
-
-	var llMax, ofMax, mlMax uint8
-	for i := range b.sequences {
-		seq := &b.sequences[i]
-		v := llCode(seq.litLen)
-		seq.llCode = v
-		llH[v]++
-		if v > llMax {
-			llMax = v
-		}
-
-		v = ofCode(seq.offset)
-		seq.ofCode = v
-		ofH[v]++
-		if v > ofMax {
-			ofMax = v
-		}
-
-		v = mlCode(seq.matchLen)
-		seq.mlCode = v
-		mlH[v]++
-		if v > mlMax {
-			mlMax = v
-			if debugAsserts && mlMax > maxMatchLengthSymbol {
-				panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d), matchlen: %d", mlMax, seq.matchLen))
-			}
-		}
-	}
-	maxCount := func(a []uint32) int {
-		var max uint32
-		for _, v := range a {
-			if v > max {
-				max = v
-			}
-		}
-		return int(max)
-	}
-	if debugAsserts && mlMax > maxMatchLengthSymbol {
-		panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax))
-	}
-	if debugAsserts && ofMax > maxOffsetBits {
-		panic(fmt.Errorf("ofMax > maxOffsetBits (%d)", ofMax))
-	}
-	if debugAsserts && llMax > maxLiteralLengthSymbol {
-		panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax))
-	}
-
-	b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1]))
-	b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1]))
-	b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1]))
-}
diff --git a/vendor/github.com/klauspost/compress/zstd/blocktype_string.go b/vendor/github.com/klauspost/compress/zstd/blocktype_string.go
deleted file mode 100644
index 01a01e486e..0000000000
--- a/vendor/github.com/klauspost/compress/zstd/blocktype_string.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Code generated by "stringer -type=blockType,literalsBlockType,seqCompMode,tableIndex"; DO NOT EDIT.
-
-package zstd
-
-import "strconv"
-
-func _() {
-	// An "invalid array index" compiler error signifies that the constant values have changed.
-	// Re-run the stringer command to generate them again.
-	var x [1]struct{}
-	_ = x[blockTypeRaw-0]
-	_ = x[blockTypeRLE-1]
-	_ = x[blockTypeCompressed-2]
-	_ = x[blockTypeReserved-3]
-}
-
-const _blockType_name = "blockTypeRawblockTypeRLEblockTypeCompressedblockTypeReserved"
-
-var _blockType_index = [...]uint8{0, 12, 24, 43, 60}
-
-func (i blockType) String() string {
-	if i >= blockType(len(_blockType_index)-1) {
-		return "blockType(" + strconv.FormatInt(int64(i), 10) + ")"
-	}
-	return _blockType_name[_blockType_index[i]:_blockType_index[i+1]]
-}
-func _() {
-	// An "invalid array index" compiler error signifies that the constant values have changed.
-	// Re-run the stringer command to generate them again.
-	var x [1]struct{}
-	_ = x[literalsBlockRaw-0]
-	_ = x[literalsBlockRLE-1]
-	_ = x[literalsBlockCompressed-2]
-	_ = x[literalsBlockTreeless-3]
-}
-
-const _literalsBlockType_name = "literalsBlockRawliteralsBlockRLEliteralsBlockCompressedliteralsBlockTreeless"
-
-var _literalsBlockType_index = [...]uint8{0, 16, 32, 55, 76}
-
-func (i literalsBlockType) String() string {
-	if i >= literalsBlockType(len(_literalsBlockType_index)-1) {
-		return "literalsBlockType(" + strconv.FormatInt(int64(i), 10) + ")"
-	}
-	return _literalsBlockType_name[_literalsBlockType_index[i]:_literalsBlockType_index[i+1]]
-}
-func _() {
-	// An "invalid array index" compiler error signifies that the constant values have changed.
-	// Re-run the stringer command to generate them again.
-	var x [1]struct{}
-	_ = x[compModePredefined-0]
-	_ = x[compModeRLE-1]
-	_ = x[compModeFSE-2]
-	_ = x[compModeRepeat-3]
-}
-
-const _seqCompMode_name = "compModePredefinedcompModeRLEcompModeFSEcompModeRepeat"
-
-var _seqCompMode_index = [...]uint8{0, 18, 29, 40, 54}
-
-func (i seqCompMode) String() string {
-	if i >= seqCompMode(len(_seqCompMode_index)-1) {
-		return "seqCompMode(" + strconv.FormatInt(int64(i), 10) + ")"
-	}
-	return _seqCompMode_name[_seqCompMode_index[i]:_seqCompMode_index[i+1]]
-}
-func _() {
-	// An "invalid array index" compiler error signifies that the constant values have changed.
-	// Re-run the stringer command to generate them again.
-	var x [1]struct{}
-	_ = x[tableLiteralLengths-0]
-	_ = x[tableOffsets-1]
-	_ = x[tableMatchLengths-2]
-}
-
-const _tableIndex_name = "tableLiteralLengthstableOffsetstableMatchLengths"
-
-var _tableIndex_index = [...]uint8{0, 19, 31, 48}
-
-func (i tableIndex) String() string {
-	if i >= tableIndex(len(_tableIndex_index)-1) {
-		return "tableIndex(" + strconv.FormatInt(int64(i), 10) + ")"
-	}
-	return _tableIndex_name[_tableIndex_index[i]:_tableIndex_index[i+1]]
-}
diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
deleted file mode 100644
index 55a388553d..0000000000
--- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go
+++ /dev/null
@@ -1,131 +0,0 @@
-// Copyright 2019+ Klaus Post. All rights reserved.
-// License information can be found in the LICENSE file.
-// Based on work by Yann Collet, released under BSD License.
-
-package zstd
-
-import (
-	"fmt"
-	"io"
-)
-
-type byteBuffer interface {
-	// Read up to 8 bytes.
-	// Returns io.ErrUnexpectedEOF if this cannot be satisfied.
-	readSmall(n int) ([]byte, error)
-
-	// Read >8 bytes.
-	// MAY use the destination slice.
- readBig(n int, dst []byte) ([]byte, error) - - // Read a single byte. - readByte() (byte, error) - - // Skip n bytes. - skipN(n int64) error -} - -// in-memory buffer -type byteBuf []byte - -func (b *byteBuf) readSmall(n int) ([]byte, error) { - if debugAsserts && n > 8 { - panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) - } - bb := *b - if len(bb) < n { - return nil, io.ErrUnexpectedEOF - } - r := bb[:n] - *b = bb[n:] - return r, nil -} - -func (b *byteBuf) readBig(n int, dst []byte) ([]byte, error) { - bb := *b - if len(bb) < n { - return nil, io.ErrUnexpectedEOF - } - r := bb[:n] - *b = bb[n:] - return r, nil -} - -func (b *byteBuf) readByte() (byte, error) { - bb := *b - if len(bb) < 1 { - return 0, io.ErrUnexpectedEOF - } - r := bb[0] - *b = bb[1:] - return r, nil -} - -func (b *byteBuf) skipN(n int64) error { - bb := *b - if n < 0 { - return fmt.Errorf("negative skip (%d) requested", n) - } - if int64(len(bb)) < n { - return io.ErrUnexpectedEOF - } - *b = bb[n:] - return nil -} - -// wrapper around a reader. -type readerWrapper struct { - r io.Reader - tmp [8]byte -} - -func (r *readerWrapper) readSmall(n int) ([]byte, error) { - if debugAsserts && n > 8 { - panic(fmt.Errorf("small read > 8 (%d). use readBig", n)) - } - n2, err := io.ReadFull(r.r, r.tmp[:n]) - // We only really care about the actual bytes read. - if err != nil { - if err == io.EOF { - return nil, io.ErrUnexpectedEOF - } - if debugDecoder { - println("readSmall: got", n2, "want", n, "err", err) - } - return nil, err - } - return r.tmp[:n], nil -} - -func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { - if cap(dst) < n { - dst = make([]byte, n) - } - n2, err := io.ReadFull(r.r, dst[:n]) - if err == io.EOF && n > 0 { - err = io.ErrUnexpectedEOF - } - return dst[:n2], err -} - -func (r *readerWrapper) readByte() (byte, error) { - n2, err := io.ReadFull(r.r, r.tmp[:1]) - if err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return 0, err - } - if n2 != 1 { - return 0, io.ErrUnexpectedEOF - } - return r.tmp[0], nil -} - -func (r *readerWrapper) skipN(n int64) error { - n2, err := io.CopyN(io.Discard, r.r, n) - if n2 != n { - err = io.ErrUnexpectedEOF - } - return err -} diff --git a/vendor/github.com/klauspost/compress/zstd/bytereader.go b/vendor/github.com/klauspost/compress/zstd/bytereader.go deleted file mode 100644 index 0e59a242d8..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/bytereader.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -// byteReader provides a byte reader that reads -// little endian values from a byte stream. -// The input stream is manually advanced. -// The reader performs no bounds checks. -type byteReader struct { - b []byte - off int -} - -// advance the stream b n bytes. -func (b *byteReader) advance(n uint) { - b.off += int(n) -} - -// overread returns whether we have advanced too far. -func (b *byteReader) overread() bool { - return b.off > len(b.b) -} - -// Int32 returns a little endian int32 starting at current offset. 
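The readerWrapper above serves all header-sized reads out of a fixed 8-byte scratch array so they never allocate, and promotes a bare io.EOF to io.ErrUnexpectedEOF because a truncated header is always an error for its callers. A stripped-down sketch of that pattern, with hypothetical names:

    package main

    import (
        "bytes"
        "fmt"
        "io"
    )

    // smallReader keeps an 8-byte scratch so header-sized reads never allocate.
    type smallReader struct {
        r   io.Reader
        tmp [8]byte
    }

    func (s *smallReader) readSmall(n int) ([]byte, error) {
        if n > len(s.tmp) {
            return nil, fmt.Errorf("readSmall: %d > 8", n)
        }
        if _, err := io.ReadFull(s.r, s.tmp[:n]); err != nil {
            if err == io.EOF {
                // A truncated header is always an error for the caller.
                err = io.ErrUnexpectedEOF
            }
            return nil, err
        }
        return s.tmp[:n], nil
    }

    func main() {
        sr := &smallReader{r: bytes.NewReader([]byte{0x28, 0xb5, 0x2f, 0xfd})}
        b, err := sr.readSmall(4)
        fmt.Printf("% x %v\n", b, err)
    }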
-func (b byteReader) Int32() int32 { - b2 := b.b[b.off:] - b2 = b2[:4] - v3 := int32(b2[3]) - v2 := int32(b2[2]) - v1 := int32(b2[1]) - v0 := int32(b2[0]) - return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) -} - -// Uint8 returns the next byte -func (b *byteReader) Uint8() uint8 { - v := b.b[b.off] - return v -} - -// Uint32 returns a little endian uint32 starting at current offset. -func (b byteReader) Uint32() uint32 { - if r := b.remain(); r < 4 { - // Very rare - v := uint32(0) - for i := 1; i <= r; i++ { - v = (v << 8) | uint32(b.b[len(b.b)-i]) - } - return v - } - b2 := b.b[b.off:] - b2 = b2[:4] - v3 := uint32(b2[3]) - v2 := uint32(b2[2]) - v1 := uint32(b2[1]) - v0 := uint32(b2[0]) - return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) -} - -// Uint32NC returns a little endian uint32 starting at current offset. -// The caller must be sure if there are at least 4 bytes left. -func (b byteReader) Uint32NC() uint32 { - b2 := b.b[b.off:] - b2 = b2[:4] - v3 := uint32(b2[3]) - v2 := uint32(b2[2]) - v1 := uint32(b2[1]) - v0 := uint32(b2[0]) - return v0 | (v1 << 8) | (v2 << 16) | (v3 << 24) -} - -// unread returns the unread portion of the input. -func (b byteReader) unread() []byte { - return b.b[b.off:] -} - -// remain will return the number of bytes remaining. -func (b byteReader) remain() int { - return len(b.b) - b.off -} diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go deleted file mode 100644 index 6a5a2988b6..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/decodeheader.go +++ /dev/null @@ -1,261 +0,0 @@ -// Copyright 2020+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. - -package zstd - -import ( - "encoding/binary" - "errors" - "io" -) - -// HeaderMaxSize is the maximum size of a Frame and Block Header. -// If less is sent to Header.Decode it *may* still contain enough information. -const HeaderMaxSize = 14 + 3 - -// Header contains information about the first frame and block within that. -type Header struct { - // SingleSegment specifies whether the data is to be decompressed into a - // single contiguous memory segment. - // It implies that WindowSize is invalid and that FrameContentSize is valid. - SingleSegment bool - - // WindowSize is the window of data to keep while decoding. - // Will only be set if SingleSegment is false. - WindowSize uint64 - - // Dictionary ID. - // If 0, no dictionary. - DictionaryID uint32 - - // HasFCS specifies whether FrameContentSize has a valid value. - HasFCS bool - - // FrameContentSize is the expected uncompressed size of the entire frame. - FrameContentSize uint64 - - // Skippable will be true if the frame is meant to be skipped. - // This implies that FirstBlock.OK is false. - Skippable bool - - // SkippableID is the user-specific ID for the skippable frame. - // Valid values are between 0 to 15, inclusive. - SkippableID int - - // SkippableSize is the length of the user data to skip following - // the header. - SkippableSize uint32 - - // HeaderSize is the raw size of the frame header. - // - // For normal frames, it includes the size of the magic number and - // the size of the header (per section 3.1.1.1). - // It does not include the size for any data blocks (section 3.1.1.2) nor - // the size for the trailing content checksum. - // - // For skippable frames, this counts the size of the magic number - // along with the size of the size field of the payload. 
- // It does not include the size of the skippable payload itself. - // The total frame size is the HeaderSize plus the SkippableSize. - HeaderSize int - - // First block information. - FirstBlock struct { - // OK will be set if first block could be decoded. - OK bool - - // Is this the last block of a frame? - Last bool - - // Is the data compressed? - // If true CompressedSize will be populated. - // Unfortunately DecompressedSize cannot be determined - // without decoding the blocks. - Compressed bool - - // DecompressedSize is the expected decompressed size of the block. - // Will be 0 if it cannot be determined. - DecompressedSize int - - // CompressedSize of the data in the block. - // Does not include the block header. - // Will be equal to DecompressedSize if not Compressed. - CompressedSize int - } - - // If set there is a checksum present for the block content. - // The checksum field at the end is always 4 bytes long. - HasCheckSum bool -} - -// Decode the header from the beginning of the stream. -// This will decode the frame header and the first block header if enough bytes are provided. -// It is recommended to provide at least HeaderMaxSize bytes. -// If the frame header cannot be read an error will be returned. -// If there isn't enough input, io.ErrUnexpectedEOF is returned. -// The FirstBlock.OK will indicate if enough information was available to decode the first block header. -func (h *Header) Decode(in []byte) error { - _, err := h.DecodeAndStrip(in) - return err -} - -// DecodeAndStrip will decode the header from the beginning of the stream -// and on success return the remaining bytes. -// This will decode the frame header and the first block header if enough bytes are provided. -// It is recommended to provide at least HeaderMaxSize bytes. -// If the frame header cannot be read an error will be returned. -// If there isn't enough input, io.ErrUnexpectedEOF is returned. -// The FirstBlock.OK will indicate if enough information was available to decode the first block header. 
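Before the implementation, a small usage sketch of the exported Header API against the public package; it assumes a frame produced by a round-trip through the public encoder:

    package main

    import (
        "fmt"

        "github.com/klauspost/compress/zstd"
    )

    func main() {
        enc, _ := zstd.NewWriter(nil)
        frame := enc.EncodeAll([]byte("hello zstd header"), nil)

        var h zstd.Header
        if err := h.Decode(frame); err != nil {
            panic(err)
        }
        fmt.Println("single segment:", h.SingleSegment,
            "has fcs:", h.HasFCS,
            "fcs:", h.FrameContentSize,
            "checksum:", h.HasCheckSum)
    }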
-func (h *Header) DecodeAndStrip(in []byte) (remain []byte, err error) { - *h = Header{} - if len(in) < 4 { - return nil, io.ErrUnexpectedEOF - } - h.HeaderSize += 4 - b, in := in[:4], in[4:] - if string(b) != frameMagic { - if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 { - return nil, ErrMagicMismatch - } - if len(in) < 4 { - return nil, io.ErrUnexpectedEOF - } - h.HeaderSize += 4 - h.Skippable = true - h.SkippableID = int(b[0] & 0xf) - h.SkippableSize = binary.LittleEndian.Uint32(in) - return in[4:], nil - } - - // Read Window_Descriptor - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor - if len(in) < 1 { - return nil, io.ErrUnexpectedEOF - } - fhd, in := in[0], in[1:] - h.HeaderSize++ - h.SingleSegment = fhd&(1<<5) != 0 - h.HasCheckSum = fhd&(1<<2) != 0 - if fhd&(1<<3) != 0 { - return nil, errors.New("reserved bit set on frame header") - } - - if !h.SingleSegment { - if len(in) < 1 { - return nil, io.ErrUnexpectedEOF - } - var wd byte - wd, in = in[0], in[1:] - h.HeaderSize++ - windowLog := 10 + (wd >> 3) - windowBase := uint64(1) << windowLog - windowAdd := (windowBase / 8) * uint64(wd&0x7) - h.WindowSize = windowBase + windowAdd - } - - // Read Dictionary_ID - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id - if size := fhd & 3; size != 0 { - if size == 3 { - size = 4 - } - if len(in) < int(size) { - return nil, io.ErrUnexpectedEOF - } - b, in = in[:size], in[size:] - h.HeaderSize += int(size) - switch len(b) { - case 1: - h.DictionaryID = uint32(b[0]) - case 2: - h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) - case 4: - h.DictionaryID = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - } - } - - // Read Frame_Content_Size - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size - var fcsSize int - v := fhd >> 6 - switch v { - case 0: - if h.SingleSegment { - fcsSize = 1 - } - default: - fcsSize = 1 << v - } - - if fcsSize > 0 { - h.HasFCS = true - if len(in) < fcsSize { - return nil, io.ErrUnexpectedEOF - } - b, in = in[:fcsSize], in[fcsSize:] - h.HeaderSize += int(fcsSize) - switch len(b) { - case 1: - h.FrameContentSize = uint64(b[0]) - case 2: - // When FCS_Field_Size is 2, the offset of 256 is added. - h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 - case 4: - h.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) - case 8: - d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) - h.FrameContentSize = uint64(d1) | (uint64(d2) << 32) - } - } - - // Frame Header done, we will not fail from now on. - if len(in) < 3 { - return in, nil - } - tmp := in[:3] - bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16) - h.FirstBlock.Last = bh&1 != 0 - blockType := blockType((bh >> 1) & 3) - // find size. 
- cSize := int(bh >> 3) - switch blockType { - case blockTypeReserved: - return in, nil - case blockTypeRLE: - h.FirstBlock.Compressed = true - h.FirstBlock.DecompressedSize = cSize - h.FirstBlock.CompressedSize = 1 - case blockTypeCompressed: - h.FirstBlock.Compressed = true - h.FirstBlock.CompressedSize = cSize - case blockTypeRaw: - h.FirstBlock.DecompressedSize = cSize - h.FirstBlock.CompressedSize = cSize - default: - panic("Invalid block type") - } - - h.FirstBlock.OK = true - return in, nil -} - -// AppendTo will append the encoded header to the dst slice. -// There is no error checking performed on the header values. -func (h *Header) AppendTo(dst []byte) ([]byte, error) { - if h.Skippable { - magic := [4]byte{0x50, 0x2a, 0x4d, 0x18} - magic[0] |= byte(h.SkippableID & 0xf) - dst = append(dst, magic[:]...) - f := h.SkippableSize - return append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)), nil - } - f := frameHeader{ - ContentSize: h.FrameContentSize, - WindowSize: uint32(h.WindowSize), - SingleSegment: h.SingleSegment, - Checksum: h.HasCheckSum, - DictID: h.DictionaryID, - } - return f.appendTo(dst), nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go deleted file mode 100644 index bbca17234a..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ /dev/null @@ -1,948 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "context" - "encoding/binary" - "io" - "sync" - - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -// Decoder provides decoding of zstandard streams. -// The decoder has been designed to operate without allocations after a warmup. -// This means that you should store the decoder for best performance. -// To re-use a stream decoder, use the Reset(r io.Reader) error to switch to another stream. -// A decoder can safely be re-used even if the previous stream failed. -// To release the resources, you must call the Close() function on a decoder. -type Decoder struct { - o decoderOptions - - // Unreferenced decoders, ready for use. - decoders chan *blockDec - - // Current read position used for Reader functionality. - current decoderState - - // sync stream decoding - syncStream struct { - decodedFrame uint64 - br readerWrapper - enabled bool - inFrame bool - dstBuf []byte - } - - frame *frameDec - - // Custom dictionaries. - dicts map[uint32]*dict - - // streamWg is the waitgroup for all streams - streamWg sync.WaitGroup -} - -// decoderState is used for maintaining state when the decoder -// is used for streaming. -type decoderState struct { - // current block being written to stream. - decodeOutput - - // output in order to be written to stream. - output chan decodeOutput - - // cancel remaining output. - cancel context.CancelFunc - - // crc of current frame - crc *xxhash.Digest - - flushed bool -} - -var ( - // Check the interfaces we want to support. - _ = io.WriterTo(&Decoder{}) - _ = io.Reader(&Decoder{}) -) - -// NewReader creates a new decoder. -// A nil Reader can be provided in which case Reset can be used to start a decode. -// -// A Decoder can be used in two modes: -// -// 1) As a stream, or -// 2) For stateless decoding using DecodeAll. -// -// Only a single stream can be decoded concurrently, but the same decoder -// can run multiple concurrent stateless decodes. 
It is even possible to -// use stateless decodes while a stream is being decoded. -// -// The Reset function can be used to initiate a new stream, which will considerably -// reduce the allocations normally caused by NewReader. -func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { - initPredefined() - var d Decoder - d.o.setDefault() - for _, o := range opts { - err := o(&d.o) - if err != nil { - return nil, err - } - } - d.current.crc = xxhash.New() - d.current.flushed = true - - if r == nil { - d.current.err = ErrDecoderNilInput - } - - // Transfer option dicts. - d.dicts = make(map[uint32]*dict, len(d.o.dicts)) - for _, dc := range d.o.dicts { - d.dicts[dc.id] = dc - } - d.o.dicts = nil - - // Create decoders - d.decoders = make(chan *blockDec, d.o.concurrent) - for i := 0; i < d.o.concurrent; i++ { - dec := newBlockDec(d.o.lowMem) - dec.localFrame = newFrameDec(d.o) - d.decoders <- dec - } - - if r == nil { - return &d, nil - } - return &d, d.Reset(r) -} - -// Read bytes from the decompressed stream into p. -// Returns the number of bytes written and any error that occurred. -// When the stream is done, io.EOF will be returned. -func (d *Decoder) Read(p []byte) (int, error) { - var n int - for { - if len(d.current.b) > 0 { - filled := copy(p, d.current.b) - p = p[filled:] - d.current.b = d.current.b[filled:] - n += filled - } - if len(p) == 0 { - break - } - if len(d.current.b) == 0 { - // We have an error and no more data - if d.current.err != nil { - break - } - if !d.nextBlock(n == 0) { - return n, d.current.err - } - } - } - if len(d.current.b) > 0 { - if debugDecoder { - println("returning", n, "still bytes left:", len(d.current.b)) - } - // Only return error at end of block - return n, nil - } - if d.current.err != nil { - d.drainOutput() - } - if debugDecoder { - println("returning", n, d.current.err, len(d.decoders)) - } - return n, d.current.err -} - -// Reset will reset the decoder the supplied stream after the current has finished processing. -// Note that this functionality cannot be used after Close has been called. -// Reset can be called with a nil reader to release references to the previous reader. -// After being called with a nil reader, no other operations than Reset or DecodeAll or Close -// should be used. -func (d *Decoder) Reset(r io.Reader) error { - if d.current.err == ErrDecoderClosed { - return d.current.err - } - - d.drainOutput() - - d.syncStream.br.r = nil - if r == nil { - d.current.err = ErrDecoderNilInput - if len(d.current.b) > 0 { - d.current.b = d.current.b[:0] - } - d.current.flushed = true - return nil - } - - // If bytes buffer and < 5MB, do sync decoding anyway. - if bb, ok := r.(byter); ok && bb.Len() < d.o.decodeBufsBelow && !d.o.limitToCap { - bb2 := bb - if debugDecoder { - println("*bytes.Buffer detected, doing sync decode, len:", bb.Len()) - } - b := bb2.Bytes() - var dst []byte - if cap(d.syncStream.dstBuf) > 0 { - dst = d.syncStream.dstBuf[:0] - } - - dst, err := d.DecodeAll(b, dst) - if err == nil { - err = io.EOF - } - // Save output buffer - d.syncStream.dstBuf = dst - d.current.b = dst - d.current.err = err - d.current.flushed = true - if debugDecoder { - println("sync decode to", len(dst), "bytes, err:", err) - } - return nil - } - // Remove current block. - d.stashDecoder() - d.current.decodeOutput = decodeOutput{} - d.current.err = nil - d.current.flushed = false - d.current.d = nil - d.syncStream.dstBuf = nil - - // Ensure no-one else is still running... 
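Read and Reset above are the streaming half of the API: one long-lived decoder is pointed at successive streams with Reset, which avoids the allocations a fresh NewReader would incur. A typical reuse pattern, as a sketch with error handling trimmed to panics:

    package main

    import (
        "bytes"
        "io"
        "os"

        "github.com/klauspost/compress/zstd"
    )

    func main() {
        enc, _ := zstd.NewWriter(nil)
        frame := enc.EncodeAll([]byte("streamed payload\n"), nil)

        // A nil reader is allowed; Reset starts each stream.
        dec, err := zstd.NewReader(nil)
        if err != nil {
            panic(err)
        }
        defer dec.Close()

        if err := dec.Reset(bytes.NewReader(frame)); err != nil {
            panic(err)
        }
        // Decoder implements io.WriterTo, so io.Copy takes the direct path.
        if _, err := io.Copy(os.Stdout, dec); err != nil {
            panic(err)
        }
    }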
- d.streamWg.Wait() - if d.frame == nil { - d.frame = newFrameDec(d.o) - } - - if d.o.concurrent == 1 { - return d.startSyncDecoder(r) - } - - d.current.output = make(chan decodeOutput, d.o.concurrent) - ctx, cancel := context.WithCancel(context.Background()) - d.current.cancel = cancel - d.streamWg.Add(1) - go d.startStreamDecoder(ctx, r, d.current.output) - - return nil -} - -// drainOutput will drain the output until errEndOfStream is sent. -func (d *Decoder) drainOutput() { - if d.current.cancel != nil { - if debugDecoder { - println("cancelling current") - } - d.current.cancel() - d.current.cancel = nil - } - if d.current.d != nil { - if debugDecoder { - printf("re-adding current decoder %p, decoders: %d", d.current.d, len(d.decoders)) - } - d.decoders <- d.current.d - d.current.d = nil - d.current.b = nil - } - if d.current.output == nil || d.current.flushed { - println("current already flushed") - return - } - for v := range d.current.output { - if v.d != nil { - if debugDecoder { - printf("re-adding decoder %p", v.d) - } - d.decoders <- v.d - } - } - d.current.output = nil - d.current.flushed = true -} - -// WriteTo writes data to w until there's no more data to write or when an error occurs. -// The return value n is the number of bytes written. -// Any error encountered during the write is also returned. -func (d *Decoder) WriteTo(w io.Writer) (int64, error) { - var n int64 - for { - if len(d.current.b) > 0 { - n2, err2 := w.Write(d.current.b) - n += int64(n2) - if err2 != nil && (d.current.err == nil || d.current.err == io.EOF) { - d.current.err = err2 - } else if n2 != len(d.current.b) { - d.current.err = io.ErrShortWrite - } - } - if d.current.err != nil { - break - } - d.nextBlock(true) - } - err := d.current.err - if err != nil { - d.drainOutput() - } - if err == io.EOF { - err = nil - } - return n, err -} - -// DecodeAll allows stateless decoding of a blob of bytes. -// Output will be appended to dst, so if the destination size is known -// you can pre-allocate the destination slice to avoid allocations. -// DecodeAll can be used concurrently. -// The Decoder concurrency limits will be respected. -func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { - if d.decoders == nil { - return dst, ErrDecoderClosed - } - - // Grab a block decoder and frame decoder. 
- block := <-d.decoders - frame := block.localFrame - initialSize := len(dst) - defer func() { - if debugDecoder { - printf("re-adding decoder: %p", block) - } - frame.rawInput = nil - frame.bBuf = nil - if frame.history.decoders.br != nil { - frame.history.decoders.br.in = nil - } - d.decoders <- block - }() - frame.bBuf = input - - for { - frame.history.reset() - err := frame.reset(&frame.bBuf) - if err != nil { - if err == io.EOF { - if debugDecoder { - println("frame reset return EOF") - } - return dst, nil - } - return dst, err - } - if err = d.setDict(frame); err != nil { - return nil, err - } - if frame.WindowSize > d.o.maxWindowSize { - if debugDecoder { - println("window size exceeded:", frame.WindowSize, ">", d.o.maxWindowSize) - } - return dst, ErrWindowSizeExceeded - } - if frame.FrameContentSize != fcsUnknown { - if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)-initialSize) { - if debugDecoder { - println("decoder size exceeded; fcs:", frame.FrameContentSize, "> mcs:", d.o.maxDecodedSize-uint64(len(dst)-initialSize), "len:", len(dst)) - } - return dst, ErrDecoderSizeExceeded - } - if d.o.limitToCap && frame.FrameContentSize > uint64(cap(dst)-len(dst)) { - if debugDecoder { - println("decoder size exceeded; fcs:", frame.FrameContentSize, "> (cap-len)", cap(dst)-len(dst)) - } - return dst, ErrDecoderSizeExceeded - } - if cap(dst)-len(dst) < int(frame.FrameContentSize) { - dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)+compressedBlockOverAlloc) - copy(dst2, dst) - dst = dst2 - } - } - - if cap(dst) == 0 && !d.o.limitToCap { - // Allocate len(input) * 2 by default if nothing is provided - // and we didn't get frame content size. - size := len(input) * 2 - // Cap to 1 MB. - if size > 1<<20 { - size = 1 << 20 - } - if uint64(size) > d.o.maxDecodedSize { - size = int(d.o.maxDecodedSize) - } - dst = make([]byte, 0, size) - } - - dst, err = frame.runDecoder(dst, block) - if err != nil { - return dst, err - } - if uint64(len(dst)-initialSize) > d.o.maxDecodedSize { - return dst, ErrDecoderSizeExceeded - } - if len(frame.bBuf) == 0 { - if debugDecoder { - println("frame dbuf empty") - } - break - } - } - return dst, nil -} - -// nextBlock returns the next block. -// If an error occurs d.err will be set. -// Optionally the function can block for new output. -// If non-blocking mode is used the returned boolean will be false -// if no data was available without blocking. -func (d *Decoder) nextBlock(blocking bool) (ok bool) { - if d.current.err != nil { - // Keep error state. - return false - } - d.current.b = d.current.b[:0] - - // SYNC: - if d.syncStream.enabled { - if !blocking { - return false - } - ok = d.nextBlockSync() - if !ok { - d.stashDecoder() - } - return ok - } - - //ASYNC: - d.stashDecoder() - if blocking { - d.current.decodeOutput, ok = <-d.current.output - } else { - select { - case d.current.decodeOutput, ok = <-d.current.output: - default: - return false - } - } - if !ok { - // This should not happen, so signal error state... 
- d.current.err = io.ErrUnexpectedEOF - return false - } - next := d.current.decodeOutput - if next.d != nil && next.d.async.newHist != nil { - d.current.crc.Reset() - } - if debugDecoder { - var tmp [4]byte - binary.LittleEndian.PutUint32(tmp[:], uint32(xxhash.Sum64(next.b))) - println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp) - } - - if d.o.ignoreChecksum { - return true - } - - if len(next.b) > 0 { - d.current.crc.Write(next.b) - } - if next.err == nil && next.d != nil && next.d.hasCRC { - got := uint32(d.current.crc.Sum64()) - if got != next.d.checkCRC { - if debugDecoder { - printf("CRC Check Failed: %08x (got) != %08x (on stream)\n", got, next.d.checkCRC) - } - d.current.err = ErrCRCMismatch - } else { - if debugDecoder { - printf("CRC ok %08x\n", got) - } - } - } - - return true -} - -func (d *Decoder) nextBlockSync() (ok bool) { - if d.current.d == nil { - d.current.d = <-d.decoders - } - for len(d.current.b) == 0 { - if !d.syncStream.inFrame { - d.frame.history.reset() - d.current.err = d.frame.reset(&d.syncStream.br) - if d.current.err == nil { - d.current.err = d.setDict(d.frame) - } - if d.current.err != nil { - return false - } - if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize { - d.current.err = ErrDecoderSizeExceeded - return false - } - - d.syncStream.decodedFrame = 0 - d.syncStream.inFrame = true - } - d.current.err = d.frame.next(d.current.d) - if d.current.err != nil { - return false - } - d.frame.history.ensureBlock() - if debugDecoder { - println("History trimmed:", len(d.frame.history.b), "decoded already:", d.syncStream.decodedFrame) - } - histBefore := len(d.frame.history.b) - d.current.err = d.current.d.decodeBuf(&d.frame.history) - - if d.current.err != nil { - println("error after:", d.current.err) - return false - } - d.current.b = d.frame.history.b[histBefore:] - if debugDecoder { - println("history after:", len(d.frame.history.b)) - } - - // Check frame size (before CRC) - d.syncStream.decodedFrame += uint64(len(d.current.b)) - if d.syncStream.decodedFrame > d.frame.FrameContentSize { - if debugDecoder { - printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) - } - d.current.err = ErrFrameSizeExceeded - return false - } - - // Check FCS - if d.current.d.Last && d.frame.FrameContentSize != fcsUnknown && d.syncStream.decodedFrame != d.frame.FrameContentSize { - if debugDecoder { - printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) - } - d.current.err = ErrFrameSizeMismatch - return false - } - - // Update/Check CRC - if d.frame.HasCheckSum { - if !d.o.ignoreChecksum { - d.frame.crc.Write(d.current.b) - } - if d.current.d.Last { - if !d.o.ignoreChecksum { - d.current.err = d.frame.checkCRC() - } else { - d.current.err = d.frame.consumeCRC() - } - if d.current.err != nil { - println("CRC error:", d.current.err) - return false - } - } - } - d.syncStream.inFrame = !d.current.d.Last - } - return true -} - -func (d *Decoder) stashDecoder() { - if d.current.d != nil { - if debugDecoder { - printf("re-adding current decoder %p", d.current.d) - } - d.decoders <- d.current.d - d.current.d = nil - } -} - -// Close will release all resources. -// It is NOT possible to reuse the decoder after this. 
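DecodeAll and Close (defined next) round out the stateless API: the intended usage is one shared decoder serving many concurrent DecodeAll calls, released exactly once at shutdown. A minimal sketch:

    package main

    import (
        "fmt"

        "github.com/klauspost/compress/zstd"
    )

    func main() {
        enc, _ := zstd.NewWriter(nil)
        blob := enc.EncodeAll([]byte("stateless round trip"), nil)

        // Concurrency 0 allows up to GOMAXPROCS concurrent DecodeAll calls.
        dec, err := zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))
        if err != nil {
            panic(err)
        }
        defer dec.Close() // the decoder must not be reused afterwards

        out, err := dec.DecodeAll(blob, nil)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(out))
    }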
-func (d *Decoder) Close() { - if d.current.err == ErrDecoderClosed { - return - } - d.drainOutput() - if d.current.cancel != nil { - d.current.cancel() - d.streamWg.Wait() - d.current.cancel = nil - } - if d.decoders != nil { - close(d.decoders) - for dec := range d.decoders { - dec.Close() - } - d.decoders = nil - } - if d.current.d != nil { - d.current.d.Close() - d.current.d = nil - } - d.current.err = ErrDecoderClosed -} - -// IOReadCloser returns the decoder as an io.ReadCloser for convenience. -// Any changes to the decoder will be reflected, so the returned ReadCloser -// can be reused along with the decoder. -// io.WriterTo is also supported by the returned ReadCloser. -func (d *Decoder) IOReadCloser() io.ReadCloser { - return closeWrapper{d: d} -} - -// closeWrapper wraps a function call as a closer. -type closeWrapper struct { - d *Decoder -} - -// WriteTo forwards WriteTo calls to the decoder. -func (c closeWrapper) WriteTo(w io.Writer) (n int64, err error) { - return c.d.WriteTo(w) -} - -// Read forwards read calls to the decoder. -func (c closeWrapper) Read(p []byte) (n int, err error) { - return c.d.Read(p) -} - -// Close closes the decoder. -func (c closeWrapper) Close() error { - c.d.Close() - return nil -} - -type decodeOutput struct { - d *blockDec - b []byte - err error -} - -func (d *Decoder) startSyncDecoder(r io.Reader) error { - d.frame.history.reset() - d.syncStream.br = readerWrapper{r: r} - d.syncStream.inFrame = false - d.syncStream.enabled = true - d.syncStream.decodedFrame = 0 - return nil -} - -// Create Decoder: -// ASYNC: -// Spawn 3 go routines. -// 0: Read frames and decode block literals. -// 1: Decode sequences. -// 2: Execute sequences, send to output. -func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) { - defer d.streamWg.Done() - br := readerWrapper{r: r} - - var seqDecode = make(chan *blockDec, d.o.concurrent) - var seqExecute = make(chan *blockDec, d.o.concurrent) - - // Async 1: Decode sequences... - go func() { - var hist history - var hasErr bool - - for block := range seqDecode { - if hasErr { - if block != nil { - seqExecute <- block - } - continue - } - if block.async.newHist != nil { - if debugDecoder { - println("Async 1: new history, recent:", block.async.newHist.recentOffsets) - } - hist.reset() - hist.decoders = block.async.newHist.decoders - hist.recentOffsets = block.async.newHist.recentOffsets - hist.windowSize = block.async.newHist.windowSize - if block.async.newHist.dict != nil { - hist.setDict(block.async.newHist.dict) - } - } - if block.err != nil || block.Type != blockTypeCompressed { - hasErr = block.err != nil - seqExecute <- block - continue - } - - hist.decoders.literals = block.async.literals - block.err = block.prepareSequences(block.async.seqData, &hist) - if debugDecoder && block.err != nil { - println("prepareSequences returned:", block.err) - } - hasErr = block.err != nil - if block.err == nil { - block.err = block.decodeSequences(&hist) - if debugDecoder && block.err != nil { - println("decodeSequences returned:", block.err) - } - hasErr = block.err != nil - // block.async.sequence = hist.decoders.seq[:hist.decoders.nSeqs] - block.async.seqSize = hist.decoders.seqSize - } - seqExecute <- block - } - close(seqExecute) - hist.reset() - }() - - var wg sync.WaitGroup - wg.Add(1) - - // Async 3: Execute sequences... 
- frameHistCache := d.frame.history.b - go func() { - var hist history - var decodedFrame uint64 - var fcs uint64 - var hasErr bool - for block := range seqExecute { - out := decodeOutput{err: block.err, d: block} - if block.err != nil || hasErr { - hasErr = true - output <- out - continue - } - if block.async.newHist != nil { - if debugDecoder { - println("Async 2: new history") - } - hist.reset() - hist.windowSize = block.async.newHist.windowSize - hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer - if block.async.newHist.dict != nil { - hist.setDict(block.async.newHist.dict) - } - - if cap(hist.b) < hist.allocFrameBuffer { - if cap(frameHistCache) >= hist.allocFrameBuffer { - hist.b = frameHistCache - } else { - hist.b = make([]byte, 0, hist.allocFrameBuffer) - println("Alloc history sized", hist.allocFrameBuffer) - } - } - hist.b = hist.b[:0] - fcs = block.async.fcs - decodedFrame = 0 - } - do := decodeOutput{err: block.err, d: block} - switch block.Type { - case blockTypeRLE: - if debugDecoder { - println("add rle block length:", block.RLESize) - } - - if cap(block.dst) < int(block.RLESize) { - if block.lowMem { - block.dst = make([]byte, block.RLESize) - } else { - block.dst = make([]byte, maxCompressedBlockSize) - } - } - block.dst = block.dst[:block.RLESize] - v := block.data[0] - for i := range block.dst { - block.dst[i] = v - } - hist.append(block.dst) - do.b = block.dst - case blockTypeRaw: - if debugDecoder { - println("add raw block length:", len(block.data)) - } - hist.append(block.data) - do.b = block.data - case blockTypeCompressed: - if debugDecoder { - println("execute with history length:", len(hist.b), "window:", hist.windowSize) - } - hist.decoders.seqSize = block.async.seqSize - hist.decoders.literals = block.async.literals - do.err = block.executeSequences(&hist) - hasErr = do.err != nil - if debugDecoder && hasErr { - println("executeSequences returned:", do.err) - } - do.b = block.dst - } - if !hasErr { - decodedFrame += uint64(len(do.b)) - if decodedFrame > fcs { - println("fcs exceeded", block.Last, fcs, decodedFrame) - do.err = ErrFrameSizeExceeded - hasErr = true - } else if block.Last && fcs != fcsUnknown && decodedFrame != fcs { - do.err = ErrFrameSizeMismatch - hasErr = true - } else { - if debugDecoder { - println("fcs ok", block.Last, fcs, decodedFrame) - } - } - } - output <- do - } - close(output) - frameHistCache = hist.b - wg.Done() - if debugDecoder { - println("decoder goroutines finished") - } - hist.reset() - }() - - var hist history -decodeStream: - for { - var hasErr bool - hist.reset() - decodeBlock := func(block *blockDec) { - if hasErr { - if block != nil { - seqDecode <- block - } - return - } - if block.err != nil || block.Type != blockTypeCompressed { - hasErr = block.err != nil - seqDecode <- block - return - } - - remain, err := block.decodeLiterals(block.data, &hist) - block.err = err - hasErr = block.err != nil - if err == nil { - block.async.literals = hist.decoders.literals - block.async.seqData = remain - } else if debugDecoder { - println("decodeLiterals error:", err) - } - seqDecode <- block - } - frame := d.frame - if debugDecoder { - println("New frame...") - } - var historySent bool - frame.history.reset() - err := frame.reset(&br) - if debugDecoder && err != nil { - println("Frame decoder returned", err) - } - if err == nil { - err = d.setDict(frame) - } - if err == nil && d.frame.WindowSize > d.o.maxWindowSize { - if debugDecoder { - println("decoder size exceeded, fws:", d.frame.WindowSize, "> mws:", 
d.o.maxWindowSize) - } - - err = ErrDecoderSizeExceeded - } - if err != nil { - select { - case <-ctx.Done(): - case dec := <-d.decoders: - dec.sendErr(err) - decodeBlock(dec) - } - break decodeStream - } - - // Go through all blocks of the frame. - for { - var dec *blockDec - select { - case <-ctx.Done(): - break decodeStream - case dec = <-d.decoders: - // Once we have a decoder, we MUST return it. - } - err := frame.next(dec) - if !historySent { - h := frame.history - if debugDecoder { - println("Alloc History:", h.allocFrameBuffer) - } - hist.reset() - if h.dict != nil { - hist.setDict(h.dict) - } - dec.async.newHist = &h - dec.async.fcs = frame.FrameContentSize - historySent = true - } else { - dec.async.newHist = nil - } - if debugDecoder && err != nil { - println("next block returned error:", err) - } - dec.err = err - dec.hasCRC = false - if dec.Last && frame.HasCheckSum && err == nil { - crc, err := frame.rawInput.readSmall(4) - if len(crc) < 4 { - if err == nil { - err = io.ErrUnexpectedEOF - - } - println("CRC missing?", err) - dec.err = err - } else { - dec.checkCRC = binary.LittleEndian.Uint32(crc) - dec.hasCRC = true - if debugDecoder { - printf("found crc to check: %08x\n", dec.checkCRC) - } - } - } - err = dec.err - last := dec.Last - decodeBlock(dec) - if err != nil { - break decodeStream - } - if last { - break - } - } - } - close(seqDecode) - wg.Wait() - hist.reset() - d.frame.history.b = frameHistCache -} - -func (d *Decoder) setDict(frame *frameDec) (err error) { - dict, ok := d.dicts[frame.DictionaryID] - if ok { - if debugDecoder { - println("setting dict", frame.DictionaryID) - } - frame.history.setDict(dict) - } else if frame.DictionaryID != 0 { - // A zero or missing dictionary id is ambiguous: - // either dictionary zero, or no dictionary. In particular, - // zstd --patch-from uses this id for the source file, - // so only return an error if the dictionary id is not zero. - err = ErrUnknownDictionary - } - return err -} diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go deleted file mode 100644 index 774c5f00fe..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "errors" - "fmt" - "math/bits" - "runtime" -) - -// DOption is an option for creating a decoder. -type DOption func(*decoderOptions) error - -// options retains accumulated state of multiple options. -type decoderOptions struct { - lowMem bool - concurrent int - maxDecodedSize uint64 - maxWindowSize uint64 - dicts []*dict - ignoreChecksum bool - limitToCap bool - decodeBufsBelow int -} - -func (o *decoderOptions) setDefault() { - *o = decoderOptions{ - // use less ram: true for now, but may change. - lowMem: true, - concurrent: runtime.GOMAXPROCS(0), - maxWindowSize: MaxWindowSize, - decodeBufsBelow: 128 << 10, - } - if o.concurrent > 4 { - o.concurrent = 4 - } - o.maxDecodedSize = 64 << 30 -} - -// WithDecoderLowmem will set whether to use a lower amount of memory, -// but possibly have to allocate more while running. -func WithDecoderLowmem(b bool) DOption { - return func(o *decoderOptions) error { o.lowMem = b; return nil } -} - -// WithDecoderConcurrency sets the number of created decoders. 
-// When decoding block with DecodeAll, this will limit the number -// of possible concurrently running decodes. -// When decoding streams, this will limit the number of -// inflight blocks. -// When decoding streams and setting maximum to 1, -// no async decoding will be done. -// When a value of 0 is provided GOMAXPROCS will be used. -// By default this will be set to 4 or GOMAXPROCS, whatever is lower. -func WithDecoderConcurrency(n int) DOption { - return func(o *decoderOptions) error { - if n < 0 { - return errors.New("concurrency must be at least 1") - } - if n == 0 { - o.concurrent = runtime.GOMAXPROCS(0) - } else { - o.concurrent = n - } - return nil - } -} - -// WithDecoderMaxMemory allows to set a maximum decoded size for in-memory -// non-streaming operations or maximum window size for streaming operations. -// This can be used to control memory usage of potentially hostile content. -// Maximum is 1 << 63 bytes. Default is 64GiB. -func WithDecoderMaxMemory(n uint64) DOption { - return func(o *decoderOptions) error { - if n == 0 { - return errors.New("WithDecoderMaxMemory must be at least 1") - } - if n > 1<<63 { - return errors.New("WithDecoderMaxmemory must be less than 1 << 63") - } - o.maxDecodedSize = n - return nil - } -} - -// WithDecoderDicts allows to register one or more dictionaries for the decoder. -// -// Each slice in dict must be in the [dictionary format] produced by -// "zstd --train" from the Zstandard reference implementation. -// -// If several dictionaries with the same ID are provided, the last one will be used. -// -// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format -func WithDecoderDicts(dicts ...[]byte) DOption { - return func(o *decoderOptions) error { - for _, b := range dicts { - d, err := loadDict(b) - if err != nil { - return err - } - o.dicts = append(o.dicts, d) - } - return nil - } -} - -// WithDecoderDictRaw registers a dictionary that may be used by the decoder. -// The slice content can be arbitrary data. -func WithDecoderDictRaw(id uint32, content []byte) DOption { - return func(o *decoderOptions) error { - if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { - return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) - } - o.dicts = append(o.dicts, &dict{id: id, content: content, offsets: [3]int{1, 4, 8}}) - return nil - } -} - -// WithDecoderMaxWindow allows to set a maximum window size for decodes. -// This allows rejecting packets that will cause big memory usage. -// The Decoder will likely allocate more memory based on the WithDecoderLowmem setting. -// If WithDecoderMaxMemory is set to a lower value, that will be used. -// Default is 512MB, Maximum is ~3.75 TB as per zstandard spec. -func WithDecoderMaxWindow(size uint64) DOption { - return func(o *decoderOptions) error { - if size < MinWindowSize { - return errors.New("WithMaxWindowSize must be at least 1KB, 1024 bytes") - } - if size > (1<<41)+7*(1<<38) { - return errors.New("WithMaxWindowSize must be less than (1<<41) + 7*(1<<38) ~ 3.75TB") - } - o.maxWindowSize = size - return nil - } -} - -// WithDecodeAllCapLimit will limit DecodeAll to decoding cap(dst)-len(dst) bytes, -// or any size set in WithDecoderMaxMemory. -// This can be used to limit decoding to a specific maximum output size. -// Disabled by default. 
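Taken together, the options above compose at construction time. For example, a decoder hardened against untrusted input might bound both the total decoded size and the window, and stay synchronous; the limits below are illustrative, not recommendations:

    package main

    import "github.com/klauspost/compress/zstd"

    func hardenedReader() (*zstd.Decoder, error) {
        return zstd.NewReader(nil,
            zstd.WithDecoderMaxMemory(64<<20), // at most 64 MiB decoded per operation
            zstd.WithDecoderMaxWindow(8<<20),  // reject frames demanding >8 MiB windows
            zstd.WithDecoderConcurrency(1),    // synchronous decoding, no goroutines
            zstd.WithDecoderLowmem(true),      // prefer smaller allocations
        )
    }

    func main() {
        dec, err := hardenedReader()
        if err != nil {
            panic(err)
        }
        defer dec.Close()
    }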
-func WithDecodeAllCapLimit(b bool) DOption { - return func(o *decoderOptions) error { - o.limitToCap = b - return nil - } -} - -// WithDecodeBuffersBelow will fully decode readers that have a -// `Bytes() []byte` and `Len() int` interface similar to bytes.Buffer. -// This typically uses less allocations but will have the full decompressed object in memory. -// Note that DecodeAllCapLimit will disable this, as well as giving a size of 0 or less. -// Default is 128KiB. -func WithDecodeBuffersBelow(size int) DOption { - return func(o *decoderOptions) error { - o.decodeBufsBelow = size - return nil - } -} - -// IgnoreChecksum allows to forcibly ignore checksum checking. -func IgnoreChecksum(b bool) DOption { - return func(o *decoderOptions) error { - o.ignoreChecksum = b - return nil - } -} diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go deleted file mode 100644 index b7b83164bc..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/dict.go +++ /dev/null @@ -1,565 +0,0 @@ -package zstd - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "math" - "sort" - - "github.com/klauspost/compress/huff0" -) - -type dict struct { - id uint32 - - litEnc *huff0.Scratch - llDec, ofDec, mlDec sequenceDec - offsets [3]int - content []byte -} - -const dictMagic = "\x37\xa4\x30\xec" - -// Maximum dictionary size for the reference implementation (1.5.3) is 2 GiB. -const dictMaxLength = 1 << 31 - -// ID returns the dictionary id or 0 if d is nil. -func (d *dict) ID() uint32 { - if d == nil { - return 0 - } - return d.id -} - -// ContentSize returns the dictionary content size or 0 if d is nil. -func (d *dict) ContentSize() int { - if d == nil { - return 0 - } - return len(d.content) -} - -// Content returns the dictionary content. -func (d *dict) Content() []byte { - if d == nil { - return nil - } - return d.content -} - -// Offsets returns the initial offsets. -func (d *dict) Offsets() [3]int { - if d == nil { - return [3]int{} - } - return d.offsets -} - -// LitEncoder returns the literal encoder. -func (d *dict) LitEncoder() *huff0.Scratch { - if d == nil { - return nil - } - return d.litEnc -} - -// Load a dictionary as described in -// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format -func loadDict(b []byte) (*dict, error) { - // Check static field size. - if len(b) <= 8+(3*4) { - return nil, io.ErrUnexpectedEOF - } - d := dict{ - llDec: sequenceDec{fse: &fseDecoder{}}, - ofDec: sequenceDec{fse: &fseDecoder{}}, - mlDec: sequenceDec{fse: &fseDecoder{}}, - } - if string(b[:4]) != dictMagic { - return nil, ErrMagicMismatch - } - d.id = binary.LittleEndian.Uint32(b[4:8]) - if d.id == 0 { - return nil, errors.New("dictionaries cannot have ID 0") - } - - // Read literal table - var err error - d.litEnc, b, err = huff0.ReadTable(b[8:], nil) - if err != nil { - return nil, fmt.Errorf("loading literal table: %w", err) - } - d.litEnc.Reuse = huff0.ReusePolicyMust - - br := byteReader{ - b: b, - off: 0, - } - readDec := func(i tableIndex, dec *fseDecoder) error { - if err := dec.readNCount(&br, uint16(maxTableSymbol[i])); err != nil { - return err - } - if br.overread() { - return io.ErrUnexpectedEOF - } - err = dec.transform(symbolTableX[i]) - if err != nil { - println("Transform table error:", err) - return err - } - if debugDecoder || debugEncoder { - println("Read table ok", "symbolLen:", dec.symbolLen) - } - // Set decoders as predefined so they aren't reused. 
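An aside on usage: structured dictionaries go through loadDict and must carry the magic and tables it parses, while raw content dictionaries skip the format entirely. A round-trip sketch with a raw dictionary; WithEncoderDictRaw is assumed to be the encoder-side counterpart of the WithDecoderDictRaw option above in the same module version:

package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Raw dictionaries carry no tables; the content is just shared history.
	// Encoder and decoder must agree on both the ID and the content.
	dict := []byte("common prefix shared by many small messages: ")
	msg := append(append([]byte{}, dict...), "hello"...)

	enc, err := zstd.NewWriter(nil, zstd.WithEncoderDictRaw(42, dict))
	if err != nil {
		panic(err)
	}
	compressed := enc.EncodeAll(msg, nil)
	enc.Close()

	dec, err := zstd.NewReader(nil, zstd.WithDecoderDictRaw(42, dict))
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	out, err := dec.DecodeAll(compressed, nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d -> %d bytes, match: %v\n", len(msg), len(compressed), string(out) == string(msg))
}

The frame written by the encoder carries the dictionary ID, which is how the setDict lookup earlier in this diff selects the registered dictionary on the decode side.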
- dec.preDefined = true - return nil - } - - if err := readDec(tableOffsets, d.ofDec.fse); err != nil { - return nil, err - } - if err := readDec(tableMatchLengths, d.mlDec.fse); err != nil { - return nil, err - } - if err := readDec(tableLiteralLengths, d.llDec.fse); err != nil { - return nil, err - } - if br.remain() < 12 { - return nil, io.ErrUnexpectedEOF - } - - d.offsets[0] = int(br.Uint32()) - br.advance(4) - d.offsets[1] = int(br.Uint32()) - br.advance(4) - d.offsets[2] = int(br.Uint32()) - br.advance(4) - if d.offsets[0] <= 0 || d.offsets[1] <= 0 || d.offsets[2] <= 0 { - return nil, errors.New("invalid offset in dictionary") - } - d.content = make([]byte, br.remain()) - copy(d.content, br.unread()) - if d.offsets[0] > len(d.content) || d.offsets[1] > len(d.content) || d.offsets[2] > len(d.content) { - return nil, fmt.Errorf("initial offset bigger than dictionary content size %d, offsets: %v", len(d.content), d.offsets) - } - - return &d, nil -} - -// InspectDictionary loads a zstd dictionary and provides functions to inspect the content. -func InspectDictionary(b []byte) (interface { - ID() uint32 - ContentSize() int - Content() []byte - Offsets() [3]int - LitEncoder() *huff0.Scratch -}, error) { - initPredefined() - d, err := loadDict(b) - return d, err -} - -type BuildDictOptions struct { - // Dictionary ID. - ID uint32 - - // Content to use to create dictionary tables. - Contents [][]byte - - // History to use for all blocks. - History []byte - - // Offsets to use. - Offsets [3]int - - // CompatV155 will make the dictionary compatible with Zstd v1.5.5 and earlier. - // See https://github.com/facebook/zstd/issues/3724 - CompatV155 bool - - // Use the specified encoder level. - // The dictionary will be built using the specified encoder level, - // which will reflect speed and make the dictionary tailored for that level. - // If not set SpeedBestCompression will be used. - Level EncoderLevel - - // DebugOut will write stats and other details here if set. - DebugOut io.Writer -} - -func BuildDict(o BuildDictOptions) ([]byte, error) { - initPredefined() - hist := o.History - contents := o.Contents - debug := o.DebugOut != nil - println := func(args ...interface{}) { - if o.DebugOut != nil { - fmt.Fprintln(o.DebugOut, args...) - } - } - printf := func(s string, args ...interface{}) { - if o.DebugOut != nil { - fmt.Fprintf(o.DebugOut, s, args...) - } - } - print := func(args ...interface{}) { - if o.DebugOut != nil { - fmt.Fprint(o.DebugOut, args...) 
- } - } - - if int64(len(hist)) > dictMaxLength { - return nil, fmt.Errorf("dictionary of size %d > %d", len(hist), int64(dictMaxLength)) - } - if len(hist) < 8 { - return nil, fmt.Errorf("dictionary of size %d < %d", len(hist), 8) - } - if len(contents) == 0 { - return nil, errors.New("no content provided") - } - d := dict{ - id: o.ID, - litEnc: nil, - llDec: sequenceDec{}, - ofDec: sequenceDec{}, - mlDec: sequenceDec{}, - offsets: o.Offsets, - content: hist, - } - block := blockEnc{lowMem: false} - block.init() - enc := encoder(&bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(maxMatchLen), bufferReset: math.MaxInt32 - int32(maxMatchLen*2), lowMem: false}}) - if o.Level != 0 { - eOpts := encoderOptions{ - level: o.Level, - blockSize: maxMatchLen, - windowSize: maxMatchLen, - dict: &d, - lowMem: false, - } - enc = eOpts.encoder() - } else { - o.Level = SpeedBestCompression - } - var ( - remain [256]int - ll [256]int - ml [256]int - of [256]int - ) - addValues := func(dst *[256]int, src []byte) { - for _, v := range src { - dst[v]++ - } - } - addHist := func(dst *[256]int, src *[256]uint32) { - for i, v := range src { - dst[i] += int(v) - } - } - seqs := 0 - nUsed := 0 - litTotal := 0 - newOffsets := make(map[uint32]int, 1000) - for _, b := range contents { - block.reset(nil) - if len(b) < 8 { - continue - } - nUsed++ - enc.Reset(&d, true) - enc.Encode(&block, b) - addValues(&remain, block.literals) - litTotal += len(block.literals) - if len(block.sequences) == 0 { - continue - } - seqs += len(block.sequences) - block.genCodes() - addHist(&ll, block.coders.llEnc.Histogram()) - addHist(&ml, block.coders.mlEnc.Histogram()) - addHist(&of, block.coders.ofEnc.Histogram()) - for i, seq := range block.sequences { - if i > 3 { - break - } - offset := seq.offset - if offset == 0 { - continue - } - if int(offset) >= len(o.History) { - continue - } - if offset > 3 { - newOffsets[offset-3]++ - } else { - newOffsets[uint32(o.Offsets[offset-1])]++ - } - } - } - // Find most used offsets. - var sortedOffsets []uint32 - for k := range newOffsets { - sortedOffsets = append(sortedOffsets, k) - } - sort.Slice(sortedOffsets, func(i, j int) bool { - a, b := sortedOffsets[i], sortedOffsets[j] - if a == b { - // Prefer the longer offset - return sortedOffsets[i] > sortedOffsets[j] - } - return newOffsets[sortedOffsets[i]] > newOffsets[sortedOffsets[j]] - }) - if len(sortedOffsets) > 3 { - if debug { - print("Offsets:") - for i, v := range sortedOffsets { - if i > 20 { - break - } - printf("[%d: %d],", v, newOffsets[v]) - } - println("") - } - - sortedOffsets = sortedOffsets[:3] - } - for i, v := range sortedOffsets { - o.Offsets[i] = int(v) - } - if debug { - println("New repeat offsets", o.Offsets) - } - - if nUsed == 0 || seqs == 0 { - return nil, fmt.Errorf("%d blocks, %d sequences found", nUsed, seqs) - } - if debug { - println("Sequences:", seqs, "Blocks:", nUsed, "Literals:", litTotal) - } - if seqs/nUsed < 512 { - // Use 512 as minimum. - nUsed = seqs / 512 - if nUsed == 0 { - nUsed = 1 - } - } - copyHist := func(dst *fseEncoder, src *[256]int) ([]byte, error) { - hist := dst.Histogram() - var maxSym uint8 - var maxCount int - var fakeLength int - for i, v := range src { - if v > 0 { - v = v / nUsed - if v == 0 { - v = 1 - } - } - if v > maxCount { - maxCount = v - } - if v != 0 { - maxSym = uint8(i) - } - fakeLength += v - hist[i] = uint32(v) - } - - // Ensure we aren't trying to represent RLE. 
- if maxCount == fakeLength { - for i := range hist { - if uint8(i) == maxSym { - fakeLength++ - maxSym++ - hist[i+1] = 1 - if maxSym > 1 { - break - } - } - if hist[0] == 0 { - fakeLength++ - hist[i] = 1 - if maxSym > 1 { - break - } - } - } - } - - dst.HistogramFinished(maxSym, maxCount) - dst.reUsed = false - dst.useRLE = false - err := dst.normalizeCount(fakeLength) - if err != nil { - return nil, err - } - if debug { - println("RAW:", dst.count[:maxSym+1], "NORM:", dst.norm[:maxSym+1], "LEN:", fakeLength) - } - return dst.writeCount(nil) - } - if debug { - print("Literal lengths: ") - } - llTable, err := copyHist(block.coders.llEnc, &ll) - if err != nil { - return nil, err - } - if debug { - print("Match lengths: ") - } - mlTable, err := copyHist(block.coders.mlEnc, &ml) - if err != nil { - return nil, err - } - if debug { - print("Offsets: ") - } - ofTable, err := copyHist(block.coders.ofEnc, &of) - if err != nil { - return nil, err - } - - // Literal table - avgSize := litTotal - if avgSize > huff0.BlockSizeMax/2 { - avgSize = huff0.BlockSizeMax / 2 - } - huffBuff := make([]byte, 0, avgSize) - // Target size - div := litTotal / avgSize - if div < 1 { - div = 1 - } - if debug { - println("Huffman weights:") - } - for i, n := range remain[:] { - if n > 0 { - n = n / div - // Allow all entries to be represented. - if n == 0 { - n = 1 - } - huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) - if debug { - printf("[%d: %d], ", i, n) - } - } - } - if o.CompatV155 && remain[255]/div == 0 { - huffBuff = append(huffBuff, 255) - } - scratch := &huff0.Scratch{TableLog: 11} - for tries := 0; tries < 255; tries++ { - scratch = &huff0.Scratch{TableLog: 11} - _, _, err = huff0.Compress1X(huffBuff, scratch) - if err == nil { - break - } - if debug { - printf("Try %d: Huffman error: %v\n", tries+1, err) - } - huffBuff = huffBuff[:0] - if tries == 250 { - if debug { - println("Huffman: Bailing out with predefined table") - } - - // Bail out.... Just generate something - huffBuff = append(huffBuff, bytes.Repeat([]byte{255}, 10000)...) - for i := 0; i < 128; i++ { - huffBuff = append(huffBuff, byte(i)) - } - continue - } - if errors.Is(err, huff0.ErrIncompressible) { - // Try truncating least common. - for i, n := range remain[:] { - if n > 0 { - n = n / (div * (i + 1)) - if n > 0 { - huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) - } - } - } - if o.CompatV155 && len(huffBuff) > 0 && huffBuff[len(huffBuff)-1] != 255 { - huffBuff = append(huffBuff, 255) - } - if len(huffBuff) == 0 { - huffBuff = append(huffBuff, 0, 255) - } - } - if errors.Is(err, huff0.ErrUseRLE) { - for i, n := range remain[:] { - n = n / (div * (i + 1)) - // Allow all entries to be represented. - if n == 0 { - n = 1 - } - huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...) 
- } - } - } - - var out bytes.Buffer - out.Write([]byte(dictMagic)) - out.Write(binary.LittleEndian.AppendUint32(nil, o.ID)) - out.Write(scratch.OutTable) - if debug { - println("huff table:", len(scratch.OutTable), "bytes") - println("of table:", len(ofTable), "bytes") - println("ml table:", len(mlTable), "bytes") - println("ll table:", len(llTable), "bytes") - } - out.Write(ofTable) - out.Write(mlTable) - out.Write(llTable) - out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[0]))) - out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[1]))) - out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[2]))) - out.Write(hist) - if debug { - _, err := loadDict(out.Bytes()) - if err != nil { - panic(err) - } - i, err := InspectDictionary(out.Bytes()) - if err != nil { - panic(err) - } - println("ID:", i.ID()) - println("Content size:", i.ContentSize()) - println("Encoder:", i.LitEncoder() != nil) - println("Offsets:", i.Offsets()) - var totalSize int - for _, b := range contents { - totalSize += len(b) - } - - encWith := func(opts ...EOption) int { - enc, err := NewWriter(nil, opts...) - if err != nil { - panic(err) - } - defer enc.Close() - var dst []byte - var totalSize int - for _, b := range contents { - dst = enc.EncodeAll(b, dst[:0]) - totalSize += len(dst) - } - return totalSize - } - plain := encWith(WithEncoderLevel(o.Level)) - withDict := encWith(WithEncoderLevel(o.Level), WithEncoderDict(out.Bytes())) - println("Input size:", totalSize) - println("Plain Compressed:", plain) - println("Dict Compressed:", withDict) - println("Saved:", plain-withDict, (plain-withDict)/len(contents), "bytes per input (rounded down)") - } - return out.Bytes(), nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go deleted file mode 100644 index 5ca46038ad..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/enc_base.go +++ /dev/null @@ -1,173 +0,0 @@ -package zstd - -import ( - "fmt" - "math/bits" - - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -const ( - dictShardBits = 6 -) - -type fastBase struct { - // cur is the offset at the start of hist - cur int32 - // maximum offset. Should be at least 2x block size. - maxMatchOff int32 - bufferReset int32 - hist []byte - crc *xxhash.Digest - tmp [8]byte - blk *blockEnc - lastDictID uint32 - lowMem bool -} - -// CRC returns the underlying CRC writer. -func (e *fastBase) CRC() *xxhash.Digest { - return e.crc -} - -// AppendCRC will append the CRC to the destination slice and return it. -func (e *fastBase) AppendCRC(dst []byte) []byte { - crc := e.crc.Sum(e.tmp[:0]) - dst = append(dst, crc[7], crc[6], crc[5], crc[4]) - return dst -} - -// WindowSize returns the window size of the encoder, -// or a window size small enough to contain the input size, if > 0. -func (e *fastBase) WindowSize(size int64) int32 { - if size > 0 && size < int64(e.maxMatchOff) { - b := int32(1) << uint(bits.Len(uint(size))) - // Keep minimum window. - if b < 1024 { - b = 1024 - } - return b - } - return e.maxMatchOff -} - -// Block returns the current block. 
-func (e *fastBase) Block() *blockEnc { - return e.blk -} - -func (e *fastBase) addBlock(src []byte) int32 { - if debugAsserts && e.cur > e.bufferReset { - panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, e.bufferReset)) - } - // check if we have space already - if len(e.hist)+len(src) > cap(e.hist) { - if cap(e.hist) == 0 { - e.ensureHist(len(src)) - } else { - if cap(e.hist) < int(e.maxMatchOff+maxCompressedBlockSize) { - panic(fmt.Errorf("unexpected buffer cap %d, want at least %d with window %d", cap(e.hist), e.maxMatchOff+maxCompressedBlockSize, e.maxMatchOff)) - } - // Move down - offset := int32(len(e.hist)) - e.maxMatchOff - copy(e.hist[0:e.maxMatchOff], e.hist[offset:]) - e.cur += offset - e.hist = e.hist[:e.maxMatchOff] - } - } - s := int32(len(e.hist)) - e.hist = append(e.hist, src...) - return s -} - -// ensureHist will ensure that history can keep at least this many bytes. -func (e *fastBase) ensureHist(n int) { - if cap(e.hist) >= n { - return - } - l := e.maxMatchOff - if (e.lowMem && e.maxMatchOff > maxCompressedBlockSize) || e.maxMatchOff <= maxCompressedBlockSize { - l += maxCompressedBlockSize - } else { - l += e.maxMatchOff - } - // Make it at least 1MB. - if l < 1<<20 && !e.lowMem { - l = 1 << 20 - } - // Make it at least the requested size. - if l < int32(n) { - l = int32(n) - } - e.hist = make([]byte, 0, l) -} - -// useBlock will replace the block with the provided one, -// but transfer recent offsets from the previous. -func (e *fastBase) UseBlock(enc *blockEnc) { - enc.reset(e.blk) - e.blk = enc -} - -func (e *fastBase) matchlen(s, t int32, src []byte) int32 { - if debugAsserts { - if s < 0 { - err := fmt.Sprintf("s (%d) < 0", s) - panic(err) - } - if t < 0 { - err := fmt.Sprintf("s (%d) < 0", s) - panic(err) - } - if s-t > e.maxMatchOff { - err := fmt.Sprintf("s (%d) - t (%d) > maxMatchOff (%d)", s, t, e.maxMatchOff) - panic(err) - } - if len(src)-int(s) > maxCompressedBlockSize { - panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize)) - } - } - return int32(matchLen(src[s:], src[t:])) -} - -// Reset the encoding table. -func (e *fastBase) resetBase(d *dict, singleBlock bool) { - if e.blk == nil { - e.blk = &blockEnc{lowMem: e.lowMem} - e.blk.init() - } else { - e.blk.reset(nil) - } - e.blk.initNewEncode() - if e.crc == nil { - e.crc = xxhash.New() - } else { - e.crc.Reset() - } - e.blk.dictLitEnc = nil - if d != nil { - low := e.lowMem - if singleBlock { - e.lowMem = true - } - e.ensureHist(d.ContentSize() + maxCompressedBlockSize) - e.lowMem = low - } - - // We offset current position so everything will be out of reach. - // If above reset line, history will be purged. - if e.cur < e.bufferReset { - e.cur += e.maxMatchOff + int32(len(e.hist)) - } - e.hist = e.hist[:0] - if d != nil { - // Set offsets (currently not used) - for i, off := range d.offsets { - e.blk.recentOffsets[i] = uint32(off) - e.blk.prevRecentOffsets[i] = e.blk.recentOffsets[i] - } - // Transfer litenc. - e.blk.dictLitEnc = d.litEnc - e.hist = append(e.hist, d.content...) - } -} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go deleted file mode 100644 index 4613724e9d..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/enc_best.go +++ /dev/null @@ -1,560 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. 
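Before the level-specific encoders that follow: the BuildDict helper removed above is driven roughly as below. This is a sketch with illustrative sample data and parameters; real training sets should be much larger. SpeedBestCompression is the level implemented by the bestFastEncoder in enc_best.go below:

package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Structurally similar samples; real training sets should be larger.
	var samples [][]byte
	for i := 0; i < 200; i++ {
		samples = append(samples, fmt.Appendf(nil, `{"id":%d,"status":"ok","payload":"value-%d"}`, i, i))
	}

	dict, err := zstd.BuildDict(zstd.BuildDictOptions{
		ID:       9999,
		Contents: samples,
		History:  samples[0], // becomes the dictionary content
		Offsets:  [3]int{1, 4, 8},
	})
	if err != nil {
		panic(err)
	}

	// Compress one message with and without the trained dictionary.
	plain, err := zstd.NewWriter(nil, zstd.WithEncoderLevel(zstd.SpeedBestCompression))
	if err != nil {
		panic(err)
	}
	defer plain.Close()
	withDict, err := zstd.NewWriter(nil, zstd.WithEncoderLevel(zstd.SpeedBestCompression), zstd.WithEncoderDict(dict))
	if err != nil {
		panic(err)
	}
	defer withDict.Close()

	msg := samples[42]
	fmt.Println("plain:", len(plain.EncodeAll(msg, nil)), "bytes")
	fmt.Println("dict: ", len(withDict.EncodeAll(msg, nil)), "bytes")
}

This mirrors what BuildDict's own debug path does when DebugOut is set: it reloads the produced dictionary and compares plain versus dictionary-assisted output sizes.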
- -package zstd - -import ( - "bytes" - "fmt" - - "github.com/klauspost/compress" -) - -const ( - bestLongTableBits = 22 // Bits used in the long match table - bestLongTableSize = 1 << bestLongTableBits // Size of the table - bestLongLen = 8 // Bytes used for table hash - - // Note: Increasing the short table bits or making the hash shorter - // can actually lead to compression degradation since it will 'steal' more from the - // long match table and match offsets are quite big. - // This greatly depends on the type of input. - bestShortTableBits = 18 // Bits used in the short match table - bestShortTableSize = 1 << bestShortTableBits // Size of the table - bestShortLen = 4 // Bytes used for table hash - -) - -type match struct { - offset int32 - s int32 - length int32 - rep int32 - est int32 -} - -const highScore = maxMatchLen * 8 - -// estBits will estimate output bits from predefined tables. -func (m *match) estBits(bitsPerByte int32) { - mlc := mlCode(uint32(m.length - zstdMinMatch)) - var ofc uint8 - if m.rep < 0 { - ofc = ofCode(uint32(m.s-m.offset) + 3) - } else { - ofc = ofCode(uint32(m.rep) & 3) - } - // Cost, excluding - ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc] - - // Add cost of match encoding... - m.est = int32(ofTT.outBits + mlTT.outBits) - m.est += int32(ofTT.deltaNbBits>>16 + mlTT.deltaNbBits>>16) - // Subtract savings compared to literal encoding... - m.est -= (m.length * bitsPerByte) >> 10 - if m.est > 0 { - // Unlikely gain.. - m.length = 0 - m.est = highScore - } -} - -// bestFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. -// The long match table contains the previous entry with the same hash, -// effectively making it a "chain" of length 2. -// When we find a long match we choose between the two values and select the longest. -// When we find a short match, after checking the long, we check if we can find a long at n+1 -// and that it is longer (lazy matching). -type bestFastEncoder struct { - fastBase - table [bestShortTableSize]prevEntry - longTable [bestLongTableSize]prevEntry - dictTable []prevEntry - dictLongTable []prevEntry -} - -// Encode improves compression... -func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 4 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= e.bufferReset-int32(len(e.hist)) { - if len(e.hist) == 0 { - e.table = [bestShortTableSize]prevEntry{} - e.longTable = [bestLongTableSize]prevEntry{} - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. 
- minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - v2 := e.table[i].prev - if v < minOff { - v = 0 - v2 = 0 - } else { - v = v - e.cur + e.maxMatchOff - if v2 < minOff { - v2 = 0 - } else { - v2 = v2 - e.cur + e.maxMatchOff - } - } - e.table[i] = prevEntry{ - offset: v, - prev: v2, - } - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - v2 := e.longTable[i].prev - if v < minOff { - v = 0 - v2 = 0 - } else { - v = v - e.cur + e.maxMatchOff - if v2 < minOff { - v2 = 0 - } else { - v2 = v2 - e.cur + e.maxMatchOff - } - } - e.longTable[i] = prevEntry{ - offset: v, - prev: v2, - } - } - e.cur = e.maxMatchOff - break - } - - // Add block to history - s := e.addBlock(src) - blk.size = len(src) - - // Check RLE first - if len(src) > zstdMinMatch { - ml := matchLen(src[1:], src) - if ml == len(src)-1 { - blk.literals = append(blk.literals, src[0]) - blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3}) - return - } - } - - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Use this to estimate literal cost. - // Scaled by 10 bits. - bitsPerByte := int32((compress.ShannonEntropyBits(src) * 1024) / len(src)) - // Huffman can never go < 1 bit/byte - if bitsPerByte < 1024 { - bitsPerByte = 1024 - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - const kSearchStrength = 10 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - offset3 := int32(blk.recentOffsets[2]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - const goodEnough = 250 - - cv := load6432(src, s) - - nextHashL := hashLen(cv, bestLongTableBits, bestLongLen) - nextHashS := hashLen(cv, bestShortTableBits, bestShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - // Set m to a match at offset if it looks like that will improve compression. - improve := func(m *match, offset int32, s int32, first uint32, rep int32) { - delta := s - offset - if delta >= e.maxMatchOff || delta <= 0 || load3232(src, offset) != first { - return - } - // Try to quick reject if we already have a long match. - if m.length > 16 { - left := len(src) - int(m.s+m.length) - // If we are too close to the end, keep as is. - if left <= 0 { - return - } - checkLen := m.length - (s - m.s) - 8 - if left > 2 && checkLen > 4 { - // Check 4 bytes, 4 bytes from the end of the current match. - a := load3232(src, offset+checkLen) - b := load3232(src, s+checkLen) - if a != b { - return - } - } - } - l := 4 + e.matchlen(s+4, offset+4, src) - if m.rep <= 0 { - // Extend candidate match backwards as far as possible. - // Do not extend repeats as we can assume they are optimal - // and offsets change if s == nextEmit. 
- tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for offset > tMin && s > nextEmit && src[offset-1] == src[s-1] && l < maxMatchLength { - s-- - offset-- - l++ - } - } - if debugAsserts { - if offset >= s { - panic(fmt.Sprintf("offset: %d - s:%d - rep: %d - cur :%d - max: %d", offset, s, rep, e.cur, e.maxMatchOff)) - } - if !bytes.Equal(src[s:s+l], src[offset:offset+l]) { - panic(fmt.Sprintf("second match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first)) - } - } - cand := match{offset: offset, s: s, length: l, rep: rep} - cand.estBits(bitsPerByte) - if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 { - *m = cand - } - } - - best := match{s: s, est: highScore} - improve(&best, candidateL.offset-e.cur, s, uint32(cv), -1) - improve(&best, candidateL.prev-e.cur, s, uint32(cv), -1) - improve(&best, candidateS.offset-e.cur, s, uint32(cv), -1) - improve(&best, candidateS.prev-e.cur, s, uint32(cv), -1) - - if canRepeat && best.length < goodEnough { - if s == nextEmit { - // Check repeats straight after a match. - improve(&best, s-offset2, s, uint32(cv), 1|4) - improve(&best, s-offset3, s, uint32(cv), 2|4) - if offset1 > 1 { - improve(&best, s-(offset1-1), s, uint32(cv), 3|4) - } - } - - // If either no match or a non-repeat match, check at + 1 - if best.rep <= 0 { - cv32 := uint32(cv >> 8) - spp := s + 1 - improve(&best, spp-offset1, spp, cv32, 1) - improve(&best, spp-offset2, spp, cv32, 2) - improve(&best, spp-offset3, spp, cv32, 3) - if best.rep < 0 { - cv32 = uint32(cv >> 24) - spp += 2 - improve(&best, spp-offset1, spp, cv32, 1) - improve(&best, spp-offset2, spp, cv32, 2) - improve(&best, spp-offset3, spp, cv32, 3) - } - } - } - // Load next and check... - e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset} - e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset} - index0 := s + 1 - - // Look far ahead, unless we have a really long match already... - if best.length < goodEnough { - // No match found, move forward on input, no need to check forward... - if best.length < 4 { - s += 1 + (s-nextEmit)>>(kSearchStrength-1) - if s >= sLimit { - break encodeLoop - } - continue - } - - candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)] - cv = load6432(src, s+1) - cv2 := load6432(src, s+2) - candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)] - candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)] - - // Short at s+1 - improve(&best, candidateS.offset-e.cur, s+1, uint32(cv), -1) - // Long at s+1, s+2 - improve(&best, candidateL.offset-e.cur, s+1, uint32(cv), -1) - improve(&best, candidateL.prev-e.cur, s+1, uint32(cv), -1) - improve(&best, candidateL2.offset-e.cur, s+2, uint32(cv2), -1) - improve(&best, candidateL2.prev-e.cur, s+2, uint32(cv2), -1) - if false { - // Short at s+3. - // Too often worse... - improve(&best, e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+3, uint32(cv2>>8), -1) - } - - // Start check at a fixed offset to allow for a few mismatches. - // For this compression level 2 yields the best results. - // We cannot do this if we have already indexed this position. - const skipBeginning = 2 - if best.s > s-skipBeginning { - // See if we can find a better match by checking where the current best ends. - // Use that offset to see if we can find a better full match. 
- if sAt := best.s + best.length; sAt < sLimit { - nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen) - candidateEnd := e.longTable[nextHashL] - - if off := candidateEnd.offset - e.cur - best.length + skipBeginning; off >= 0 { - improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) - if off := candidateEnd.prev - e.cur - best.length + skipBeginning; off >= 0 { - improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1) - } - } - } - } - } - - if debugAsserts { - if best.offset >= best.s { - panic(fmt.Sprintf("best.offset > s: %d >= %d", best.offset, best.s)) - } - if best.s < nextEmit { - panic(fmt.Sprintf("s %d < nextEmit %d", best.s, nextEmit)) - } - if best.offset < s-e.maxMatchOff { - panic(fmt.Sprintf("best.offset < s-e.maxMatchOff: %d < %d", best.offset, s-e.maxMatchOff)) - } - if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) { - panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length])) - } - } - - // We have a match, we can store the forward value - s = best.s - if best.rep > 0 { - var seq seq - seq.matchLen = uint32(best.length - zstdMinMatch) - addLiterals(&seq, best.s) - - // Repeat. If bit 4 is set, this is a non-lit repeat. - seq.offset = uint32(best.rep & 3) - if debugSequences { - println("repeat sequence", seq, "next s:", best.s, "off:", best.s-best.offset) - } - blk.sequences = append(blk.sequences, seq) - - // Index old s + 1 -> s - 1 - s = best.s + best.length - nextEmit = s - - // Index skipped... - end := s - if s > sLimit+4 { - end = sLimit + 4 - } - off := index0 + e.cur - for index0 < end { - cv0 := load6432(src, index0) - h0 := hashLen(cv0, bestLongTableBits, bestLongLen) - h1 := hashLen(cv0, bestShortTableBits, bestShortLen) - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} - off++ - index0++ - } - - switch best.rep { - case 2, 4 | 1: - offset1, offset2 = offset2, offset1 - case 3, 4 | 2: - offset1, offset2, offset3 = offset3, offset1, offset2 - case 4 | 3: - offset1, offset2, offset3 = offset1-1, offset1, offset2 - } - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, best.length) - } - break encodeLoop - } - continue - } - - // A 4-byte match has been found. Update recent offsets. - // We'll later see if more than 4 bytes. - t := best.offset - offset1, offset2, offset3 = s-t, offset1, offset2 - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && int(offset1) > len(src) { - panic("invalid offset") - } - - // Write our sequence - var seq seq - l := best.length - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) 
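A note on the offset values stored here: repeat matches store best.rep & 3 (values 1 to 3 select a recently used distance), while fresh matches store the real distance plus 3, as the seq.offset = uint32(s-t) + 3 assignment just below shows. A decode-side sketch of that mapping, simplified in that the remap applied when a sequence has zero literals is left out:

package main

import "fmt"

// decodeOffset maps a sequence offset value to a match distance: values 1-3
// select one of the three most recently used distances, larger values carry
// the real distance plus 3. (Simplified: the remap applied when a sequence
// has zero literals is left out.)
func decodeOffset(v uint32, recent *[3]uint32) uint32 {
	if v <= 3 {
		d := recent[v-1]
		copy(recent[1:], recent[:v-1]) // move the chosen distance to the front
		recent[0] = d
		return d
	}
	d := v - 3
	recent[2], recent[1], recent[0] = recent[1], recent[0], d
	return d
}

func main() {
	recent := [3]uint32{11, 22, 33}
	fmt.Println(decodeOffset(2, &recent), recent)   // 22 [22 11 33]
	fmt.Println(decodeOffset(103, &recent), recent) // 100 [100 22 11]
}

The switch over best.rep above performs exactly this front-move on the encoder's offset1/offset2/offset3 bookkeeping.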
- } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - - // Index old s + 1 -> s - 1 or sLimit - end := s - if s > sLimit-4 { - end = sLimit - 4 - } - - off := index0 + e.cur - for index0 < end { - cv0 := load6432(src, index0) - h0 := hashLen(cv0, bestLongTableBits, bestLongLen) - h1 := hashLen(cv0, bestShortTableBits, bestShortLen) - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset} - index0++ - off++ - } - if s >= sLimit { - break encodeLoop - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - blk.recentOffsets[2] = uint32(offset3) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// EncodeNoHist will encode a block with no history and no following blocks. -// Most notable difference is that src will not be copied for history and -// we do not need to check for max match length. -func (e *bestFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { - e.ensureHist(len(src)) - e.Encode(blk, src) -} - -// Reset will reset and set a dictionary if not nil -func (e *bestFastEncoder) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d == nil { - return - } - // Init or copy dict table - if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { - if len(e.dictTable) != len(e.table) { - e.dictTable = make([]prevEntry, len(e.table)) - } - end := int32(len(d.content)) - 8 + e.maxMatchOff - for i := e.maxMatchOff; i < end; i += 4 { - const hashLog = bestShortTableBits - - cv := load6432(d.content, i-e.maxMatchOff) - nextHash := hashLen(cv, hashLog, bestShortLen) // 0 -> 4 - nextHash1 := hashLen(cv>>8, hashLog, bestShortLen) // 1 -> 5 - nextHash2 := hashLen(cv>>16, hashLog, bestShortLen) // 2 -> 6 - nextHash3 := hashLen(cv>>24, hashLog, bestShortLen) // 3 -> 7 - e.dictTable[nextHash] = prevEntry{ - prev: e.dictTable[nextHash].offset, - offset: i, - } - e.dictTable[nextHash1] = prevEntry{ - prev: e.dictTable[nextHash1].offset, - offset: i + 1, - } - e.dictTable[nextHash2] = prevEntry{ - prev: e.dictTable[nextHash2].offset, - offset: i + 2, - } - e.dictTable[nextHash3] = prevEntry{ - prev: e.dictTable[nextHash3].offset, - offset: i + 3, - } - } - e.lastDictID = d.id - } - - // Init or copy dict table - if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { - if len(e.dictLongTable) != len(e.longTable) { - e.dictLongTable = make([]prevEntry, len(e.longTable)) - } - if len(d.content) >= 8 { - cv := load6432(d.content, 0) - h := hashLen(cv, bestLongTableBits, bestLongLen) - e.dictLongTable[h] = prevEntry{ - offset: e.maxMatchOff, - prev: e.dictLongTable[h].offset, - } - - end := int32(len(d.content)) - 8 + e.maxMatchOff - off := 8 // First to read - for i := e.maxMatchOff + 1; i < end; i++ { - cv = cv>>8 | (uint64(d.content[off]) << 56) - h := hashLen(cv, bestLongTableBits, bestLongLen) - e.dictLongTable[h] = prevEntry{ - offset: i, - prev: e.dictLongTable[h].offset, - } - off++ - } - } - e.lastDictID = d.id - } - // Reset table to initial state - copy(e.longTable[:], e.dictLongTable) - - e.cur = e.maxMatchOff - // Reset table to initial state - copy(e.table[:], e.dictTable) -} diff --git 
a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go deleted file mode 100644 index 84a79fde76..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ /dev/null @@ -1,1252 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import "fmt" - -const ( - betterLongTableBits = 19 // Bits used in the long match table - betterLongTableSize = 1 << betterLongTableBits // Size of the table - betterLongLen = 8 // Bytes used for table hash - - // Note: Increasing the short table bits or making the hash shorter - // can actually lead to compression degradation since it will 'steal' more from the - // long match table and match offsets are quite big. - // This greatly depends on the type of input. - betterShortTableBits = 13 // Bits used in the short match table - betterShortTableSize = 1 << betterShortTableBits // Size of the table - betterShortLen = 5 // Bytes used for table hash - - betterLongTableShardCnt = 1 << (betterLongTableBits - dictShardBits) // Number of shards in the table - betterLongTableShardSize = betterLongTableSize / betterLongTableShardCnt // Size of an individual shard - - betterShortTableShardCnt = 1 << (betterShortTableBits - dictShardBits) // Number of shards in the table - betterShortTableShardSize = betterShortTableSize / betterShortTableShardCnt // Size of an individual shard -) - -type prevEntry struct { - offset int32 - prev int32 -} - -// betterFastEncoder uses 2 tables, one for short matches (5 bytes) and one for long matches. -// The long match table contains the previous entry with the same hash, -// effectively making it a "chain" of length 2. -// When we find a long match we choose between the two values and select the longest. -// When we find a short match, after checking the long, we check if we can find a long at n+1 -// and that it is longer (lazy matching). -type betterFastEncoder struct { - fastBase - table [betterShortTableSize]tableEntry - longTable [betterLongTableSize]prevEntry -} - -type betterFastEncoderDict struct { - betterFastEncoder - dictTable []tableEntry - dictLongTable []prevEntry - shortTableShardDirty [betterShortTableShardCnt]bool - longTableShardDirty [betterLongTableShardCnt]bool - allDirty bool -} - -// Encode improves compression... -func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= e.bufferReset-int32(len(e.hist)) { - if len(e.hist) == 0 { - e.table = [betterShortTableSize]tableEntry{} - e.longTable = [betterLongTableSize]prevEntry{} - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. 
- minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - v2 := e.longTable[i].prev - if v < minOff { - v = 0 - v2 = 0 - } else { - v = v - e.cur + e.maxMatchOff - if v2 < minOff { - v2 = 0 - } else { - v2 = v2 - e.cur + e.maxMatchOff - } - } - e.longTable[i] = prevEntry{ - offset: v, - prev: v2, - } - } - e.cur = e.maxMatchOff - break - } - // Add block to history - s := e.addBlock(src) - blk.size = len(src) - - // Check RLE first - if len(src) > zstdMinMatch { - ml := matchLen(src[1:], src) - if ml == len(src)-1 { - blk.literals = append(blk.literals, src[0]) - blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3}) - return - } - } - - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. - const stepSize = 1 - - const kSearchStrength = 9 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - var matched, index0 int32 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - off := s + e.cur - e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} - e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} - index0 = s + 1 - - if canRepeat { - if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. 
- startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Index match start+1 (long) -> s - 1 - index0 := s + repOff - s += length + repOff - - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - // Index skipped... - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} - index0 += 2 - } - cv = load6432(src, s) - continue - } - const repOff2 = 1 - - // We deviate from the reference encoder and also check offset 2. - // Still slower and not much better, so disabled. - // repIndex = s - offset2 + repOff2 - if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { - // Consider history as well. - var seq seq - length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff2 - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 2 - seq.offset = 2 - if debugSequences { - println("repeat sequence 2", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - s += length + repOff2 - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - - // Index skipped... - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} - index0 += 2 - } - cv = load6432(src, s) - // Swap offsets - offset1, offset2 = offset2, offset1 - continue - } - } - // Find the offsets of our two matches. - coffsetL := candidateL.offset - e.cur - coffsetLP := candidateL.prev - e.cur - - // Check if we have a long match. - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matched = e.matchlen(s+8, coffsetL+8, src) + 8 - t = coffsetL - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - - if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { - // Found a long match, at least 8 bytes. 
- prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 - if prevMatch > matched { - matched = prevMatch - t = coffsetLP - } - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - } - break - } - - // Check if we have a long match on prev. - if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { - // Found a long match, at least 8 bytes. - matched = e.matchlen(s+8, coffsetLP+8, src) + 8 - t = coffsetLP - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - coffsetS := candidateS.offset - e.cur - - // Check if we have a short match. - if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - matched = e.matchlen(s+4, coffsetS+4, src) + 4 - - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) - candidateL = e.longTable[nextHashL] - coffsetL = candidateL.offset - e.cur - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 - if matchedNext > matched { - t = coffsetL - s += checkAt - matched = matchedNext - if debugMatches { - println("long match (after short)") - } - break - } - } - - // Check prev long... - coffsetL = candidateL.prev - e.cur - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 - if matchedNext > matched { - t = coffsetL - s += checkAt - matched = matchedNext - if debugMatches { - println("prev long match (after short)") - } - break - } - } - t = coffsetS - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - - // Try to find a better match by searching for a long match at the end of the current best match - if s+matched < sLimit { - // Allow some bytes at the beginning to mismatch. - // Sweet spot is around 3 bytes, but depends on input. - // The skipped bytes are tested in Extend backwards, - // and still picked up as part of the match if they do. - const skipBeginning = 3 - - nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) - s2 := s + skipBeginning - cv := load3232(src, s2) - candidateL := e.longTable[nextHashL] - coffsetL := candidateL.offset - e.cur - matched + skipBeginning - if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { - // Found a long match, at least 4 bytes. 
- matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 - if matchedNext > matched { - t = coffsetL - s = s2 - matched = matchedNext - if debugMatches { - println("long match at end-of-match") - } - } - } - - // Check prev long... - if true { - coffsetL = candidateL.prev - e.cur - matched + skipBeginning - if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { - // Found a long match, at least 4 bytes. - matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4 - if matchedNext > matched { - t = coffsetL - s = s2 - matched = matchedNext - if debugMatches { - println("prev long match at end-of-match") - } - } - } - } - } - // A match has been found. Update recent offsets. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the n-byte match as long as possible. - l := matched - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) -> s - 1 - off := index0 + e.cur - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)} - index0 += 2 - off += 2 - } - - cv = load6432(src, s) - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} - e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// EncodeNoHist will encode a block with no history and no following blocks. 
-// Most notable difference is that src will not be copied for history and -// we do not need to check for max match length. -func (e *betterFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { - e.ensureHist(len(src)) - e.Encode(blk, src) -} - -// Encode improves compression... -func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= e.bufferReset-int32(len(e.hist)) { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = prevEntry{} - } - e.cur = e.maxMatchOff - e.allDirty = true - break - } - // Shift down everything in the table that isn't already too far away. - minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - v2 := e.longTable[i].prev - if v < minOff { - v = 0 - v2 = 0 - } else { - v = v - e.cur + e.maxMatchOff - if v2 < minOff { - v2 = 0 - } else { - v2 = v2 - e.cur + e.maxMatchOff - } - } - e.longTable[i] = prevEntry{ - offset: v, - prev: v2, - } - } - e.allDirty = true - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. - const stepSize = 1 - - const kSearchStrength = 9 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - var matched, index0 int32 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - off := s + e.cur - e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset} - e.markLongShardDirty(nextHashL) - e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)} - e.markShortShardDirty(nextHashS) - index0 = s + 1 - - if canRepeat { - if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. 
- start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Index match start+1 (long) -> s - 1 - s += length + repOff - - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - // Index skipped... - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.markLongShardDirty(h0) - h1 := hashLen(cv1, betterShortTableBits, betterShortLen) - e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} - e.markShortShardDirty(h1) - index0 += 2 - } - cv = load6432(src, s) - continue - } - const repOff2 = 1 - - // We deviate from the reference encoder and also check offset 2. - // Still slower and not much better, so disabled. - // repIndex = s - offset2 + repOff2 - if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { - // Consider history as well. - var seq seq - length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff2 - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 2 - seq.offset = 2 - if debugSequences { - println("repeat sequence 2", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - s += length + repOff2 - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - - // Index skipped... - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - off := index0 + e.cur - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.markLongShardDirty(h0) - h1 := hashLen(cv1, betterShortTableBits, betterShortLen) - e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} - e.markShortShardDirty(h1) - index0 += 2 - } - cv = load6432(src, s) - // Swap offsets - offset1, offset2 = offset2, offset1 - continue - } - } - // Find the offsets of our two matches. - coffsetL := candidateL.offset - e.cur - coffsetLP := candidateL.prev - e.cur - - // Check if we have a long match. - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. 
- matched = e.matchlen(s+8, coffsetL+8, src) + 8 - t = coffsetL - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - - if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { - // Found a long match, at least 8 bytes. - prevMatch := e.matchlen(s+8, coffsetLP+8, src) + 8 - if prevMatch > matched { - matched = prevMatch - t = coffsetLP - } - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - } - break - } - - // Check if we have a long match on prev. - if s-coffsetLP < e.maxMatchOff && cv == load6432(src, coffsetLP) { - // Found a long match, at least 8 bytes. - matched = e.matchlen(s+8, coffsetLP+8, src) + 8 - t = coffsetLP - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - coffsetS := candidateS.offset - e.cur - - // Check if we have a short match. - if s-coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - matched = e.matchlen(s+4, coffsetS+4, src) + 4 - - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hashLen(cv, betterLongTableBits, betterLongLen) - candidateL = e.longTable[nextHashL] - coffsetL = candidateL.offset - e.cur - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = prevEntry{offset: s + checkAt + e.cur, prev: candidateL.offset} - e.markLongShardDirty(nextHashL) - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 - if matchedNext > matched { - t = coffsetL - s += checkAt - matched = matchedNext - if debugMatches { - println("long match (after short)") - } - break - } - } - - // Check prev long... - coffsetL = candidateL.prev - e.cur - if s-coffsetL < e.maxMatchOff && cv == load6432(src, coffsetL) { - // Found a long match, at least 8 bytes. - matchedNext := e.matchlen(s+8+checkAt, coffsetL+8, src) + 8 - if matchedNext > matched { - t = coffsetL - s += checkAt - matched = matchedNext - if debugMatches { - println("prev long match (after short)") - } - break - } - } - t = coffsetS - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - // Try to find a better match by searching for a long match at the end of the current best match - if s+matched < sLimit { - nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen) - cv := load3232(src, s) - candidateL := e.longTable[nextHashL] - coffsetL := candidateL.offset - e.cur - matched - if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { - // Found a long match, at least 4 bytes. 
- matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 - if matchedNext > matched { - t = coffsetL - matched = matchedNext - if debugMatches { - println("long match at end-of-match") - } - } - } - - // Check prev long... - if true { - coffsetL = candidateL.prev - e.cur - matched - if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) { - // Found a long match, at least 4 bytes. - matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4 - if matchedNext > matched { - t = coffsetL - matched = matchedNext - if debugMatches { - println("prev long match at end-of-match") - } - } - } - } - } - // A match has been found. Update recent offsets. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the n-byte match as long as possible. - l := matched - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) -> s - 1 - off := index0 + e.cur - for index0 < s-1 { - cv0 := load6432(src, index0) - cv1 := cv0 >> 8 - h0 := hashLen(cv0, betterLongTableBits, betterLongLen) - e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset} - e.markLongShardDirty(h0) - h1 := hashLen(cv1, betterShortTableBits, betterShortLen) - e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)} - e.markShortShardDirty(h1) - index0 += 2 - off += 2 - } - - cv = load6432(src, s) - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashL := hashLen(cv, betterLongTableBits, betterLongLen) - nextHashS := hashLen(cv, betterShortTableBits, betterShortLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset} - e.markLongShardDirty(nextHashL) - e.table[nextHashS] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.markShortShardDirty(nextHashS) - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) 
- blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *betterFastEncoder) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d != nil { - panic("betterFastEncoder: Reset with dict") - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *betterFastEncoderDict) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d == nil { - return - } - // Init or copy dict table - if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { - if len(e.dictTable) != len(e.table) { - e.dictTable = make([]tableEntry, len(e.table)) - } - end := int32(len(d.content)) - 8 + e.maxMatchOff - for i := e.maxMatchOff; i < end; i += 4 { - const hashLog = betterShortTableBits - - cv := load6432(d.content, i-e.maxMatchOff) - nextHash := hashLen(cv, hashLog, betterShortLen) // 0 -> 4 - nextHash1 := hashLen(cv>>8, hashLog, betterShortLen) // 1 -> 5 - nextHash2 := hashLen(cv>>16, hashLog, betterShortLen) // 2 -> 6 - nextHash3 := hashLen(cv>>24, hashLog, betterShortLen) // 3 -> 7 - e.dictTable[nextHash] = tableEntry{ - val: uint32(cv), - offset: i, - } - e.dictTable[nextHash1] = tableEntry{ - val: uint32(cv >> 8), - offset: i + 1, - } - e.dictTable[nextHash2] = tableEntry{ - val: uint32(cv >> 16), - offset: i + 2, - } - e.dictTable[nextHash3] = tableEntry{ - val: uint32(cv >> 24), - offset: i + 3, - } - } - e.lastDictID = d.id - e.allDirty = true - } - - // Init or copy dict table - if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { - if len(e.dictLongTable) != len(e.longTable) { - e.dictLongTable = make([]prevEntry, len(e.longTable)) - } - if len(d.content) >= 8 { - cv := load6432(d.content, 0) - h := hashLen(cv, betterLongTableBits, betterLongLen) - e.dictLongTable[h] = prevEntry{ - offset: e.maxMatchOff, - prev: e.dictLongTable[h].offset, - } - - end := int32(len(d.content)) - 8 + e.maxMatchOff - off := 8 // First to read - for i := e.maxMatchOff + 1; i < end; i++ { - cv = cv>>8 | (uint64(d.content[off]) << 56) - h := hashLen(cv, betterLongTableBits, betterLongLen) - e.dictLongTable[h] = prevEntry{ - offset: i, - prev: e.dictLongTable[h].offset, - } - off++ - } - } - e.lastDictID = d.id - e.allDirty = true - } - - // Reset table to initial state - { - dirtyShardCnt := 0 - if !e.allDirty { - for i := range e.shortTableShardDirty { - if e.shortTableShardDirty[i] { - dirtyShardCnt++ - } - } - } - const shardCnt = betterShortTableShardCnt - const shardSize = betterShortTableShardSize - if e.allDirty || dirtyShardCnt > shardCnt*4/6 { - copy(e.table[:], e.dictTable) - for i := range e.shortTableShardDirty { - e.shortTableShardDirty[i] = false - } - } else { - for i := range e.shortTableShardDirty { - if !e.shortTableShardDirty[i] { - continue - } - - copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) - e.shortTableShardDirty[i] = false - } - } - } - { - dirtyShardCnt := 0 - if !e.allDirty { - for i := range e.shortTableShardDirty { - if e.shortTableShardDirty[i] { - dirtyShardCnt++ - } - } - } - const shardCnt = betterLongTableShardCnt - const shardSize = betterLongTableShardSize - if e.allDirty || dirtyShardCnt > shardCnt*4/6 { - copy(e.longTable[:], e.dictLongTable) - for i := range e.longTableShardDirty { - e.longTableShardDirty[i] = false - } - } else { - 
for i := range e.longTableShardDirty {
-				if !e.longTableShardDirty[i] {
-					continue
-				}
-
-				copy(e.longTable[i*shardSize:(i+1)*shardSize], e.dictLongTable[i*shardSize:(i+1)*shardSize])
-				e.longTableShardDirty[i] = false
-			}
-		}
-	}
-	e.cur = e.maxMatchOff
-	e.allDirty = false
-}
-
-func (e *betterFastEncoderDict) markLongShardDirty(entryNum uint32) {
-	e.longTableShardDirty[entryNum/betterLongTableShardSize] = true
-}
-
-func (e *betterFastEncoderDict) markShortShardDirty(entryNum uint32) {
-	e.shortTableShardDirty[entryNum/betterShortTableShardSize] = true
-}
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
deleted file mode 100644
index d36be7bd8c..0000000000
--- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
+++ /dev/null
@@ -1,1123 +0,0 @@
-// Copyright 2019+ Klaus Post. All rights reserved.
-// License information can be found in the LICENSE file.
-// Based on work by Yann Collet, released under BSD License.
-
-package zstd
-
-import "fmt"
-
-const (
-	dFastLongTableBits = 17                      // Bits used in the long match table
-	dFastLongTableSize = 1 << dFastLongTableBits // Size of the table
-	dFastLongTableMask = dFastLongTableSize - 1  // Mask for table indices. Redundant, but can eliminate bounds checks.
-	dFastLongLen       = 8                       // Bytes used for table hash
-
-	dLongTableShardCnt  = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table
-	dLongTableShardSize = dFastLongTableSize / tableShardCnt        // Size of an individual shard
-
-	dFastShortTableBits = tableBits                // Bits used in the short match table
-	dFastShortTableSize = 1 << dFastShortTableBits // Size of the table
-	dFastShortTableMask = dFastShortTableSize - 1  // Mask for table indices. Redundant, but can eliminate bounds checks.
-	dFastShortLen       = 5                        // Bytes used for table hash
-
-)
-
-type doubleFastEncoder struct {
-	fastEncoder
-	longTable [dFastLongTableSize]tableEntry
-}
-
-type doubleFastEncoderDict struct {
-	fastEncoderDict
-	longTable           [dFastLongTableSize]tableEntry
-	dictLongTable       []tableEntry
-	longTableShardDirty [dLongTableShardCnt]bool
-}
-
-// Encode mimics functionality in zstd_dfast.c
-func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) {
-	const (
-		// Input margin is the number of bytes we read (8)
-		// and the maximum we will read ahead (2)
-		inputMargin            = 8 + 2
-		minNonLiteralBlockSize = 16
-	)
-
-	// Protect against e.cur wraparound.
-	for e.cur >= e.bufferReset-int32(len(e.hist)) {
-		if len(e.hist) == 0 {
-			e.table = [dFastShortTableSize]tableEntry{}
-			e.longTable = [dFastLongTableSize]tableEntry{}
-			e.cur = e.maxMatchOff
-			break
-		}
-		// Shift down everything in the table that isn't already too far away.
-		minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
-		for i := range e.table[:] {
-			v := e.table[i].offset
-			if v < minOff {
-				v = 0
-			} else {
-				v = v - e.cur + e.maxMatchOff
-			}
-			e.table[i].offset = v
-		}
-		for i := range e.longTable[:] {
-			v := e.longTable[i].offset
-			if v < minOff {
-				v = 0
-			} else {
-				v = v - e.cur + e.maxMatchOff
-			}
-			e.longTable[i].offset = v
-		}
-		e.cur = e.maxMatchOff
-		break
-	}
-
-	s := e.addBlock(src)
-	blk.size = len(src)
-	if len(src) < minNonLiteralBlockSize {
-		blk.extraLits = len(src)
-		blk.literals = blk.literals[:len(src)]
-		copy(blk.literals, src)
-		return
-	}
-
-	// Override src
-	src = e.hist
-	sLimit := int32(len(src)) - inputMargin
-	// stepSize is the number of bytes to skip on every main loop iteration.
-	// It should be >= 1.
- const stepSize = 1 - - const kSearchStrength = 8 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.table[nextHashS] = entry - - if canRepeat { - if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + repOff - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - } - // Find the offsets of our two matches. - coffsetL := s - (candidateL.offset - e.cur) - coffsetS := s - (candidateS.offset - e.cur) - - // Check if we have a long match. - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - // Check if we have a short match. - if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) - candidateL = e.longTable[nextHashL] - coffsetL = s - (candidateL.offset - e.cur) + checkAt - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. 
- // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - s += checkAt - if debugMatches { - println("long match (after short)") - } - break - } - - t = candidateS.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - - // A 4-byte match has been found. Update recent offsets. - // We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the 4-byte match as long as possible. - l := e.matchlen(s+4, t+4, src) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) and start+2 (short) - index0 := s - l + 1 - // Index match end-2 (long) and end-1 (short) - index1 := s - 2 - - cv0 := load6432(src, index0) - cv1 := load6432(src, index1) - te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} - te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} - e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 - e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 - cv0 >>= 8 - cv1 >>= 8 - te0.offset++ - te1.offset++ - te0.val = uint32(cv0) - te1.val = uint32(cv1) - e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 - e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 - - cv = load6432(src, s) - - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.table[nextHashS] = entry - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. 
- offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// EncodeNoHist will encode a block with no history and no following blocks. -// Most notable difference is that src will not be copied for history and -// we do not need to check for max match length. -func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - if e.cur >= e.bufferReset { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = tableEntry{} - } - e.cur = e.maxMatchOff - } - - s := int32(0) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. - const stepSize = 1 - - const kSearchStrength = 8 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - for { - - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.table[nextHashS] = entry - - if len(blk.sequences) > 2 { - if load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - //length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - length := 4 + int32(matchLen(src[s+4+repOff:], src[repIndex+4:])) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. 
- startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + repOff - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - } - // Find the offsets of our two matches. - coffsetL := s - (candidateL.offset - e.cur) - coffsetS := s - (candidateS.offset - e.cur) - - // Check if we have a long match. - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d). cur: %d", s, t, e.cur)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - // Check if we have a short match. - if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) - candidateL = e.longTable[nextHashL] - coffsetL = s - (candidateL.offset - e.cur) + checkAt - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - s += checkAt - if debugMatches { - println("long match (after short)") - } - break - } - - t = candidateS.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - - // A 4-byte match has been found. Update recent offsets. - // We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - // Extend the 4-byte match as long as possible. - //l := e.matchlen(s+4, t+4, src) + 4 - l := int32(matchLen(src[s+4:], src[t+4:])) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) 
- } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) and start+2 (short) - index0 := s - l + 1 - // Index match end-2 (long) and end-1 (short) - index1 := s - 2 - - cv0 := load6432(src, index0) - cv1 := load6432(src, index1) - te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} - te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} - e.longTable[hashLen(cv0, dFastLongTableBits, dFastLongLen)] = te0 - e.longTable[hashLen(cv1, dFastLongTableBits, dFastLongLen)] = te1 - cv0 >>= 8 - cv1 >>= 8 - te0.offset++ - te1.offset++ - te0.val = uint32(cv0) - te1.val = uint32(cv1) - e.table[hashLen(cv0, dFastShortTableBits, dFastShortLen)] = te0 - e.table[hashLen(cv1, dFastShortTableBits, dFastShortLen)] = te1 - - cv = load6432(src, s) - - if len(blk.sequences) <= 2 { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashS := hashLen(cv1>>8, dFastShortTableBits, dFastShortLen) - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - //l := 4 + e.matchlen(s+4, o2+4, src) - l := 4 + int32(matchLen(src[s+4:], src[o2+4:])) - - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.table[nextHashS] = entry - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } - - // We do not store history, so we must offset e.cur to avoid false matches for next user. - if e.cur < e.bufferReset { - e.cur += int32(len(src)) - } -} - -// Encode will encode the content, with a dictionary if initialized for it. -func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) { - const ( - // Input margin is the number of bytes we read (8) - // and the maximum we will read ahead (2) - inputMargin = 8 + 2 - minNonLiteralBlockSize = 16 - ) - - // Protect against e.cur wraparound. - for e.cur >= e.bufferReset-int32(len(e.hist)) { - if len(e.hist) == 0 { - for i := range e.table[:] { - e.table[i] = tableEntry{} - } - for i := range e.longTable[:] { - e.longTable[i] = tableEntry{} - } - e.markAllShardsDirty() - e.cur = e.maxMatchOff - break - } - // Shift down everything in the table that isn't already too far away. 
- minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff - for i := range e.table[:] { - v := e.table[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.table[i].offset = v - } - for i := range e.longTable[:] { - v := e.longTable[i].offset - if v < minOff { - v = 0 - } else { - v = v - e.cur + e.maxMatchOff - } - e.longTable[i].offset = v - } - e.markAllShardsDirty() - e.cur = e.maxMatchOff - break - } - - s := e.addBlock(src) - blk.size = len(src) - if len(src) < minNonLiteralBlockSize { - blk.extraLits = len(src) - blk.literals = blk.literals[:len(src)] - copy(blk.literals, src) - return - } - - // Override src - src = e.hist - sLimit := int32(len(src)) - inputMargin - // stepSize is the number of bytes to skip on every main loop iteration. - // It should be >= 1. - const stepSize = 1 - - const kSearchStrength = 8 - - // nextEmit is where in src the next emitLiteral should start from. - nextEmit := s - cv := load6432(src, s) - - // Relative offsets - offset1 := int32(blk.recentOffsets[0]) - offset2 := int32(blk.recentOffsets[1]) - - addLiterals := func(s *seq, until int32) { - if until == nextEmit { - return - } - blk.literals = append(blk.literals, src[nextEmit:until]...) - s.litLen = uint32(until - nextEmit) - } - if debugEncoder { - println("recent offsets:", blk.recentOffsets) - } - -encodeLoop: - for { - var t int32 - // We allow the encoder to optionally turn off repeat offsets across blocks - canRepeat := len(blk.sequences) > 2 - - for { - if debugAsserts && canRepeat && offset1 == 0 { - panic("offset0 was 0") - } - - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) - candidateL := e.longTable[nextHashL] - candidateS := e.table[nextHashS] - - const repOff = 1 - repIndex := s - offset1 + repOff - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.markLongShardDirty(nextHashL) - e.table[nextHashS] = entry - e.markShardDirty(nextHashS) - - if canRepeat { - if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { - // Consider history as well. - var seq seq - length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - - seq.matchLen = uint32(length - zstdMinMatch) - - // We might be able to match backwards. - // Extend as long as we can. - start := s + repOff - // We end the search early, so we don't risk 0 literals - // and have to do special offset treatment. - startLimit := nextEmit + 1 - - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + repOff - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - } - // Find the offsets of our two matches. - coffsetL := s - (candidateL.offset - e.cur) - coffsetS := s - (candidateS.offset - e.cur) - - // Check if we have a long match. - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. 
- t = candidateL.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugMatches { - println("long match") - } - break - } - - // Check if we have a short match. - if coffsetS < e.maxMatchOff && uint32(cv) == candidateS.val { - // found a regular match - // See if we can find a long match at s+1 - const checkAt = 1 - cv := load6432(src, s+checkAt) - nextHashL = hashLen(cv, dFastLongTableBits, dFastLongLen) - candidateL = e.longTable[nextHashL] - coffsetL = s - (candidateL.offset - e.cur) + checkAt - - // We can store it, since we have at least a 4 byte match. - e.longTable[nextHashL] = tableEntry{offset: s + checkAt + e.cur, val: uint32(cv)} - e.markLongShardDirty(nextHashL) - if coffsetL < e.maxMatchOff && uint32(cv) == candidateL.val { - // Found a long match, likely at least 8 bytes. - // Reference encoder checks all 8 bytes, we only check 4, - // but the likelihood of both the first 4 bytes and the hash matching should be enough. - t = candidateL.offset - e.cur - s += checkAt - if debugMatches { - println("long match (after short)") - } - break - } - - t = candidateS.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - if debugMatches { - println("short match") - } - break - } - - // No match found, move forward in input. - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - - // A 4-byte match has been found. Update recent offsets. - // We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the 4-byte match as long as possible. - l := e.matchlen(s+4, t+4, src) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) 
- } - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - - // Index match start+1 (long) and start+2 (short) - index0 := s - l + 1 - // Index match end-2 (long) and end-1 (short) - index1 := s - 2 - - cv0 := load6432(src, index0) - cv1 := load6432(src, index1) - te0 := tableEntry{offset: index0 + e.cur, val: uint32(cv0)} - te1 := tableEntry{offset: index1 + e.cur, val: uint32(cv1)} - longHash1 := hashLen(cv0, dFastLongTableBits, dFastLongLen) - longHash2 := hashLen(cv1, dFastLongTableBits, dFastLongLen) - e.longTable[longHash1] = te0 - e.longTable[longHash2] = te1 - e.markLongShardDirty(longHash1) - e.markLongShardDirty(longHash2) - cv0 >>= 8 - cv1 >>= 8 - te0.offset++ - te1.offset++ - te0.val = uint32(cv0) - te1.val = uint32(cv1) - hashVal1 := hashLen(cv0, dFastShortTableBits, dFastShortLen) - hashVal2 := hashLen(cv1, dFastShortTableBits, dFastShortLen) - e.table[hashVal1] = te0 - e.markShardDirty(hashVal1) - e.table[hashVal2] = te1 - e.markShardDirty(hashVal2) - - cv = load6432(src, s) - - if !canRepeat { - continue - } - - // Check offset 2 - for { - o2 := s - offset2 - if load3232(src, o2) != uint32(cv) { - // Do regular search - break - } - - // Store this, since we have it. - nextHashL := hashLen(cv, dFastLongTableBits, dFastLongLen) - nextHashS := hashLen(cv, dFastShortTableBits, dFastShortLen) - - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - entry := tableEntry{offset: s + e.cur, val: uint32(cv)} - e.longTable[nextHashL] = entry - e.markLongShardDirty(nextHashL) - e.table[nextHashS] = entry - e.markShardDirty(nextHashS) - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - // Finished - break encodeLoop - } - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) - blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } - // If we encoded more than 64K mark all dirty. 
- if len(src) > 64<<10 { - e.markAllShardsDirty() - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *doubleFastEncoder) Reset(d *dict, singleBlock bool) { - e.fastEncoder.Reset(d, singleBlock) - if d != nil { - panic("doubleFastEncoder: Reset with dict not supported") - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) { - allDirty := e.allDirty - e.fastEncoderDict.Reset(d, singleBlock) - if d == nil { - return - } - - // Init or copy dict table - if len(e.dictLongTable) != len(e.longTable) || d.id != e.lastDictID { - if len(e.dictLongTable) != len(e.longTable) { - e.dictLongTable = make([]tableEntry, len(e.longTable)) - } - if len(d.content) >= 8 { - cv := load6432(d.content, 0) - e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ - val: uint32(cv), - offset: e.maxMatchOff, - } - end := int32(len(d.content)) - 8 + e.maxMatchOff - for i := e.maxMatchOff + 1; i < end; i++ { - cv = cv>>8 | (uint64(d.content[i-e.maxMatchOff+7]) << 56) - e.dictLongTable[hashLen(cv, dFastLongTableBits, dFastLongLen)] = tableEntry{ - val: uint32(cv), - offset: i, - } - } - } - e.lastDictID = d.id - allDirty = true - } - // Reset table to initial state - e.cur = e.maxMatchOff - - dirtyShardCnt := 0 - if !allDirty { - for i := range e.longTableShardDirty { - if e.longTableShardDirty[i] { - dirtyShardCnt++ - } - } - } - - if allDirty || dirtyShardCnt > dLongTableShardCnt/2 { - //copy(e.longTable[:], e.dictLongTable) - e.longTable = *(*[dFastLongTableSize]tableEntry)(e.dictLongTable) - for i := range e.longTableShardDirty { - e.longTableShardDirty[i] = false - } - return - } - for i := range e.longTableShardDirty { - if !e.longTableShardDirty[i] { - continue - } - - // copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize]) - *(*[dLongTableShardSize]tableEntry)(e.longTable[i*dLongTableShardSize:]) = *(*[dLongTableShardSize]tableEntry)(e.dictLongTable[i*dLongTableShardSize:]) - - e.longTableShardDirty[i] = false - } -} - -func (e *doubleFastEncoderDict) markLongShardDirty(entryNum uint32) { - e.longTableShardDirty[entryNum/dLongTableShardSize] = true -} diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go deleted file mode 100644 index f45a3da7da..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ /dev/null @@ -1,891 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "fmt" -) - -const ( - tableBits = 15 // Bits used in the table - tableSize = 1 << tableBits // Size of the table - tableShardCnt = 1 << (tableBits - dictShardBits) // Number of shards in the table - tableShardSize = tableSize / tableShardCnt // Size of an individual shard - tableFastHashLen = 6 - tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. 
-	maxMatchLength = 131074
-)
-
-type tableEntry struct {
-	val    uint32
-	offset int32
-}
-
-type fastEncoder struct {
-	fastBase
-	table [tableSize]tableEntry
-}
-
-type fastEncoderDict struct {
-	fastEncoder
-	dictTable       []tableEntry
-	tableShardDirty [tableShardCnt]bool
-	allDirty        bool
-}
-
-// Encode mimics functionality in zstd_fast.c
-func (e *fastEncoder) Encode(blk *blockEnc, src []byte) {
-	const (
-		inputMargin            = 8
-		minNonLiteralBlockSize = 1 + 1 + inputMargin
-	)
-
-	// Protect against e.cur wraparound.
-	for e.cur >= e.bufferReset-int32(len(e.hist)) {
-		if len(e.hist) == 0 {
-			for i := range e.table[:] {
-				e.table[i] = tableEntry{}
-			}
-			e.cur = e.maxMatchOff
-			break
-		}
-		// Shift down everything in the table that isn't already too far away.
-		minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
-		for i := range e.table[:] {
-			v := e.table[i].offset
-			if v < minOff {
-				v = 0
-			} else {
-				v = v - e.cur + e.maxMatchOff
-			}
-			e.table[i].offset = v
-		}
-		e.cur = e.maxMatchOff
-		break
-	}
-
-	s := e.addBlock(src)
-	blk.size = len(src)
-	if len(src) < minNonLiteralBlockSize {
-		blk.extraLits = len(src)
-		blk.literals = blk.literals[:len(src)]
-		copy(blk.literals, src)
-		return
-	}
-
-	// Override src
-	src = e.hist
-	sLimit := int32(len(src)) - inputMargin
-	// stepSize is the number of bytes to skip on every main loop iteration.
-	// It should be >= 2.
-	const stepSize = 2
-
-	// TEMPLATE
-	const hashLog = tableBits
-	// seems global, but would be nice to tweak.
-	const kSearchStrength = 6
-
-	// nextEmit is where in src the next emitLiteral should start from.
-	nextEmit := s
-	cv := load6432(src, s)
-
-	// Relative offsets
-	offset1 := int32(blk.recentOffsets[0])
-	offset2 := int32(blk.recentOffsets[1])
-
-	addLiterals := func(s *seq, until int32) {
-		if until == nextEmit {
-			return
-		}
-		blk.literals = append(blk.literals, src[nextEmit:until]...)
-		s.litLen = uint32(until - nextEmit)
-	}
-	if debugEncoder {
-		println("recent offsets:", blk.recentOffsets)
-	}
-
-encodeLoop:
-	for {
-		// t will contain the match offset when we find one.
-		// When exiting the search loop, we have already checked 4 bytes.
-		var t int32
-
-		// We will not use repeat offsets across blocks.
-		// By not using them for the first 3 matches
-		canRepeat := len(blk.sequences) > 2
-
-		for {
-			if debugAsserts && canRepeat && offset1 == 0 {
-				panic("offset0 was 0")
-			}
-
-			nextHash := hashLen(cv, hashLog, tableFastHashLen)
-			nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen)
-			candidate := e.table[nextHash]
-			candidate2 := e.table[nextHash2]
-			repIndex := s - offset1 + 2
-
-			e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
-			e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)}
-
-			if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) {
-				// Consider history as well.
-				var seq seq
-				length := 4 + e.matchlen(s+6, repIndex+4, src)
-				seq.matchLen = uint32(length - zstdMinMatch)
-
-				// We might be able to match backwards.
-				// Extend as long as we can.
-				start := s + 2
-				// We end the search early, so we don't risk 0 literals
-				// and have to do special offset treatment.
- startLimit := nextEmit + 1 - - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } - for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + 2 - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - coffset0 := s - (candidate.offset - e.cur) - coffset1 := s - (candidate2.offset - e.cur) + 1 - if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { - // found a regular match - t = candidate.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - break - } - - if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { - // found a regular match - t = candidate2.offset - e.cur - s++ - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - break - } - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - // A 4-byte match has been found. We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the 4-byte match as long as possible. - l := e.matchlen(s+4, t+4, src) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence. - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - // Don't use repeat offsets - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - - // Check offset 2 - if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - // Store this, since we have it. - nextHash := hashLen(cv, hashLog, tableFastHashLen) - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - break encodeLoop - } - // Prepare next loop. - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) 
-		blk.extraLits = len(src) - int(nextEmit)
-	}
-	blk.recentOffsets[0] = uint32(offset1)
-	blk.recentOffsets[1] = uint32(offset2)
-	if debugEncoder {
-		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
-	}
-}
-
-// EncodeNoHist will encode a block with no history and no following blocks.
-// Most notable difference is that src will not be copied for history and
-// we do not need to check for max match length.
-func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
-	const (
-		inputMargin            = 8
-		minNonLiteralBlockSize = 1 + 1 + inputMargin
-	)
-	if debugEncoder {
-		if len(src) > maxCompressedBlockSize {
-			panic("src too big")
-		}
-	}
-
-	// Protect against e.cur wraparound.
-	if e.cur >= e.bufferReset {
-		for i := range e.table[:] {
-			e.table[i] = tableEntry{}
-		}
-		e.cur = e.maxMatchOff
-	}
-
-	s := int32(0)
-	blk.size = len(src)
-	if len(src) < minNonLiteralBlockSize {
-		blk.extraLits = len(src)
-		blk.literals = blk.literals[:len(src)]
-		copy(blk.literals, src)
-		return
-	}
-
-	sLimit := int32(len(src)) - inputMargin
-	// stepSize is the number of bytes to skip on every main loop iteration.
-	// It should be >= 2.
-	const stepSize = 2
-
-	// TEMPLATE
-	const hashLog = tableBits
-	// seems global, but would be nice to tweak.
-	const kSearchStrength = 6
-
-	// nextEmit is where in src the next emitLiteral should start from.
-	nextEmit := s
-	cv := load6432(src, s)
-
-	// Relative offsets
-	offset1 := int32(blk.recentOffsets[0])
-	offset2 := int32(blk.recentOffsets[1])
-
-	addLiterals := func(s *seq, until int32) {
-		if until == nextEmit {
-			return
-		}
-		blk.literals = append(blk.literals, src[nextEmit:until]...)
-		s.litLen = uint32(until - nextEmit)
-	}
-	if debugEncoder {
-		println("recent offsets:", blk.recentOffsets)
-	}
-
-encodeLoop:
-	for {
-		// t will contain the match offset when we find one.
-		// When exiting the search loop, we have already checked 4 bytes.
-		var t int32
-
-		// We will not use repeat offsets across blocks.
-		// By not using them for the first 3 matches
-
-		for {
-			nextHash := hashLen(cv, hashLog, tableFastHashLen)
-			nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen)
-			candidate := e.table[nextHash]
-			candidate2 := e.table[nextHash2]
-			repIndex := s - offset1 + 2
-
-			e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
-			e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)}
-
-			if len(blk.sequences) > 2 && load3232(src, repIndex) == uint32(cv>>16) {
-				// Consider history as well.
-				var seq seq
-				length := 4 + e.matchlen(s+6, repIndex+4, src)
-
-				seq.matchLen = uint32(length - zstdMinMatch)
-
-				// We might be able to match backwards.
-				// Extend as long as we can.
-				start := s + 2
-				// We end the search early, so we don't risk 0 literals
-				// and have to do special offset treatment.
- startLimit := nextEmit + 1 - - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } - for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + 2 - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - coffset0 := s - (candidate.offset - e.cur) - coffset1 := s - (candidate2.offset - e.cur) + 1 - if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { - // found a regular match - t = candidate.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic(fmt.Sprintf("t (%d) < 0, candidate.offset: %d, e.cur: %d, coffset0: %d, e.maxMatchOff: %d", t, candidate.offset, e.cur, coffset0, e.maxMatchOff)) - } - break - } - - if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { - // found a regular match - t = candidate2.offset - e.cur - s++ - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - break - } - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - // A 4-byte match has been found. We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && t < 0 { - panic(fmt.Sprintf("t (%d) < 0 ", t)) - } - // Extend the 4-byte match as long as possible. - l := e.matchlen(s+4, t+4, src) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] { - s-- - t-- - l++ - } - - // Write our sequence. - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - // Don't use repeat offsets - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - - // Check offset 2 - if o2 := s - offset2; len(blk.sequences) > 2 && load3232(src, o2) == uint32(cv) { - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - // Store this, since we have it. - nextHash := hashLen(cv, hashLog, tableFastHashLen) - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - break encodeLoop - } - // Prepare next loop. - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) 
-		blk.extraLits = len(src) - int(nextEmit)
-	}
-	if debugEncoder {
-		println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
-	}
-	// We do not store history, so we must offset e.cur to avoid false matches for next user.
-	if e.cur < e.bufferReset {
-		e.cur += int32(len(src))
-	}
-}
-
-// Encode will encode the content, with a dictionary if initialized for it.
-func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) {
-	const (
-		inputMargin            = 8
-		minNonLiteralBlockSize = 1 + 1 + inputMargin
-	)
-	if e.allDirty || len(src) > 32<<10 {
-		e.fastEncoder.Encode(blk, src)
-		e.allDirty = true
-		return
-	}
-	// Protect against e.cur wraparound.
-	for e.cur >= e.bufferReset-int32(len(e.hist)) {
-		if len(e.hist) == 0 {
-			e.table = [tableSize]tableEntry{}
-			e.cur = e.maxMatchOff
-			break
-		}
-		// Shift down everything in the table that isn't already too far away.
-		minOff := e.cur + int32(len(e.hist)) - e.maxMatchOff
-		for i := range e.table[:] {
-			v := e.table[i].offset
-			if v < minOff {
-				v = 0
-			} else {
-				v = v - e.cur + e.maxMatchOff
-			}
-			e.table[i].offset = v
-		}
-		e.cur = e.maxMatchOff
-		break
-	}
-
-	s := e.addBlock(src)
-	blk.size = len(src)
-	if len(src) < minNonLiteralBlockSize {
-		blk.extraLits = len(src)
-		blk.literals = blk.literals[:len(src)]
-		copy(blk.literals, src)
-		return
-	}
-
-	// Override src
-	src = e.hist
-	sLimit := int32(len(src)) - inputMargin
-	// stepSize is the number of bytes to skip on every main loop iteration.
-	// It should be >= 2.
-	const stepSize = 2
-
-	// TEMPLATE
-	const hashLog = tableBits
-	// seems global, but would be nice to tweak.
-	const kSearchStrength = 7
-
-	// nextEmit is where in src the next emitLiteral should start from.
-	nextEmit := s
-	cv := load6432(src, s)
-
-	// Relative offsets
-	offset1 := int32(blk.recentOffsets[0])
-	offset2 := int32(blk.recentOffsets[1])
-
-	addLiterals := func(s *seq, until int32) {
-		if until == nextEmit {
-			return
-		}
-		blk.literals = append(blk.literals, src[nextEmit:until]...)
-		s.litLen = uint32(until - nextEmit)
-	}
-	if debugEncoder {
-		println("recent offsets:", blk.recentOffsets)
-	}
-
-encodeLoop:
-	for {
-		// t will contain the match offset when we find one.
-		// When exiting the search loop, we have already checked 4 bytes.
-		var t int32
-
-		// We will not use repeat offsets across blocks.
-		// By not using them for the first 3 matches
-		canRepeat := len(blk.sequences) > 2
-
-		for {
-			if debugAsserts && canRepeat && offset1 == 0 {
-				panic("offset0 was 0")
-			}
-
-			nextHash := hashLen(cv, hashLog, tableFastHashLen)
-			nextHash2 := hashLen(cv>>8, hashLog, tableFastHashLen)
-			candidate := e.table[nextHash]
-			candidate2 := e.table[nextHash2]
-			repIndex := s - offset1 + 2
-
-			e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)}
-			e.markShardDirty(nextHash)
-			e.table[nextHash2] = tableEntry{offset: s + e.cur + 1, val: uint32(cv >> 8)}
-			e.markShardDirty(nextHash2)
-
-			if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) {
-				// Consider history as well.
-				var seq seq
-				length := 4 + e.matchlen(s+6, repIndex+4, src)
-
-				seq.matchLen = uint32(length - zstdMinMatch)
-
-				// We might be able to match backwards.
-				// Extend as long as we can.
-				start := s + 2
-				// We end the search early, so we don't risk 0 literals
-				// and have to do special offset treatment.
- startLimit := nextEmit + 1 - - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } - for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { - repIndex-- - start-- - seq.matchLen++ - } - addLiterals(&seq, start) - - // rep 0 - seq.offset = 1 - if debugSequences { - println("repeat sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - s += length + 2 - nextEmit = s - if s >= sLimit { - if debugEncoder { - println("repeat ended", s, length) - - } - break encodeLoop - } - cv = load6432(src, s) - continue - } - coffset0 := s - (candidate.offset - e.cur) - coffset1 := s - (candidate2.offset - e.cur) + 1 - if coffset0 < e.maxMatchOff && uint32(cv) == candidate.val { - // found a regular match - t = candidate.offset - e.cur - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - break - } - - if coffset1 < e.maxMatchOff && uint32(cv>>8) == candidate2.val { - // found a regular match - t = candidate2.offset - e.cur - s++ - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - if debugAsserts && s-t > e.maxMatchOff { - panic("s - t >e.maxMatchOff") - } - if debugAsserts && t < 0 { - panic("t<0") - } - break - } - s += stepSize + ((s - nextEmit) >> (kSearchStrength - 1)) - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - } - // A 4-byte match has been found. We'll later see if more than 4 bytes. - offset2 = offset1 - offset1 = s - t - - if debugAsserts && s <= t { - panic(fmt.Sprintf("s (%d) <= t (%d)", s, t)) - } - - if debugAsserts && canRepeat && int(offset1) > len(src) { - panic("invalid offset") - } - - // Extend the 4-byte match as long as possible. - l := e.matchlen(s+4, t+4, src) + 4 - - // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } - for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { - s-- - t-- - l++ - } - - // Write our sequence. - var seq seq - seq.litLen = uint32(s - nextEmit) - seq.matchLen = uint32(l - zstdMinMatch) - if seq.litLen > 0 { - blk.literals = append(blk.literals, src[nextEmit:s]...) - } - // Don't use repeat offsets - seq.offset = uint32(s-t) + 3 - s += l - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - nextEmit = s - if s >= sLimit { - break encodeLoop - } - cv = load6432(src, s) - - // Check offset 2 - if o2 := s - offset2; canRepeat && load3232(src, o2) == uint32(cv) { - // We have at least 4 byte match. - // No need to check backwards. We come straight from a match - l := 4 + e.matchlen(s+4, o2+4, src) - - // Store this, since we have it. - nextHash := hashLen(cv, hashLog, tableFastHashLen) - e.table[nextHash] = tableEntry{offset: s + e.cur, val: uint32(cv)} - e.markShardDirty(nextHash) - seq.matchLen = uint32(l) - zstdMinMatch - seq.litLen = 0 - // Since litlen is always 0, this is offset 1. - seq.offset = 1 - s += l - nextEmit = s - if debugSequences { - println("sequence", seq, "next s:", s) - } - blk.sequences = append(blk.sequences, seq) - - // Swap offset 1 and 2. - offset1, offset2 = offset2, offset1 - if s >= sLimit { - break encodeLoop - } - // Prepare next loop. - cv = load6432(src, s) - } - } - - if int(nextEmit) < len(src) { - blk.literals = append(blk.literals, src[nextEmit:]...) 
- blk.extraLits = len(src) - int(nextEmit) - } - blk.recentOffsets[0] = uint32(offset1) - blk.recentOffsets[1] = uint32(offset2) - if debugEncoder { - println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits) - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *fastEncoder) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d != nil { - panic("fastEncoder: Reset with dict") - } -} - -// ResetDict will reset and set a dictionary if not nil -func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) { - e.resetBase(d, singleBlock) - if d == nil { - return - } - - // Init or copy dict table - if len(e.dictTable) != len(e.table) || d.id != e.lastDictID { - if len(e.dictTable) != len(e.table) { - e.dictTable = make([]tableEntry, len(e.table)) - } - if true { - end := e.maxMatchOff + int32(len(d.content)) - 8 - for i := e.maxMatchOff; i < end; i += 2 { - const hashLog = tableBits - - cv := load6432(d.content, i-e.maxMatchOff) - nextHash := hashLen(cv, hashLog, tableFastHashLen) // 0 -> 6 - nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 7 - e.dictTable[nextHash] = tableEntry{ - val: uint32(cv), - offset: i, - } - e.dictTable[nextHash1] = tableEntry{ - val: uint32(cv >> 8), - offset: i + 1, - } - } - } - e.lastDictID = d.id - e.allDirty = true - } - - e.cur = e.maxMatchOff - dirtyShardCnt := 0 - if !e.allDirty { - for i := range e.tableShardDirty { - if e.tableShardDirty[i] { - dirtyShardCnt++ - } - } - } - - const shardCnt = tableShardCnt - const shardSize = tableShardSize - if e.allDirty || dirtyShardCnt > shardCnt*4/6 { - //copy(e.table[:], e.dictTable) - e.table = *(*[tableSize]tableEntry)(e.dictTable) - for i := range e.tableShardDirty { - e.tableShardDirty[i] = false - } - e.allDirty = false - return - } - for i := range e.tableShardDirty { - if !e.tableShardDirty[i] { - continue - } - - //copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize]) - *(*[shardSize]tableEntry)(e.table[i*shardSize:]) = *(*[shardSize]tableEntry)(e.dictTable[i*shardSize:]) - e.tableShardDirty[i] = false - } - e.allDirty = false -} - -func (e *fastEncoderDict) markAllShardsDirty() { - e.allDirty = true -} - -func (e *fastEncoderDict) markShardDirty(entryNum uint32) { - e.tableShardDirty[entryNum/tableShardSize] = true -} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go deleted file mode 100644 index a79c4a527c..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ /dev/null @@ -1,622 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "crypto/rand" - "fmt" - "io" - "math" - rdebug "runtime/debug" - "sync" - - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -// Encoder provides encoding to Zstandard. -// An Encoder can be used for either compressing a stream via the -// io.WriteCloser interface supported by the Encoder or as multiple independent -// tasks via the EncodeAll function. -// Smaller encodes are encouraged to use the EncodeAll function. -// Use NewWriter to create a new instance. 
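
As context for this removal, the Encoder doc comment above describes the package's two public encode paths. A minimal sketch of both (illustrative only, not part of the deleted file; it assumes the import path github.com/klauspost/compress/zstd provided by this vendor tree):

	package main

	import (
		"bytes"
		"io"
		"log"

		"github.com/klauspost/compress/zstd"
	)

	// A nil writer is valid when the Encoder is only used through EncodeAll.
	var blockEncoder, _ = zstd.NewWriter(nil)

	func main() {
		// Stream path: the Encoder doubles as an io.WriteCloser.
		var out bytes.Buffer
		enc, err := zstd.NewWriter(&out)
		if err != nil {
			log.Fatal(err)
		}
		if _, err := io.Copy(enc, bytes.NewReader([]byte("some input"))); err != nil {
			log.Fatal(err)
		}
		if err := enc.Close(); err != nil { // flushes buffered data and the CRC
			log.Fatal(err)
		}

		// One-shot path, encouraged above for smaller encodes: EncodeAll
		// appends a complete frame to dst.
		frame := blockEncoder.EncodeAll([]byte("some input"), nil)
		log.Printf("stream: %d bytes, one-shot: %d bytes", out.Len(), len(frame))
	}
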
-type Encoder struct { - o encoderOptions - encoders chan encoder - state encoderState - init sync.Once -} - -type encoder interface { - Encode(blk *blockEnc, src []byte) - EncodeNoHist(blk *blockEnc, src []byte) - Block() *blockEnc - CRC() *xxhash.Digest - AppendCRC([]byte) []byte - WindowSize(size int64) int32 - UseBlock(*blockEnc) - Reset(d *dict, singleBlock bool) -} - -type encoderState struct { - w io.Writer - filling []byte - current []byte - previous []byte - encoder encoder - writing *blockEnc - err error - writeErr error - nWritten int64 - nInput int64 - frameContentSize int64 - headerWritten bool - eofWritten bool - fullFrameWritten bool - - // This waitgroup indicates an encode is running. - wg sync.WaitGroup - // This waitgroup indicates we have a block encoding/writing. - wWg sync.WaitGroup -} - -// NewWriter will create a new Zstandard encoder. -// If the encoder will be used for encoding blocks a nil writer can be used. -func NewWriter(w io.Writer, opts ...EOption) (*Encoder, error) { - initPredefined() - var e Encoder - e.o.setDefault() - for _, o := range opts { - err := o(&e.o) - if err != nil { - return nil, err - } - } - if w != nil { - e.Reset(w) - } - return &e, nil -} - -func (e *Encoder) initialize() { - if e.o.concurrent == 0 { - e.o.setDefault() - } - e.encoders = make(chan encoder, e.o.concurrent) - for i := 0; i < e.o.concurrent; i++ { - enc := e.o.encoder() - e.encoders <- enc - } -} - -// Reset will re-initialize the writer and new writes will encode to the supplied writer -// as a new, independent stream. -func (e *Encoder) Reset(w io.Writer) { - s := &e.state - s.wg.Wait() - s.wWg.Wait() - if cap(s.filling) == 0 { - s.filling = make([]byte, 0, e.o.blockSize) - } - if e.o.concurrent > 1 { - if cap(s.current) == 0 { - s.current = make([]byte, 0, e.o.blockSize) - } - if cap(s.previous) == 0 { - s.previous = make([]byte, 0, e.o.blockSize) - } - s.current = s.current[:0] - s.previous = s.previous[:0] - if s.writing == nil { - s.writing = &blockEnc{lowMem: e.o.lowMem} - s.writing.init() - } - s.writing.initNewEncode() - } - if s.encoder == nil { - s.encoder = e.o.encoder() - } - s.filling = s.filling[:0] - s.encoder.Reset(e.o.dict, false) - s.headerWritten = false - s.eofWritten = false - s.fullFrameWritten = false - s.w = w - s.err = nil - s.nWritten = 0 - s.nInput = 0 - s.writeErr = nil - s.frameContentSize = 0 -} - -// ResetContentSize will reset and set a content size for the next stream. -// If the bytes written does not match the size given an error will be returned -// when calling Close(). -// This is removed when Reset is called. -// Sizes <= 0 results in no content size set. -func (e *Encoder) ResetContentSize(w io.Writer, size int64) { - e.Reset(w) - if size >= 0 { - e.state.frameContentSize = size - } -} - -// Write data to the encoder. -// Input data will be buffered and as the buffer fills up -// content will be compressed and written to the output. -// When done writing, use Close to flush the remaining output -// and write CRC if requested. -func (e *Encoder) Write(p []byte) (n int, err error) { - s := &e.state - for len(p) > 0 { - if len(p)+len(s.filling) < e.o.blockSize { - if e.o.crc { - _, _ = s.encoder.CRC().Write(p) - } - s.filling = append(s.filling, p...) - return n + len(p), nil - } - add := p - if len(p)+len(s.filling) > e.o.blockSize { - add = add[:e.o.blockSize-len(s.filling)] - } - if e.o.crc { - _, _ = s.encoder.CRC().Write(add) - } - s.filling = append(s.filling, add...) 
- p = p[len(add):] - n += len(add) - if len(s.filling) < e.o.blockSize { - return n, nil - } - err := e.nextBlock(false) - if err != nil { - return n, err - } - if debugAsserts && len(s.filling) > 0 { - panic(len(s.filling)) - } - } - return n, nil -} - -// nextBlock will synchronize and start compressing input in e.state.filling. -// If an error has occurred during encoding it will be returned. -func (e *Encoder) nextBlock(final bool) error { - s := &e.state - // Wait for current block. - s.wg.Wait() - if s.err != nil { - return s.err - } - if len(s.filling) > e.o.blockSize { - return fmt.Errorf("block > maxStoreBlockSize") - } - if !s.headerWritten { - // If we have a single block encode, do a sync compression. - if final && len(s.filling) == 0 && !e.o.fullZero { - s.headerWritten = true - s.fullFrameWritten = true - s.eofWritten = true - return nil - } - if final && len(s.filling) > 0 { - s.current = e.encodeAll(s.encoder, s.filling, s.current[:0]) - var n2 int - n2, s.err = s.w.Write(s.current) - if s.err != nil { - return s.err - } - s.nWritten += int64(n2) - s.nInput += int64(len(s.filling)) - s.current = s.current[:0] - s.filling = s.filling[:0] - s.headerWritten = true - s.fullFrameWritten = true - s.eofWritten = true - return nil - } - - var tmp [maxHeaderSize]byte - fh := frameHeader{ - ContentSize: uint64(s.frameContentSize), - WindowSize: uint32(s.encoder.WindowSize(s.frameContentSize)), - SingleSegment: false, - Checksum: e.o.crc, - DictID: e.o.dict.ID(), - } - - dst := fh.appendTo(tmp[:0]) - s.headerWritten = true - s.wWg.Wait() - var n2 int - n2, s.err = s.w.Write(dst) - if s.err != nil { - return s.err - } - s.nWritten += int64(n2) - } - if s.eofWritten { - // Ensure we only write it once. - final = false - } - - if len(s.filling) == 0 { - // Final block, but no data. - if final { - enc := s.encoder - blk := enc.Block() - blk.reset(nil) - blk.last = true - blk.encodeRaw(nil) - s.wWg.Wait() - _, s.err = s.w.Write(blk.output) - s.nWritten += int64(len(blk.output)) - s.eofWritten = true - } - return s.err - } - - // SYNC: - if e.o.concurrent == 1 { - src := s.filling - s.nInput += int64(len(s.filling)) - if debugEncoder { - println("Adding sync block,", len(src), "bytes, final:", final) - } - enc := s.encoder - blk := enc.Block() - blk.reset(nil) - enc.Encode(blk, src) - blk.last = final - if final { - s.eofWritten = true - } - - s.err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) - if s.err != nil { - return s.err - } - _, s.err = s.w.Write(blk.output) - s.nWritten += int64(len(blk.output)) - s.filling = s.filling[:0] - return s.err - } - - // Move blocks forward. - s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current - s.nInput += int64(len(s.current)) - s.wg.Add(1) - go func(src []byte) { - if debugEncoder { - println("Adding block,", len(src), "bytes, final:", final) - } - defer func() { - if r := recover(); r != nil { - s.err = fmt.Errorf("panic while encoding: %v", r) - rdebug.PrintStack() - } - s.wg.Done() - }() - enc := s.encoder - blk := enc.Block() - enc.Encode(blk, src) - blk.last = final - if final { - s.eofWritten = true - } - // Wait for pending writes. - s.wWg.Wait() - if s.writeErr != nil { - s.err = s.writeErr - return - } - // Transfer encoders from previous write block. - blk.swapEncoders(s.writing) - // Transfer recent offsets to next. 
- enc.UseBlock(s.writing) - s.writing = blk - s.wWg.Add(1) - go func() { - defer func() { - if r := recover(); r != nil { - s.writeErr = fmt.Errorf("panic while encoding/writing: %v", r) - rdebug.PrintStack() - } - s.wWg.Done() - }() - s.writeErr = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) - if s.writeErr != nil { - return - } - _, s.writeErr = s.w.Write(blk.output) - s.nWritten += int64(len(blk.output)) - }() - }(s.current) - return nil -} - -// ReadFrom reads data from r until EOF or error. -// The return value n is the number of bytes read. -// Any error except io.EOF encountered during the read is also returned. -// -// The Copy function uses ReaderFrom if available. -func (e *Encoder) ReadFrom(r io.Reader) (n int64, err error) { - if debugEncoder { - println("Using ReadFrom") - } - - // Flush any current writes. - if len(e.state.filling) > 0 { - if err := e.nextBlock(false); err != nil { - return 0, err - } - } - e.state.filling = e.state.filling[:e.o.blockSize] - src := e.state.filling - for { - n2, err := r.Read(src) - if e.o.crc { - _, _ = e.state.encoder.CRC().Write(src[:n2]) - } - // src is now the unfilled part... - src = src[n2:] - n += int64(n2) - switch err { - case io.EOF: - e.state.filling = e.state.filling[:len(e.state.filling)-len(src)] - if debugEncoder { - println("ReadFrom: got EOF final block:", len(e.state.filling)) - } - return n, nil - case nil: - default: - if debugEncoder { - println("ReadFrom: got error:", err) - } - e.state.err = err - return n, err - } - if len(src) > 0 { - if debugEncoder { - println("ReadFrom: got space left in source:", len(src)) - } - continue - } - err = e.nextBlock(false) - if err != nil { - return n, err - } - e.state.filling = e.state.filling[:e.o.blockSize] - src = e.state.filling - } -} - -// Flush will send the currently written data to output -// and block until everything has been written. -// This should only be used on rare occasions where pushing the currently queued data is critical. -func (e *Encoder) Flush() error { - s := &e.state - if len(s.filling) > 0 { - err := e.nextBlock(false) - if err != nil { - return err - } - } - s.wg.Wait() - s.wWg.Wait() - if s.err != nil { - return s.err - } - return s.writeErr -} - -// Close will flush the final output and close the stream. -// The function will block until everything has been written. -// The Encoder can still be re-used after calling this. -func (e *Encoder) Close() error { - s := &e.state - if s.encoder == nil { - return nil - } - err := e.nextBlock(true) - if err != nil { - return err - } - if s.frameContentSize > 0 { - if s.nInput != s.frameContentSize { - return fmt.Errorf("frame content size %d given, but %d bytes was written", s.frameContentSize, s.nInput) - } - } - if e.state.fullFrameWritten { - return s.err - } - s.wg.Wait() - s.wWg.Wait() - - if s.err != nil { - return s.err - } - if s.writeErr != nil { - return s.writeErr - } - - // Write CRC - if e.o.crc && s.err == nil { - // heap alloc. - var tmp [4]byte - _, s.err = s.w.Write(s.encoder.AppendCRC(tmp[:0])) - s.nWritten += 4 - } - - // Add padding with content from crypto/rand.Reader - if s.err == nil && e.o.pad > 0 { - add := calcSkippableFrame(s.nWritten, int64(e.o.pad)) - frame, err := skippableFrame(s.filling[:0], add, rand.Reader) - if err != nil { - return err - } - _, s.err = s.w.Write(frame) - } - return s.err -} - -// EncodeAll will encode all input in src and append it to dst. -// This function can be called concurrently, but each call will only run on a single goroutine. 
-// If empty input is given, nothing is returned, unless WithZeroFrames is specified. -// Encoded blocks can be concatenated and the result will be the combined input stream. -// Data compressed with EncodeAll can be decoded with the Decoder, -// using either a stream or DecodeAll. -func (e *Encoder) EncodeAll(src, dst []byte) []byte { - e.init.Do(e.initialize) - enc := <-e.encoders - defer func() { - e.encoders <- enc - }() - return e.encodeAll(enc, src, dst) -} - -func (e *Encoder) encodeAll(enc encoder, src, dst []byte) []byte { - if len(src) == 0 { - if e.o.fullZero { - // Add frame header. - fh := frameHeader{ - ContentSize: 0, - WindowSize: MinWindowSize, - SingleSegment: true, - // Adding a checksum would be a waste of space. - Checksum: false, - DictID: 0, - } - dst = fh.appendTo(dst) - - // Write raw block as last one only. - var blk blockHeader - blk.setSize(0) - blk.setType(blockTypeRaw) - blk.setLast(true) - dst = blk.appendTo(dst) - } - return dst - } - - // Use single segments when above minimum window and below window size. - single := len(src) <= e.o.windowSize && len(src) > MinWindowSize - if e.o.single != nil { - single = *e.o.single - } - fh := frameHeader{ - ContentSize: uint64(len(src)), - WindowSize: uint32(enc.WindowSize(int64(len(src)))), - SingleSegment: single, - Checksum: e.o.crc, - DictID: e.o.dict.ID(), - } - - // If less than 1MB, allocate a buffer up front. - if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem { - dst = make([]byte, 0, len(src)) - } - dst = fh.appendTo(dst) - - // If we can do everything in one block, prefer that. - if len(src) <= e.o.blockSize { - enc.Reset(e.o.dict, true) - // Slightly faster with no history and everything in one block. - if e.o.crc { - _, _ = enc.CRC().Write(src) - } - blk := enc.Block() - blk.last = true - if e.o.dict == nil { - enc.EncodeNoHist(blk, src) - } else { - enc.Encode(blk, src) - } - - // If we got the exact same number of literals as input, - // assume the literals cannot be compressed. - oldout := blk.output - // Output directly to dst - blk.output = dst - - err := blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) - if err != nil { - panic(err) - } - dst = blk.output - blk.output = oldout - } else { - enc.Reset(e.o.dict, false) - blk := enc.Block() - for len(src) > 0 { - todo := src - if len(todo) > e.o.blockSize { - todo = todo[:e.o.blockSize] - } - src = src[len(todo):] - if e.o.crc { - _, _ = enc.CRC().Write(todo) - } - blk.pushOffsets() - enc.Encode(blk, todo) - if len(src) == 0 { - blk.last = true - } - err := blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy) - if err != nil { - panic(err) - } - dst = append(dst, blk.output...) - blk.reset(nil) - } - } - if e.o.crc { - dst = enc.AppendCRC(dst) - } - // Add padding with content from crypto/rand.Reader - if e.o.pad > 0 { - add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad)) - var err error - dst, err = skippableFrame(dst, add, rand.Reader) - if err != nil { - panic(err) - } - } - return dst -} - -// MaxEncodedSize returns the expected maximum -// size of an encoded block or stream. -func (e *Encoder) MaxEncodedSize(size int) int { - frameHeader := 4 + 2 // magic + frame header & window descriptor - if e.o.dict != nil { - frameHeader += 4 - } - // Frame content size: - if size < 256 { - frameHeader++ - } else if size < 65536+256 { - frameHeader += 2 - } else if size < math.MaxInt32 { - frameHeader += 4 - } else { - frameHeader += 8 - } - // Final crc - if e.o.crc { - frameHeader += 4 - } - - // Max overhead is 3 bytes/block. 
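// (Context, not in the original file: the 3 bytes/block bound matches the
// zstd block header, which is a 1-bit Last_Block flag, a 2-bit Block_Type,
// and a 21-bit Block_Size, per RFC 8878.)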
- // There cannot be 0 blocks. - blocks := (size + e.o.blockSize) / e.o.blockSize - - // Combine, add padding. - maxSz := frameHeader + 3*blocks + size - if e.o.pad > 1 { - maxSz += calcSkippableFrame(int64(maxSz), int64(e.o.pad)) - } - return maxSz -} diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go deleted file mode 100644 index 20671dcb91..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ /dev/null @@ -1,339 +0,0 @@ -package zstd - -import ( - "errors" - "fmt" - "math" - "math/bits" - "runtime" - "strings" -) - -// EOption is an option for creating a encoder. -type EOption func(*encoderOptions) error - -// options retains accumulated state of multiple options. -type encoderOptions struct { - concurrent int - level EncoderLevel - single *bool - pad int - blockSize int - windowSize int - crc bool - fullZero bool - noEntropy bool - allLitEntropy bool - customWindow bool - customALEntropy bool - customBlockSize bool - lowMem bool - dict *dict -} - -func (o *encoderOptions) setDefault() { - *o = encoderOptions{ - concurrent: runtime.GOMAXPROCS(0), - crc: true, - single: nil, - blockSize: maxCompressedBlockSize, - windowSize: 8 << 20, - level: SpeedDefault, - allLitEntropy: false, - lowMem: false, - } -} - -// encoder returns an encoder with the selected options. -func (o encoderOptions) encoder() encoder { - switch o.level { - case SpeedFastest: - if o.dict != nil { - return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} - } - return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} - - case SpeedDefault: - if o.dict != nil { - return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}} - } - return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} - case SpeedBetterCompression: - if o.dict != nil { - return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}} - } - return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} - case SpeedBestCompression: - return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}} - } - panic("unknown compression level") -} - -// WithEncoderCRC will add CRC value to output. -// Output will be 4 bytes larger. -func WithEncoderCRC(b bool) EOption { - return func(o *encoderOptions) error { o.crc = b; return nil } -} - -// WithEncoderConcurrency will set the concurrency, -// meaning the maximum number of encoders to run concurrently. -// The value supplied must be at least 1. -// For streams, setting a value of 1 will disable async compression. -// By default this will be set to GOMAXPROCS. 
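
Ahead of the constructor that follows, a short sketch of how these option helpers compose at the call site (illustrative; same import-path assumption as the earlier example, and the chosen values are arbitrary):

	package zstdexample

	import (
		"io"

		"github.com/klauspost/compress/zstd"
	)

	func newTunedWriter(w io.Writer) (*zstd.Encoder, error) {
		return zstd.NewWriter(w,
			zstd.WithEncoderLevel(zstd.SpeedBetterCompression),
			zstd.WithEncoderConcurrency(4), // cap workers below GOMAXPROCS
			zstd.WithEncoderCRC(false),     // omit the 4-byte frame checksum
		)
	}
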
-func WithEncoderConcurrency(n int) EOption { - return func(o *encoderOptions) error { - if n <= 0 { - return fmt.Errorf("concurrency must be at least 1") - } - o.concurrent = n - return nil - } -} - -// WithWindowSize will set the maximum allowed back-reference distance. -// The value must be a power of two between MinWindowSize and MaxWindowSize. -// A larger value will enable better compression but allocate more memory and, -// for above-default values, take considerably longer. -// The default value is determined by the compression level and max 8MB. -func WithWindowSize(n int) EOption { - return func(o *encoderOptions) error { - switch { - case n < MinWindowSize: - return fmt.Errorf("window size must be at least %d", MinWindowSize) - case n > MaxWindowSize: - return fmt.Errorf("window size must be at most %d", MaxWindowSize) - case (n & (n - 1)) != 0: - return errors.New("window size must be a power of 2") - } - - o.windowSize = n - o.customWindow = true - if o.blockSize > o.windowSize { - o.blockSize = o.windowSize - o.customBlockSize = true - } - return nil - } -} - -// WithEncoderPadding will add padding to all output so the size will be a multiple of n. -// This can be used to obfuscate the exact output size or make blocks of a certain size. -// The contents will be a skippable frame, so it will be invisible by the decoder. -// n must be > 0 and <= 1GB, 1<<30 bytes. -// The padded area will be filled with data from crypto/rand.Reader. -// If `EncodeAll` is used with data already in the destination, the total size will be multiple of this. -func WithEncoderPadding(n int) EOption { - return func(o *encoderOptions) error { - if n <= 0 { - return fmt.Errorf("padding must be at least 1") - } - // No need to waste our time. - if n == 1 { - n = 0 - } - if n > 1<<30 { - return fmt.Errorf("padding must less than 1GB (1<<30 bytes) ") - } - o.pad = n - return nil - } -} - -// EncoderLevel predefines encoder compression levels. -// Only use the constants made available, since the actual mapping -// of these values are very likely to change and your compression could change -// unpredictably when upgrading the library. -type EncoderLevel int - -const ( - speedNotSet EncoderLevel = iota - - // SpeedFastest will choose the fastest reasonable compression. - // This is roughly equivalent to the fastest Zstandard mode. - SpeedFastest - - // SpeedDefault is the default "pretty fast" compression option. - // This is roughly equivalent to the default Zstandard mode (level 3). - SpeedDefault - - // SpeedBetterCompression will yield better compression than the default. - // Currently it is about zstd level 7-8 with ~ 2x-3x the default CPU usage. - // By using this, notice that CPU usage may go up in the future. - SpeedBetterCompression - - // SpeedBestCompression will choose the best available compression option. - // This will offer the best compression no matter the CPU cost. - SpeedBestCompression - - // speedLast should be kept as the last actual compression option. - // The is not for external usage, but is used to keep track of the valid options. - speedLast -) - -// EncoderLevelFromString will convert a string representation of an encoding level back -// to a compression level. The compare is not case sensitive. -// If the string wasn't recognized, (false, SpeedDefault) will be returned. 
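
A small usage sketch for the level helpers in this file (illustrative; same import-path assumption as above):

	package main

	import (
		"fmt"

		"github.com/klauspost/compress/zstd"
	)

	func main() {
		// Names correspond to EncoderLevel.String(): "fastest", "default",
		// "better", "best"; the comparison is case-insensitive.
		if ok, lvl := zstd.EncoderLevelFromString("BEST"); ok {
			fmt.Println(lvl) // best
		}
		// Map a reference-zstd numeric level onto the predefined levels;
		// levels 6 through 9 collapse to SpeedBetterCompression.
		fmt.Println(zstd.EncoderLevelFromZstd(7)) // better
	}
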
-func EncoderLevelFromString(s string) (bool, EncoderLevel) { - for l := speedNotSet + 1; l < speedLast; l++ { - if strings.EqualFold(s, l.String()) { - return true, l - } - } - return false, SpeedDefault -} - -// EncoderLevelFromZstd will return an encoder level that closest matches the compression -// ratio of a specific zstd compression level. -// Many input values will provide the same compression level. -func EncoderLevelFromZstd(level int) EncoderLevel { - switch { - case level < 3: - return SpeedFastest - case level >= 3 && level < 6: - return SpeedDefault - case level >= 6 && level < 10: - return SpeedBetterCompression - default: - return SpeedBestCompression - } -} - -// String provides a string representation of the compression level. -func (e EncoderLevel) String() string { - switch e { - case SpeedFastest: - return "fastest" - case SpeedDefault: - return "default" - case SpeedBetterCompression: - return "better" - case SpeedBestCompression: - return "best" - default: - return "invalid" - } -} - -// WithEncoderLevel specifies a predefined compression level. -func WithEncoderLevel(l EncoderLevel) EOption { - return func(o *encoderOptions) error { - switch { - case l <= speedNotSet || l >= speedLast: - return fmt.Errorf("unknown encoder level") - } - o.level = l - if !o.customWindow { - switch o.level { - case SpeedFastest: - o.windowSize = 4 << 20 - if !o.customBlockSize { - o.blockSize = 1 << 16 - } - case SpeedDefault: - o.windowSize = 8 << 20 - case SpeedBetterCompression: - o.windowSize = 8 << 20 - case SpeedBestCompression: - o.windowSize = 8 << 20 - } - } - if !o.customALEntropy { - o.allLitEntropy = l > SpeedDefault - } - - return nil - } -} - -// WithZeroFrames will encode 0 length input as full frames. -// This can be needed for compatibility with zstandard usage, -// but is not needed for this package. -func WithZeroFrames(b bool) EOption { - return func(o *encoderOptions) error { - o.fullZero = b - return nil - } -} - -// WithAllLitEntropyCompression will apply entropy compression if no matches are found. -// Disabling this will skip incompressible data faster, but in cases with no matches but -// skewed character distribution compression is lost. -// Default value depends on the compression level selected. -func WithAllLitEntropyCompression(b bool) EOption { - return func(o *encoderOptions) error { - o.customALEntropy = true - o.allLitEntropy = b - return nil - } -} - -// WithNoEntropyCompression will always skip entropy compression of literals. -// This can be useful if content has matches, but unlikely to benefit from entropy -// compression. Usually the slight speed improvement is not worth enabling this. -func WithNoEntropyCompression(b bool) EOption { - return func(o *encoderOptions) error { - o.noEntropy = b - return nil - } -} - -// WithSingleSegment will set the "single segment" flag when EncodeAll is used. -// If this flag is set, data must be regenerated within a single continuous memory segment. -// In this case, Window_Descriptor byte is skipped, but Frame_Content_Size is necessarily present. -// As a consequence, the decoder must allocate a memory segment of size equal or larger than size of your content. -// In order to preserve the decoder from unreasonable memory requirements, -// a decoder is allowed to reject a compressed frame which requests a memory size beyond decoder's authorized range. -// For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB. 
-// This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations. -// If this is not specified, block encodes will automatically choose this based on the input size and the window size. -// This setting has no effect on streamed encodes. -func WithSingleSegment(b bool) EOption { - return func(o *encoderOptions) error { - o.single = &b - return nil - } -} - -// WithLowerEncoderMem will trade in some memory cases trade less memory usage for -// slower encoding speed. -// This will not change the window size which is the primary function for reducing -// memory usage. See WithWindowSize. -func WithLowerEncoderMem(b bool) EOption { - return func(o *encoderOptions) error { - o.lowMem = b - return nil - } -} - -// WithEncoderDict allows to register a dictionary that will be used for the encode. -// -// The slice dict must be in the [dictionary format] produced by -// "zstd --train" from the Zstandard reference implementation. -// -// The encoder *may* choose to use no dictionary instead for certain payloads. -// -// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format -func WithEncoderDict(dict []byte) EOption { - return func(o *encoderOptions) error { - d, err := loadDict(dict) - if err != nil { - return err - } - o.dict = d - return nil - } -} - -// WithEncoderDictRaw registers a dictionary that may be used by the encoder. -// -// The slice content may contain arbitrary data. It will be used as an initial -// history. -func WithEncoderDictRaw(id uint32, content []byte) EOption { - return func(o *encoderOptions) error { - if bits.UintSize > 32 && uint(len(content)) > dictMaxLength { - return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content)) - } - o.dict = &dict{id: id, content: content, offsets: [3]int{1, 4, 8}} - return nil - } -} diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go deleted file mode 100644 index e47af66e7c..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ /dev/null @@ -1,415 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "encoding/binary" - "encoding/hex" - "errors" - "io" - - "github.com/klauspost/compress/zstd/internal/xxhash" -) - -type frameDec struct { - o decoderOptions - crc *xxhash.Digest - - WindowSize uint64 - - // Frame history passed between blocks - history history - - rawInput byteBuffer - - // Byte buffer that can be reused for small input blocks. - bBuf byteBuf - - FrameContentSize uint64 - - DictionaryID uint32 - HasCheckSum bool - SingleSegment bool -} - -const ( - // MinWindowSize is the minimum Window Size, which is 1 KB. - MinWindowSize = 1 << 10 - - // MaxWindowSize is the maximum encoder window size - // and the default decoder maximum window size. - MaxWindowSize = 1 << 29 -) - -const ( - frameMagic = "\x28\xb5\x2f\xfd" - skippableFrameMagic = "\x2a\x4d\x18" -) - -func newFrameDec(o decoderOptions) *frameDec { - if o.maxWindowSize > o.maxDecodedSize { - o.maxWindowSize = o.maxDecodedSize - } - d := frameDec{ - o: o, - } - return &d -} - -// reset will read the frame header and prepare for block decoding. -// If nothing can be read from the input, io.EOF will be returned. -// Any other error indicated that the stream contained data, but -// there was a problem. 
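
Before the reset implementation that follows, a standalone sketch of the magic-number test it performs (illustrative; the string constants mirror frameMagic and skippableFrameMagic above, and the &0xf0 == 0x50 test reflects that a skippable frame's first byte only fixes the high nibble):

	package main

	import "fmt"

	func classify(sig [4]byte) string {
		switch {
		case string(sig[:]) == "\x28\xb5\x2f\xfd":
			return "zstd frame"
		case string(sig[1:]) == "\x2a\x4d\x18" && sig[0]&0xf0 == 0x50:
			return "skippable frame"
		default:
			return "not a zstd stream"
		}
	}

	func main() {
		fmt.Println(classify([4]byte{0x28, 0xb5, 0x2f, 0xfd})) // zstd frame
		fmt.Println(classify([4]byte{0x5e, 0x2a, 0x4d, 0x18})) // skippable frame
	}
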
-func (d *frameDec) reset(br byteBuffer) error { - d.HasCheckSum = false - d.WindowSize = 0 - var signature [4]byte - for { - var err error - // Check if we can read more... - b, err := br.readSmall(1) - switch err { - case io.EOF, io.ErrUnexpectedEOF: - return io.EOF - case nil: - signature[0] = b[0] - default: - return err - } - // Read the rest, don't allow io.ErrUnexpectedEOF - b, err = br.readSmall(3) - switch err { - case io.EOF: - return io.EOF - case nil: - copy(signature[1:], b) - default: - return err - } - - if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 { - if debugDecoder { - println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString([]byte(skippableFrameMagic))) - } - // Break if not skippable frame. - break - } - // Read size to skip - b, err = br.readSmall(4) - if err != nil { - if debugDecoder { - println("Reading Frame Size", err) - } - return err - } - n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - println("Skipping frame with", n, "bytes.") - err = br.skipN(int64(n)) - if err != nil { - if debugDecoder { - println("Reading discarded frame", err) - } - return err - } - } - if string(signature[:]) != frameMagic { - if debugDecoder { - println("Got magic numbers: ", signature, "want:", []byte(frameMagic)) - } - return ErrMagicMismatch - } - - // Read Frame_Header_Descriptor - fhd, err := br.readByte() - if err != nil { - if debugDecoder { - println("Reading Frame_Header_Descriptor", err) - } - return err - } - d.SingleSegment = fhd&(1<<5) != 0 - - if fhd&(1<<3) != 0 { - return errors.New("reserved bit set on frame header") - } - - // Read Window_Descriptor - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor - d.WindowSize = 0 - if !d.SingleSegment { - wd, err := br.readByte() - if err != nil { - if debugDecoder { - println("Reading Window_Descriptor", err) - } - return err - } - if debugDecoder { - printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) - } - windowLog := 10 + (wd >> 3) - windowBase := uint64(1) << windowLog - windowAdd := (windowBase / 8) * uint64(wd&0x7) - d.WindowSize = windowBase + windowAdd - } - - // Read Dictionary_ID - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id - d.DictionaryID = 0 - if size := fhd & 3; size != 0 { - if size == 3 { - size = 4 - } - - b, err := br.readSmall(int(size)) - if err != nil { - println("Reading Dictionary_ID", err) - return err - } - var id uint32 - switch len(b) { - case 1: - id = uint32(b[0]) - case 2: - id = uint32(b[0]) | (uint32(b[1]) << 8) - case 4: - id = uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - } - if debugDecoder { - println("Dict size", size, "ID:", id) - } - d.DictionaryID = id - } - - // Read Frame_Content_Size - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#frame_content_size - var fcsSize int - v := fhd >> 6 - switch v { - case 0: - if d.SingleSegment { - fcsSize = 1 - } - default: - fcsSize = 1 << v - } - d.FrameContentSize = fcsUnknown - if fcsSize > 0 { - b, err := br.readSmall(fcsSize) - if err != nil { - println("Reading Frame content", err) - return err - } - switch len(b) { - case 1: - d.FrameContentSize = uint64(b[0]) - case 2: - // When FCS_Field_Size is 2, the offset of 256 is added. 
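// (Context, not part of the original file: a 1-byte field already covers
// sizes 0-255, so the 2-byte encoding is offset by 256 and spans 256-65791;
// see RFC 8878, Frame_Content_Size.)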
- d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) + 256 - case 4: - d.FrameContentSize = uint64(b[0]) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) - case 8: - d1 := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24) - d2 := uint32(b[4]) | (uint32(b[5]) << 8) | (uint32(b[6]) << 16) | (uint32(b[7]) << 24) - d.FrameContentSize = uint64(d1) | (uint64(d2) << 32) - } - if debugDecoder { - println("Read FCS:", d.FrameContentSize) - } - } - - // Move this to shared. - d.HasCheckSum = fhd&(1<<2) != 0 - if d.HasCheckSum { - if d.crc == nil { - d.crc = xxhash.New() - } - d.crc.Reset() - } - - if d.WindowSize > d.o.maxWindowSize { - if debugDecoder { - printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) - } - return ErrWindowSizeExceeded - } - - if d.WindowSize == 0 && d.SingleSegment { - // We may not need window in this case. - d.WindowSize = d.FrameContentSize - if d.WindowSize < MinWindowSize { - d.WindowSize = MinWindowSize - } - if d.WindowSize > d.o.maxDecodedSize { - if debugDecoder { - printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) - } - return ErrDecoderSizeExceeded - } - } - - // The minimum Window_Size is 1 KB. - if d.WindowSize < MinWindowSize { - if debugDecoder { - println("got window size: ", d.WindowSize) - } - return ErrWindowSizeTooSmall - } - d.history.windowSize = int(d.WindowSize) - if !d.o.lowMem || d.history.windowSize < maxBlockSize { - // Alloc 2x window size if not low-mem, or window size below 2MB. - d.history.allocFrameBuffer = d.history.windowSize * 2 - } else { - if d.o.lowMem { - // Alloc with 1MB extra. - d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize/2 - } else { - // Alloc with 2MB extra. - d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize - } - } - - if debugDecoder { - println("Frame: Dict:", d.DictionaryID, "FrameContentSize:", d.FrameContentSize, "singleseg:", d.SingleSegment, "window:", d.WindowSize, "crc:", d.HasCheckSum) - } - - // history contains input - maybe we do something - d.rawInput = br - return nil -} - -// next will start decoding the next block from stream. -func (d *frameDec) next(block *blockDec) error { - if debugDecoder { - println("decoding new block") - } - err := block.reset(d.rawInput, d.WindowSize) - if err != nil { - println("block error:", err) - // Signal the frame decoder we have a problem. - block.sendErr(err) - return err - } - return nil -} - -// checkCRC will check the checksum, assuming the frame has one. -// Will return ErrCRCMismatch if crc check failed, otherwise nil. -func (d *frameDec) checkCRC() error { - // We can overwrite upper tmp now - buf, err := d.rawInput.readSmall(4) - if err != nil { - println("CRC missing?", err) - return err - } - - want := binary.LittleEndian.Uint32(buf[:4]) - got := uint32(d.crc.Sum64()) - - if got != want { - if debugDecoder { - printf("CRC check failed: got %08x, want %08x\n", got, want) - } - return ErrCRCMismatch - } - if debugDecoder { - printf("CRC ok %08x\n", got) - } - return nil -} - -// consumeCRC skips over the checksum, assuming the frame has one. -func (d *frameDec) consumeCRC() error { - _, err := d.rawInput.readSmall(4) - if err != nil { - println("CRC missing?", err) - } - return err -} - -// runDecoder will run the decoder for the remainder of the frame. -func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { - saved := d.history.b - - // We use the history for output to avoid copying it. 
- d.history.b = dst - d.history.ignoreBuffer = len(dst) - // Store input length, so we only check new data. - crcStart := len(dst) - d.history.decoders.maxSyncLen = 0 - if d.o.limitToCap { - d.history.decoders.maxSyncLen = uint64(cap(dst) - len(dst)) - } - if d.FrameContentSize != fcsUnknown { - if !d.o.limitToCap || d.FrameContentSize+uint64(len(dst)) < d.history.decoders.maxSyncLen { - d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst)) - } - if d.history.decoders.maxSyncLen > d.o.maxDecodedSize { - if debugDecoder { - println("maxSyncLen:", d.history.decoders.maxSyncLen, "> maxDecodedSize:", d.o.maxDecodedSize) - } - return dst, ErrDecoderSizeExceeded - } - if debugDecoder { - println("maxSyncLen:", d.history.decoders.maxSyncLen) - } - if !d.o.limitToCap && uint64(cap(dst)) < d.history.decoders.maxSyncLen { - // Alloc for output - dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc) - copy(dst2, dst) - dst = dst2 - } - } - var err error - for { - err = dec.reset(d.rawInput, d.WindowSize) - if err != nil { - break - } - if debugDecoder { - println("next block:", dec) - } - err = dec.decodeBuf(&d.history) - if err != nil { - break - } - if uint64(len(d.history.b)-crcStart) > d.o.maxDecodedSize { - println("runDecoder: maxDecodedSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.o.maxDecodedSize) - err = ErrDecoderSizeExceeded - break - } - if d.o.limitToCap && len(d.history.b) > cap(dst) { - println("runDecoder: cap exceeded", uint64(len(d.history.b)), ">", cap(dst)) - err = ErrDecoderSizeExceeded - break - } - if uint64(len(d.history.b)-crcStart) > d.FrameContentSize { - println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize) - err = ErrFrameSizeExceeded - break - } - if dec.Last { - break - } - if debugDecoder { - println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize) - } - } - dst = d.history.b - if err == nil { - if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize { - err = ErrFrameSizeMismatch - } else if d.HasCheckSum { - if d.o.ignoreChecksum { - err = d.consumeCRC() - } else { - d.crc.Write(dst[crcStart:]) - err = d.checkCRC() - } - } - } - d.history.b = saved - return dst, err -} diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go deleted file mode 100644 index 667ca06794..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/frameenc.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "encoding/binary" - "fmt" - "io" - "math" - "math/bits" -) - -type frameHeader struct { - ContentSize uint64 - WindowSize uint32 - SingleSegment bool - Checksum bool - DictID uint32 -} - -const maxHeaderSize = 14 - -func (f frameHeader) appendTo(dst []byte) []byte { - dst = append(dst, frameMagic...) 
- var fhd uint8 - if f.Checksum { - fhd |= 1 << 2 - } - if f.SingleSegment { - fhd |= 1 << 5 - } - - var dictIDContent []byte - if f.DictID > 0 { - var tmp [4]byte - if f.DictID < 256 { - fhd |= 1 - tmp[0] = uint8(f.DictID) - dictIDContent = tmp[:1] - } else if f.DictID < 1<<16 { - fhd |= 2 - binary.LittleEndian.PutUint16(tmp[:2], uint16(f.DictID)) - dictIDContent = tmp[:2] - } else { - fhd |= 3 - binary.LittleEndian.PutUint32(tmp[:4], f.DictID) - dictIDContent = tmp[:4] - } - } - var fcs uint8 - if f.ContentSize >= 256 { - fcs++ - } - if f.ContentSize >= 65536+256 { - fcs++ - } - if f.ContentSize >= 0xffffffff { - fcs++ - } - - fhd |= fcs << 6 - - dst = append(dst, fhd) - if !f.SingleSegment { - const winLogMin = 10 - windowLog := (bits.Len32(f.WindowSize-1) - winLogMin) << 3 - dst = append(dst, uint8(windowLog)) - } - if f.DictID > 0 { - dst = append(dst, dictIDContent...) - } - switch fcs { - case 0: - if f.SingleSegment { - dst = append(dst, uint8(f.ContentSize)) - } - // Unless SingleSegment is set, framessizes < 256 are not stored. - case 1: - f.ContentSize -= 256 - dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8)) - case 2: - dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24)) - case 3: - dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8), uint8(f.ContentSize>>16), uint8(f.ContentSize>>24), - uint8(f.ContentSize>>32), uint8(f.ContentSize>>40), uint8(f.ContentSize>>48), uint8(f.ContentSize>>56)) - default: - panic("invalid fcs") - } - return dst -} - -const skippableFrameHeader = 4 + 4 - -// calcSkippableFrame will return a total size to be added for written -// to be divisible by multiple. -// The value will always be > skippableFrameHeader. -// The function will panic if written < 0 or wantMultiple <= 0. -func calcSkippableFrame(written, wantMultiple int64) int { - if wantMultiple <= 0 { - panic("wantMultiple <= 0") - } - if written < 0 { - panic("written < 0") - } - leftOver := written % wantMultiple - if leftOver == 0 { - return 0 - } - toAdd := wantMultiple - leftOver - for toAdd < skippableFrameHeader { - toAdd += wantMultiple - } - return int(toAdd) -} - -// skippableFrame will add a skippable frame with a total size of bytes. -// total should be >= skippableFrameHeader and < math.MaxUint32. -func skippableFrame(dst []byte, total int, r io.Reader) ([]byte, error) { - if total == 0 { - return dst, nil - } - if total < skippableFrameHeader { - return dst, fmt.Errorf("requested skippable frame (%d) < 8", total) - } - if int64(total) > math.MaxUint32 { - return dst, fmt.Errorf("requested skippable frame (%d) > max uint32", total) - } - dst = append(dst, 0x50, 0x2a, 0x4d, 0x18) - f := uint32(total - skippableFrameHeader) - dst = append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)) - start := len(dst) - dst = append(dst, make([]byte, f)...) - _, err := io.ReadFull(r, dst[start:]) - return dst, err -} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder.go deleted file mode 100644 index 2f8860a722..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder.go +++ /dev/null @@ -1,307 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. 
- -package zstd - -import ( - "encoding/binary" - "errors" - "fmt" - "io" -) - -const ( - tablelogAbsoluteMax = 9 -) - -const ( - /*!MEMORY_USAGE : - * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.) - * Increasing memory usage improves compression ratio - * Reduced memory usage can improve speed, due to cache effect - * Recommended max value is 14, for 16KB, which nicely fits into Intel x86 L1 cache */ - maxMemoryUsage = tablelogAbsoluteMax + 2 - - maxTableLog = maxMemoryUsage - 2 - maxTablesize = 1 << maxTableLog - maxTableMask = (1 << maxTableLog) - 1 - minTablelog = 5 - maxSymbolValue = 255 -) - -// fseDecoder provides temporary storage for compression and decompression. -type fseDecoder struct { - dt [maxTablesize]decSymbol // Decompression table. - symbolLen uint16 // Length of active part of the symbol table. - actualTableLog uint8 // Selected tablelog. - maxBits uint8 // Maximum number of additional bits - - // used for table creation to avoid allocations. - stateTable [256]uint16 - norm [maxSymbolValue + 1]int16 - preDefined bool -} - -// tableStep returns the next table index. -func tableStep(tableSize uint32) uint32 { - return (tableSize >> 1) + (tableSize >> 3) + 3 -} - -// readNCount will read the symbol distribution so decoding tables can be constructed. -func (s *fseDecoder) readNCount(b *byteReader, maxSymbol uint16) error { - var ( - charnum uint16 - previous0 bool - ) - if b.remain() < 4 { - return errors.New("input too small") - } - bitStream := b.Uint32NC() - nbBits := uint((bitStream & 0xF) + minTablelog) // extract tableLog - if nbBits > tablelogAbsoluteMax { - println("Invalid tablelog:", nbBits) - return errors.New("tableLog too large") - } - bitStream >>= 4 - bitCount := uint(4) - - s.actualTableLog = uint8(nbBits) - remaining := int32((1 << nbBits) + 1) - threshold := int32(1 << nbBits) - gotTotal := int32(0) - nbBits++ - - for remaining > 1 && charnum <= maxSymbol { - if previous0 { - //println("prev0") - n0 := charnum - for (bitStream & 0xFFFF) == 0xFFFF { - //println("24 x 0") - n0 += 24 - if r := b.remain(); r > 5 { - b.advance(2) - // The check above should make sure we can read 32 bits - bitStream = b.Uint32NC() >> bitCount - } else { - // end of bit stream - bitStream >>= 16 - bitCount += 16 - } - } - //printf("bitstream: %d, 0b%b", bitStream&3, bitStream) - for (bitStream & 3) == 3 { - n0 += 3 - bitStream >>= 2 - bitCount += 2 - } - n0 += uint16(bitStream & 3) - bitCount += 2 - - if n0 > maxSymbolValue { - return errors.New("maxSymbolValue too small") - } - //println("inserting ", n0-charnum, "zeroes from idx", charnum, "ending before", n0) - for charnum < n0 { - s.norm[uint8(charnum)] = 0 - charnum++ - } - - if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 { - b.advance(bitCount >> 3) - bitCount &= 7 - // The check above should make sure we can read 32 bits - bitStream = b.Uint32NC() >> bitCount - } else { - bitStream >>= 2 - } - } - - max := (2*threshold - 1) - remaining - var count int32 - - if int32(bitStream)&(threshold-1) < max { - count = int32(bitStream) & (threshold - 1) - if debugAsserts && nbBits < 1 { - panic("nbBits underflow") - } - bitCount += nbBits - 1 - } else { - count = int32(bitStream) & (2*threshold - 1) - if count >= threshold { - count -= max - } - bitCount += nbBits - } - - // extra accuracy - count-- - if count < 0 { - // -1 means +1 - remaining += count - gotTotal -= count - } else { - remaining -= count - gotTotal += count - } - s.norm[charnum&0xff] = int16(count) - 
-		charnum++
-		previous0 = count == 0
-		for remaining < threshold {
-			nbBits--
-			threshold >>= 1
-		}
-
-		if r := b.remain(); r >= 7 || r-int(bitCount>>3) >= 4 {
-			b.advance(bitCount >> 3)
-			bitCount &= 7
-			// The check above should make sure we can read 32 bits
-			bitStream = b.Uint32NC() >> (bitCount & 31)
-		} else {
-			bitCount -= (uint)(8 * (len(b.b) - 4 - b.off))
-			b.off = len(b.b) - 4
-			bitStream = b.Uint32() >> (bitCount & 31)
-		}
-	}
-	s.symbolLen = charnum
-	if s.symbolLen <= 1 {
-		return fmt.Errorf("symbolLen (%d) too small", s.symbolLen)
-	}
-	if s.symbolLen > maxSymbolValue+1 {
-		return fmt.Errorf("symbolLen (%d) too big", s.symbolLen)
-	}
-	if remaining != 1 {
-		return fmt.Errorf("corruption detected (remaining %d != 1)", remaining)
-	}
-	if bitCount > 32 {
-		return fmt.Errorf("corruption detected (bitCount %d > 32)", bitCount)
-	}
-	if gotTotal != 1<<s.actualTableLog {
-		return fmt.Errorf("corruption detected (total %d != %d)", gotTotal, 1<<s.actualTableLog)
-	}
-	b.advance((bitCount + 7) >> 3)
-	return s.buildDtable()
-}
-
-func (s *fseDecoder) mustReadFrom(r io.Reader) {
-	fatalErr := func(err error) {
-		if err != nil {
-			panic(err)
-		}
-	}
-	// 	dt             [maxTablesize]decSymbol // Decompression table.
-	//	symbolLen      uint16                  // Length of active part of the symbol table.
-	//	actualTableLog uint8                   // Selected tablelog.
-	//	maxBits        uint8                   // Maximum number of additional bits
-	//	// used for table creation to avoid allocations.
-	//	stateTable [256]uint16
-	//	norm       [maxSymbolValue + 1]int16
-	//	preDefined bool
-	fatalErr(binary.Read(r, binary.LittleEndian, &s.dt))
-	fatalErr(binary.Read(r, binary.LittleEndian, &s.symbolLen))
-	fatalErr(binary.Read(r, binary.LittleEndian, &s.actualTableLog))
-	fatalErr(binary.Read(r, binary.LittleEndian, &s.maxBits))
-	fatalErr(binary.Read(r, binary.LittleEndian, &s.stateTable))
-	fatalErr(binary.Read(r, binary.LittleEndian, &s.norm))
-	fatalErr(binary.Read(r, binary.LittleEndian, &s.preDefined))
-}
-
-// decSymbol contains information about a state entry,
-// Including the state offset base, the output symbol and
-// the number of bits to read for the low part of the destination state.
-// Using a composite uint64 is faster than a struct with separate members.
-type decSymbol uint64
-
-func newDecSymbol(nbits, addBits uint8, newState uint16, baseline uint32) decSymbol {
-	return decSymbol(nbits) | (decSymbol(addBits) << 8) | (decSymbol(newState) << 16) | (decSymbol(baseline) << 32)
-}
-
-func (d decSymbol) nbBits() uint8 {
-	return uint8(d)
-}
-
-func (d decSymbol) addBits() uint8 {
-	return uint8(d >> 8)
-}
-
-func (d decSymbol) newState() uint16 {
-	return uint16(d >> 16)
-}
-
-func (d decSymbol) baselineInt() int {
-	return int(d >> 32)
-}
-
-func (d *decSymbol) setNBits(nBits uint8) {
-	const mask = 0xffffffffffffff00
-	*d = (*d & mask) | decSymbol(nBits)
-}
-
-func (d *decSymbol) setAddBits(addBits uint8) {
-	const mask = 0xffffffffffff00ff
-	*d = (*d & mask) | (decSymbol(addBits) << 8)
-}
-
-func (d *decSymbol) setNewState(state uint16) {
-	const mask = 0xffffffff0000ffff
-	*d = (*d & mask) | decSymbol(state)<<16
-}
-
-func (d *decSymbol) setExt(addBits uint8, baseline uint32) {
-	const mask = 0xffff00ff
-	*d = (*d & mask) | (decSymbol(addBits) << 8) | (decSymbol(baseline) << 32)
-}
-
-// decSymbolValue returns the transformed decSymbol for the given symbol.
-func decSymbolValue(symb uint8, t []baseOffset) (decSymbol, error) {
-	if int(symb) >= len(t) {
-		return 0, fmt.Errorf("rle symbol %d >= max %d", symb, len(t))
-	}
-	lu := t[symb]
-	return newDecSymbol(0, lu.addBits, 0, lu.baseLine), nil
-}
-
-// setRLE will set the decoder til RLE mode.
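
A standalone sketch of the packed layout defined above (illustrative; it mirrors the unexported decSymbol type rather than importing it), showing that the accessors simply slice the uint64 back apart:

	package main

	import "fmt"

	// Same layout as decSymbol: nbBits | addBits<<8 | newState<<16 | baseline<<32.
	type packedSym uint64

	func pack(nbits, addBits uint8, newState uint16, baseline uint32) packedSym {
		return packedSym(nbits) | packedSym(addBits)<<8 | packedSym(newState)<<16 | packedSym(baseline)<<32
	}

	func main() {
		d := pack(5, 3, 1024, 77)
		fmt.Println(uint8(d), uint8(d>>8), uint16(d>>16), uint32(d>>32)) // 5 3 1024 77
	}
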
-func (s *fseDecoder) setRLE(symbol decSymbol) { - s.actualTableLog = 0 - s.maxBits = symbol.addBits() - s.dt[0] = symbol -} - -// transform will transform the decoder table into a table usable for -// decoding without having to apply the transformation while decoding. -// The state will contain the base value and the number of bits to read. -func (s *fseDecoder) transform(t []baseOffset) error { - tableSize := uint16(1 << s.actualTableLog) - s.maxBits = 0 - for i, v := range s.dt[:tableSize] { - add := v.addBits() - if int(add) >= len(t) { - return fmt.Errorf("invalid decoding table entry %d, symbol %d >= max (%d)", i, v.addBits(), len(t)) - } - lu := t[add] - if lu.addBits > s.maxBits { - s.maxBits = lu.addBits - } - v.setExt(lu.addBits, lu.baseLine) - s.dt[i] = v - } - return nil -} - -type fseState struct { - dt []decSymbol - state decSymbol -} - -// Initialize and decodeAsync first state and symbol. -func (s *fseState) init(br *bitReader, tableLog uint8, dt []decSymbol) { - s.dt = dt - br.fill() - s.state = dt[br.getBits(tableLog)] -} - -// final returns the current state symbol without decoding the next. -func (s decSymbol) final() (int, uint8) { - return s.baselineInt(), s.addBits() -} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go deleted file mode 100644 index d04a829b0a..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go +++ /dev/null @@ -1,65 +0,0 @@ -//go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc - -package zstd - -import ( - "fmt" -) - -type buildDtableAsmContext struct { - // inputs - stateTable *uint16 - norm *int16 - dt *uint64 - - // outputs --- set by the procedure in the case of error; - // for interpretation please see the error handling part below - errParam1 uint64 - errParam2 uint64 -} - -// buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable. -// Function returns non-zero exit code on error. -// -//go:noescape -func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int - -// please keep in sync with _generate/gen_fse.go -const ( - errorCorruptedNormalizedCounter = 1 - errorNewStateTooBig = 2 - errorNewStateNoBits = 3 -) - -// buildDtable will build the decoding table. 
-func (s *fseDecoder) buildDtable() error { - ctx := buildDtableAsmContext{ - stateTable: &s.stateTable[0], - norm: &s.norm[0], - dt: (*uint64)(&s.dt[0]), - } - code := buildDtable_asm(s, &ctx) - - if code != 0 { - switch code { - case errorCorruptedNormalizedCounter: - position := ctx.errParam1 - return fmt.Errorf("corrupted input (position=%d, expected 0)", position) - - case errorNewStateTooBig: - newState := decSymbol(ctx.errParam1) - size := ctx.errParam2 - return fmt.Errorf("newState (%d) outside table size (%d)", newState, size) - - case errorNewStateNoBits: - newState := decSymbol(ctx.errParam1) - oldState := decSymbol(ctx.errParam2) - return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, oldState) - - default: - return fmt.Errorf("buildDtable_asm returned unhandled nonzero code = %d", code) - } - } - return nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s deleted file mode 100644 index bcde398695..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s +++ /dev/null @@ -1,126 +0,0 @@ -// Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. DO NOT EDIT. - -//go:build !appengine && !noasm && gc && !noasm - -// func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int -TEXT ·buildDtable_asm(SB), $0-24 - MOVQ ctx+8(FP), CX - MOVQ s+0(FP), DI - - // Load values - MOVBQZX 4098(DI), DX - XORQ AX, AX - BTSQ DX, AX - MOVQ (CX), BX - MOVQ 16(CX), SI - LEAQ -1(AX), R8 - MOVQ 8(CX), CX - MOVWQZX 4096(DI), DI - - // End load values - // Init, lay down lowprob symbols - XORQ R9, R9 - JMP init_main_loop_condition - -init_main_loop: - MOVWQSX (CX)(R9*2), R10 - CMPW R10, $-1 - JNE do_not_update_high_threshold - MOVB R9, 1(SI)(R8*8) - DECQ R8 - MOVQ $0x0000000000000001, R10 - -do_not_update_high_threshold: - MOVW R10, (BX)(R9*2) - INCQ R9 - -init_main_loop_condition: - CMPQ R9, DI - JL init_main_loop - - // Spread symbols - // Calculate table step - MOVQ AX, R9 - SHRQ $0x01, R9 - MOVQ AX, R10 - SHRQ $0x03, R10 - LEAQ 3(R9)(R10*1), R9 - - // Fill add bits values - LEAQ -1(AX), R10 - XORQ R11, R11 - XORQ R12, R12 - JMP spread_main_loop_condition - -spread_main_loop: - XORQ R13, R13 - MOVWQSX (CX)(R12*2), R14 - JMP spread_inner_loop_condition - -spread_inner_loop: - MOVB R12, 1(SI)(R11*8) - -adjust_position: - ADDQ R9, R11 - ANDQ R10, R11 - CMPQ R11, R8 - JG adjust_position - INCQ R13 - -spread_inner_loop_condition: - CMPQ R13, R14 - JL spread_inner_loop - INCQ R12 - -spread_main_loop_condition: - CMPQ R12, DI - JL spread_main_loop - TESTQ R11, R11 - JZ spread_check_ok - MOVQ ctx+8(FP), AX - MOVQ R11, 24(AX) - MOVQ $+1, ret+16(FP) - RET - -spread_check_ok: - // Build Decoding table - XORQ DI, DI - -build_table_main_table: - MOVBQZX 1(SI)(DI*8), CX - MOVWQZX (BX)(CX*2), R8 - LEAQ 1(R8), R9 - MOVW R9, (BX)(CX*2) - MOVQ R8, R9 - BSRQ R9, R9 - MOVQ DX, CX - SUBQ R9, CX - SHLQ CL, R8 - SUBQ AX, R8 - MOVB CL, (SI)(DI*8) - MOVW R8, 2(SI)(DI*8) - CMPQ R8, AX - JLE build_table_check1_ok - MOVQ ctx+8(FP), CX - MOVQ R8, 24(CX) - MOVQ AX, 32(CX) - MOVQ $+2, ret+16(FP) - RET - -build_table_check1_ok: - TESTB CL, CL - JNZ build_table_check2_ok - CMPW R8, DI - JNE build_table_check2_ok - MOVQ ctx+8(FP), AX - MOVQ R8, 24(AX) - MOVQ DI, 32(AX) - MOVQ $+3, ret+16(FP) - RET - -build_table_check2_ok: - INCQ DI - CMPQ DI, AX - JL build_table_main_table - MOVQ $+0, ret+16(FP) - RET diff --git 
a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go deleted file mode 100644 index 8adfebb029..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go +++ /dev/null @@ -1,73 +0,0 @@ -//go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm - -package zstd - -import ( - "errors" - "fmt" -) - -// buildDtable will build the decoding table. -func (s *fseDecoder) buildDtable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - symbolNext := s.stateTable[:256] - - // Init, lay down lowprob symbols - { - for i, v := range s.norm[:s.symbolLen] { - if v == -1 { - s.dt[highThreshold].setAddBits(uint8(i)) - highThreshold-- - v = 1 - } - symbolNext[i] = uint16(v) - } - } - - // Spread symbols - { - tableMask := tableSize - 1 - step := tableStep(tableSize) - position := uint32(0) - for ss, v := range s.norm[:s.symbolLen] { - for i := 0; i < int(v); i++ { - s.dt[position].setAddBits(uint8(ss)) - for { - // lowprob area - position = (position + step) & tableMask - if position <= highThreshold { - break - } - } - } - } - if position != 0 { - // position must reach all cells once, otherwise normalizedCounter is incorrect - return errors.New("corrupted input (position != 0)") - } - } - - // Build Decoding table - { - tableSize := uint16(1 << s.actualTableLog) - for u, v := range s.dt[:tableSize] { - symbol := v.addBits() - nextState := symbolNext[symbol] - symbolNext[symbol] = nextState + 1 - nBits := s.actualTableLog - byte(highBits(uint32(nextState))) - s.dt[u&maxTableMask].setNBits(nBits) - newState := (nextState << nBits) - tableSize - if newState > tableSize { - return fmt.Errorf("newState (%d) outside table size (%d)", newState, tableSize) - } - if newState == uint16(u) && nBits == 0 { - // Seems weird that this is possible with nbits > 0. - return fmt.Errorf("newState (%d) == oldState (%d) and no bits", newState, u) - } - s.dt[u&maxTableMask].setNewState(newState) - } - } - return nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go deleted file mode 100644 index ab26326a8f..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go +++ /dev/null @@ -1,701 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "errors" - "fmt" - "math" -) - -const ( - // For encoding we only support up to - maxEncTableLog = 8 - maxEncTablesize = 1 << maxTableLog - maxEncTableMask = (1 << maxTableLog) - 1 - minEncTablelog = 5 - maxEncSymbolValue = maxMatchLengthSymbol -) - -// Scratch provides temporary storage for compression and decompression. -type fseEncoder struct { - symbolLen uint16 // Length of active part of the symbol table. - actualTableLog uint8 // Selected tablelog. - ct cTable // Compression tables. - maxCount int // count of the most probable symbol - zeroBits bool // no bits has prob > 50%. - clearCount bool // clear count - useRLE bool // This encoder is for RLE - preDefined bool // This encoder is predefined. - reUsed bool // Set to know when the encoder has been reused. - rleVal uint8 // RLE Symbol - maxBits uint8 // Maximum output bits after transform. - - // TODO: Technically zstd should be fine with 64 bytes. 
- count [256]uint32 - norm [256]int16 -} - -// cTable contains tables used for compression. -type cTable struct { - tableSymbol []byte - stateTable []uint16 - symbolTT []symbolTransform -} - -// symbolTransform contains the state transform for a symbol. -type symbolTransform struct { - deltaNbBits uint32 - deltaFindState int16 - outBits uint8 -} - -// String prints values as a human readable string. -func (s symbolTransform) String() string { - return fmt.Sprintf("{deltabits: %08x, findstate:%d outbits:%d}", s.deltaNbBits, s.deltaFindState, s.outBits) -} - -// Histogram allows to populate the histogram and skip that step in the compression, -// It otherwise allows to inspect the histogram when compression is done. -// To indicate that you have populated the histogram call HistogramFinished -// with the value of the highest populated symbol, as well as the number of entries -// in the most populated entry. These are accepted at face value. -func (s *fseEncoder) Histogram() *[256]uint32 { - return &s.count -} - -// HistogramFinished can be called to indicate that the histogram has been populated. -// maxSymbol is the index of the highest set symbol of the next data segment. -// maxCount is the number of entries in the most populated entry. -// These are accepted at face value. -func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) { - s.maxCount = maxCount - s.symbolLen = uint16(maxSymbol) + 1 - s.clearCount = maxCount != 0 -} - -// allocCtable will allocate tables needed for compression. -// If existing tables a re big enough, they are simply re-used. -func (s *fseEncoder) allocCtable() { - tableSize := 1 << s.actualTableLog - // get tableSymbol that is big enough. - if cap(s.ct.tableSymbol) < tableSize { - s.ct.tableSymbol = make([]byte, tableSize) - } - s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] - - ctSize := tableSize - if cap(s.ct.stateTable) < ctSize { - s.ct.stateTable = make([]uint16, ctSize) - } - s.ct.stateTable = s.ct.stateTable[:ctSize] - - if cap(s.ct.symbolTT) < 256 { - s.ct.symbolTT = make([]symbolTransform, 256) - } - s.ct.symbolTT = s.ct.symbolTT[:256] -} - -// buildCTable will populate the compression table so it is ready to be used. -func (s *fseEncoder) buildCTable() error { - tableSize := uint32(1 << s.actualTableLog) - highThreshold := tableSize - 1 - var cumul [256]int16 - - s.allocCtable() - tableSymbol := s.ct.tableSymbol[:tableSize] - // symbol start positions - { - cumul[0] = 0 - for ui, v := range s.norm[:s.symbolLen-1] { - u := byte(ui) // one less than reference - if v == -1 { - // Low proba symbol - cumul[u+1] = cumul[u] + 1 - tableSymbol[highThreshold] = u - highThreshold-- - } else { - cumul[u+1] = cumul[u] + v - } - } - // Encode last symbol separately to avoid overflowing u - u := int(s.symbolLen - 1) - v := s.norm[s.symbolLen-1] - if v == -1 { - // Low proba symbol - cumul[u+1] = cumul[u] + 1 - tableSymbol[highThreshold] = byte(u) - highThreshold-- - } else { - cumul[u+1] = cumul[u] + v - } - if uint32(cumul[s.symbolLen]) != tableSize { - return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) - } - cumul[s.symbolLen] = int16(tableSize) + 1 - } - // Spread symbols - s.zeroBits = false - { - step := tableStep(tableSize) - tableMask := tableSize - 1 - var position uint32 - // if any symbol > largeLimit, we may have 0 bits output. 
- largeLimit := int16(1 << (s.actualTableLog - 1)) - for ui, v := range s.norm[:s.symbolLen] { - symbol := byte(ui) - if v > largeLimit { - s.zeroBits = true - } - for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { - tableSymbol[position] = symbol - position = (position + step) & tableMask - for position > highThreshold { - position = (position + step) & tableMask - } /* Low proba area */ - } - } - - // Check if we have gone through all positions - if position != 0 { - return errors.New("position!=0") - } - } - - // Build table - table := s.ct.stateTable - { - tsi := int(tableSize) - for u, v := range tableSymbol { - // TableU16 : sorted by symbol order; gives next state value - table[cumul[v]] = uint16(tsi + u) - cumul[v]++ - } - } - - // Build Symbol Transformation Table - { - total := int16(0) - symbolTT := s.ct.symbolTT[:s.symbolLen] - tableLog := s.actualTableLog - tl := (uint32(tableLog) << 16) - (1 << tableLog) - for i, v := range s.norm[:s.symbolLen] { - switch v { - case 0: - case -1, 1: - symbolTT[i].deltaNbBits = tl - symbolTT[i].deltaFindState = total - 1 - total++ - default: - maxBitsOut := uint32(tableLog) - highBit(uint32(v-1)) - minStatePlus := uint32(v) << maxBitsOut - symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus - symbolTT[i].deltaFindState = total - v - total += v - } - } - if total != int16(tableSize) { - return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) - } - } - return nil -} - -var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} - -func (s *fseEncoder) setRLE(val byte) { - s.allocCtable() - s.actualTableLog = 0 - s.ct.stateTable = s.ct.stateTable[:1] - s.ct.symbolTT[val] = symbolTransform{ - deltaFindState: 0, - deltaNbBits: 0, - } - if debugEncoder { - println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val]) - } - s.rleVal = val - s.useRLE = true -} - -// setBits will set output bits for the transform. -// if nil is provided, the number of bits is equal to the index. -func (s *fseEncoder) setBits(transform []byte) { - if s.reUsed || s.preDefined { - return - } - if s.useRLE { - if transform == nil { - s.ct.symbolTT[s.rleVal].outBits = s.rleVal - s.maxBits = s.rleVal - return - } - s.maxBits = transform[s.rleVal] - s.ct.symbolTT[s.rleVal].outBits = s.maxBits - return - } - if transform == nil { - for i := range s.ct.symbolTT[:s.symbolLen] { - s.ct.symbolTT[i].outBits = uint8(i) - } - s.maxBits = uint8(s.symbolLen - 1) - return - } - s.maxBits = 0 - for i, v := range transform[:s.symbolLen] { - s.ct.symbolTT[i].outBits = v - if v > s.maxBits { - // We could assume bits always going up, but we play safe. - s.maxBits = v - } - } -} - -// normalizeCount will normalize the count of the symbols so -// the total is equal to the table size. -// If successful, compression tables will also be made ready. 
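Both symbol-spreading loops in this file (buildDtable earlier and buildCTable above) rely on the stride being co-prime with the power-of-two table size, so that repeated stepping visits every cell exactly once and lands back on position 0; that is exactly what the "position != 0" corruption checks assert. tableStep itself is not part of this hunk, but the same computation is visible in the deleted assembly above (step = tableSize/2 + tableSize/8 + 3, which is odd for the table sizes used here, tableLog >= 5). A small sketch under that assumption, before normalizeCount below:

package main

import "fmt"

// tableStep matches the stride computed in the assembly above:
// step = tableSize/2 + tableSize/8 + 3.
func tableStep(tableSize uint32) uint32 {
	return (tableSize >> 1) + (tableSize >> 3) + 3
}

func main() {
	const tableLog = 5
	tableSize := uint32(1) << tableLog
	step := tableStep(tableSize) // 23 for a 32-entry table
	mask := tableSize - 1

	// An odd stride is co-prime with a power-of-two size, so the walk
	// covers every cell once and returns to 0 after tableSize steps.
	visited := make([]bool, tableSize)
	pos := uint32(0)
	count := 0
	for i := uint32(0); i < tableSize; i++ {
		if !visited[pos] {
			count++
		}
		visited[pos] = true
		pos = (pos + step) & mask
	}
	fmt.Println("visited", count, "of", tableSize, "cells; final position:", pos)
	// visited 32 of 32 cells; final position: 0
}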
-func (s *fseEncoder) normalizeCount(length int) error { - if s.reUsed { - return nil - } - s.optimalTableLog(length) - var ( - tableLog = s.actualTableLog - scale = 62 - uint64(tableLog) - step = (1 << 62) / uint64(length) - vStep = uint64(1) << (scale - 20) - stillToDistribute = int16(1 << tableLog) - largest int - largestP int16 - lowThreshold = (uint32)(length >> tableLog) - ) - if s.maxCount == length { - s.useRLE = true - return nil - } - s.useRLE = false - for i, cnt := range s.count[:s.symbolLen] { - // already handled - // if (count[s] == s.length) return 0; /* rle special case */ - - if cnt == 0 { - s.norm[i] = 0 - continue - } - if cnt <= lowThreshold { - s.norm[i] = -1 - stillToDistribute-- - } else { - proba := (int16)((uint64(cnt) * step) >> scale) - if proba < 8 { - restToBeat := vStep * uint64(rtbTable[proba]) - v := uint64(cnt)*step - (uint64(proba) << scale) - if v > restToBeat { - proba++ - } - } - if proba > largestP { - largestP = proba - largest = i - } - s.norm[i] = proba - stillToDistribute -= proba - } - } - - if -stillToDistribute >= (s.norm[largest] >> 1) { - // corner case, need another normalization method - err := s.normalizeCount2(length) - if err != nil { - return err - } - if debugAsserts { - err = s.validateNorm() - if err != nil { - return err - } - } - return s.buildCTable() - } - s.norm[largest] += stillToDistribute - if debugAsserts { - err := s.validateNorm() - if err != nil { - return err - } - } - return s.buildCTable() -} - -// Secondary normalization method. -// To be used when primary method fails. -func (s *fseEncoder) normalizeCount2(length int) error { - const notYetAssigned = -2 - var ( - distributed uint32 - total = uint32(length) - tableLog = s.actualTableLog - lowThreshold = total >> tableLog - lowOne = (total * 3) >> (tableLog + 1) - ) - for i, cnt := range s.count[:s.symbolLen] { - if cnt == 0 { - s.norm[i] = 0 - continue - } - if cnt <= lowThreshold { - s.norm[i] = -1 - distributed++ - total -= cnt - continue - } - if cnt <= lowOne { - s.norm[i] = 1 - distributed++ - total -= cnt - continue - } - s.norm[i] = notYetAssigned - } - toDistribute := (1 << tableLog) - distributed - - if (total / toDistribute) > lowOne { - // risk of rounding to zero - lowOne = (total * 3) / (toDistribute * 2) - for i, cnt := range s.count[:s.symbolLen] { - if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { - s.norm[i] = 1 - distributed++ - total -= cnt - continue - } - } - toDistribute = (1 << tableLog) - distributed - } - if distributed == uint32(s.symbolLen)+1 { - // all values are pretty poor; - // probably incompressible data (should have already been detected); - // find max, then give all remaining points to max - var maxV int - var maxC uint32 - for i, cnt := range s.count[:s.symbolLen] { - if cnt > maxC { - maxV = i - maxC = cnt - } - } - s.norm[maxV] += int16(toDistribute) - return nil - } - - if total == 0 { - // all of the symbols were low enough for the lowOne or lowThreshold - for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { - if s.norm[i] > 0 { - toDistribute-- - s.norm[i]++ - } - } - return nil - } - - var ( - vStepLog = 62 - uint64(tableLog) - mid = uint64((1 << (vStepLog - 1)) - 1) - rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining - tmpTotal = mid - ) - for i, cnt := range s.count[:s.symbolLen] { - if s.norm[i] == notYetAssigned { - var ( - end = tmpTotal + uint64(cnt)*rStep - sStart = uint32(tmpTotal >> vStepLog) - sEnd = uint32(end >> vStepLog) - weight = sEnd - 
sStart - ) - if weight < 1 { - return errors.New("weight < 1") - } - s.norm[i] = int16(weight) - tmpTotal = end - } - } - return nil -} - -// optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog -func (s *fseEncoder) optimalTableLog(length int) { - tableLog := uint8(maxEncTableLog) - minBitsSrc := highBit(uint32(length)) + 1 - minBitsSymbols := highBit(uint32(s.symbolLen-1)) + 2 - minBits := uint8(minBitsSymbols) - if minBitsSrc < minBitsSymbols { - minBits = uint8(minBitsSrc) - } - - maxBitsSrc := uint8(highBit(uint32(length-1))) - 2 - if maxBitsSrc < tableLog { - // Accuracy can be reduced - tableLog = maxBitsSrc - } - if minBits > tableLog { - tableLog = minBits - } - // Need a minimum to safely represent all symbol values - if tableLog < minEncTablelog { - tableLog = minEncTablelog - } - if tableLog > maxEncTableLog { - tableLog = maxEncTableLog - } - s.actualTableLog = tableLog -} - -// validateNorm validates the normalized histogram table. -func (s *fseEncoder) validateNorm() (err error) { - var total int - for _, v := range s.norm[:s.symbolLen] { - if v >= 0 { - total += int(v) - } else { - total -= int(v) - } - } - defer func() { - if err == nil { - return - } - fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen) - for i, v := range s.norm[:s.symbolLen] { - fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v) - } - }() - if total != (1 << s.actualTableLog) { - return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog) - } - for i, v := range s.norm[:s.symbolLen] { - if v == 0 && s.count[i] > 0 { - return fmt.Errorf("conflict: count[%d] == %d, norm[%d] == 0", i, s.count[i], i) - } - } - return nil -} - -// writeCount will write the normalized histogram count to header. -// This is read back by readNCount. -func (s *fseEncoder) writeCount(out []byte) ([]byte, error) { - if s.useRLE { - return append(out, s.rleVal), nil - } - if s.preDefined || s.reUsed { - // Never write predefined. - return out, nil - } - - var ( - tableLog = s.actualTableLog - tableSize = 1 << tableLog - previous0 bool - charnum uint16 - - // maximum header size plus 2 extra bytes for final output if bitCount == 0. - maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 + 2 - - // Write Table Size - bitStream = uint32(tableLog - minEncTablelog) - bitCount = uint(4) - remaining = int16(tableSize + 1) /* +1 for extra accuracy */ - threshold = int16(tableSize) - nbBits = uint(tableLog + 1) - outP = len(out) - ) - if cap(out) < outP+maxHeaderSize { - out = append(out, make([]byte, maxHeaderSize*3)...) - out = out[:len(out)-maxHeaderSize*3] - } - out = out[:outP+maxHeaderSize] - - // stops at 1 - for remaining > 1 { - if previous0 { - start := charnum - for s.norm[charnum] == 0 { - charnum++ - } - for charnum >= start+24 { - start += 24 - bitStream += uint32(0xFFFF) << bitCount - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - } - for charnum >= start+3 { - start += 3 - bitStream += 3 << bitCount - bitCount += 2 - } - bitStream += uint32(charnum-start) << bitCount - bitCount += 2 - if bitCount > 16 { - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - bitCount -= 16 - } - } - - count := s.norm[charnum] - charnum++ - max := (2*threshold - 1) - remaining - if count < 0 { - remaining += count - } else { - remaining -= count - } - count++ // +1 for extra accuracy - if count >= threshold { - count += max // [0..max[ [max..threshold[ (...) 
[threshold+max 2*threshold[ - } - bitStream += uint32(count) << bitCount - bitCount += nbBits - if count < max { - bitCount-- - } - - previous0 = count == 1 - if remaining < 1 { - return nil, errors.New("internal error: remaining < 1") - } - for remaining < threshold { - nbBits-- - threshold >>= 1 - } - - if bitCount > 16 { - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += 2 - bitStream >>= 16 - bitCount -= 16 - } - } - - if outP+2 > len(out) { - return nil, fmt.Errorf("internal error: %d > %d, maxheader: %d, sl: %d, tl: %d, normcount: %v", outP+2, len(out), maxHeaderSize, s.symbolLen, int(tableLog), s.norm[:s.symbolLen]) - } - out[outP] = byte(bitStream) - out[outP+1] = byte(bitStream >> 8) - outP += int((bitCount + 7) / 8) - - if charnum > s.symbolLen { - return nil, errors.New("internal error: charnum > s.symbolLen") - } - return out[:outP], nil -} - -// Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits) -// note 1 : assume symbolValue is valid (<= maxSymbolValue) -// note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits * -func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 { - minNbBits := s.ct.symbolTT[symbolValue].deltaNbBits >> 16 - threshold := (minNbBits + 1) << 16 - if debugAsserts { - if !(s.actualTableLog < 16) { - panic("!s.actualTableLog < 16") - } - // ensure enough room for renormalization double shift - if !(uint8(accuracyLog) < 31-s.actualTableLog) { - panic("!uint8(accuracyLog) < 31-s.actualTableLog") - } - } - tableSize := uint32(1) << s.actualTableLog - deltaFromThreshold := threshold - (s.ct.symbolTT[symbolValue].deltaNbBits + tableSize) - // linear interpolation (very approximate) - normalizedDeltaFromThreshold := (deltaFromThreshold << accuracyLog) >> s.actualTableLog - bitMultiplier := uint32(1) << accuracyLog - if debugAsserts { - if s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold { - panic("s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold") - } - if normalizedDeltaFromThreshold > bitMultiplier { - panic("normalizedDeltaFromThreshold > bitMultiplier") - } - } - return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold -} - -// Returns the cost in bits of encoding the distribution in count using ctable. -// Histogram should only be up to the last non-zero symbol. -// Returns an -1 if ctable cannot represent all the symbols in count. -func (s *fseEncoder) approxSize(hist []uint32) uint32 { - if int(s.symbolLen) < len(hist) { - // More symbols than we have. - return math.MaxUint32 - } - if s.useRLE { - // We will never reuse RLE encoders. - return math.MaxUint32 - } - const kAccuracyLog = 8 - badCost := (uint32(s.actualTableLog) + 1) << kAccuracyLog - var cost uint32 - for i, v := range hist { - if v == 0 { - continue - } - if s.norm[i] == 0 { - return math.MaxUint32 - } - bitCost := s.bitCost(uint8(i), kAccuracyLog) - if bitCost > badCost { - return math.MaxUint32 - } - cost += v * bitCost - } - return cost >> kAccuracyLog -} - -// maxHeaderSize returns the maximum header size in bits. -// This is not exact size, but we want a penalty for new tables anyway. -func (s *fseEncoder) maxHeaderSize() uint32 { - if s.preDefined { - return 0 - } - if s.useRLE { - return 8 - } - return (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8 -} - -// cState contains the compression state of a stream. 
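A note on the fixed-point bookkeeping used by the cState code below: deltaNbBits from buildCTable above is a 16.16-style value, and the number of bits to flush for a symbol is obtained by adding a state to it and keeping the top 16 bits. cState.init below uses the 1 << 15 midpoint for the first symbol; the per-symbol encode step, which is not part of this hunk, adds the current state instead. A toy walk-through with made-up table parameters:

package main

import "fmt"

func main() {
	// For an assumed symbol with normalized count v = 5 in a tableLog = 6
	// table (states live in [64,128)), buildCTable above computes:
	//   maxBitsOut   = tableLog - highBit(v-1) = 6 - 2 = 4
	//   minStatePlus = v << maxBitsOut         = 80
	//   deltaNbBits  = (maxBitsOut << 16) - minStatePlus
	const deltaNbBits = 4<<16 - 80

	// Adding the state and keeping the top 16 bits yields the bit count:
	// states below minStatePlus flush one bit less.
	for _, state := range []uint32{64, 79, 80, 127} {
		nbBits := (state + deltaNbBits) >> 16
		fmt.Printf("state=%3d -> %d bits\n", state, nbBits)
	}
	// state= 64 -> 3 bits, state= 79 -> 3 bits,
	// state= 80 -> 4 bits, state=127 -> 4 bits
}

The minStatePlus term is what makes low states emit maxBitsOut-1 bits and high states maxBitsOut bits, so the state stays inside the table after the shift.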
-type cState struct { - bw *bitWriter - stateTable []uint16 - state uint16 -} - -// init will initialize the compression state to the first symbol of the stream. -func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) { - c.bw = bw - c.stateTable = ct.stateTable - if len(c.stateTable) == 1 { - // RLE - c.stateTable[0] = uint16(0) - c.state = 0 - return - } - nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 - im := int32((nbBitsOut << 16) - first.deltaNbBits) - lu := (im >> nbBitsOut) + int32(first.deltaFindState) - c.state = c.stateTable[lu] -} - -// flush will write the tablelog to the output and flush the remaining full bytes. -func (c *cState) flush(tableLog uint8) { - c.bw.flush32() - c.bw.addBits16NC(c.state, tableLog) -} diff --git a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go b/vendor/github.com/klauspost/compress/zstd/fse_predefined.go deleted file mode 100644 index 474cb77d2b..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/fse_predefined.go +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "fmt" - "math" - "sync" -) - -var ( - // fsePredef are the predefined fse tables as defined here: - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions - // These values are already transformed. - fsePredef [3]fseDecoder - - // fsePredefEnc are the predefined encoder based on fse tables as defined here: - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions - // These values are already transformed. - fsePredefEnc [3]fseEncoder - - // symbolTableX contain the transformations needed for each type as defined in - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets - symbolTableX [3][]baseOffset - - // maxTableSymbol is the biggest supported symbol for each table type - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#the-codes-for-literals-lengths-match-lengths-and-offsets - maxTableSymbol = [3]uint8{tableLiteralLengths: maxLiteralLengthSymbol, tableOffsets: maxOffsetLengthSymbol, tableMatchLengths: maxMatchLengthSymbol} - - // bitTables is the bits table for each table. - bitTables = [3][]byte{tableLiteralLengths: llBitsTable[:], tableOffsets: nil, tableMatchLengths: mlBitsTable[:]} -) - -type tableIndex uint8 - -const ( - // indexes for fsePredef and symbolTableX - tableLiteralLengths tableIndex = 0 - tableOffsets tableIndex = 1 - tableMatchLengths tableIndex = 2 - - maxLiteralLengthSymbol = 35 - maxOffsetLengthSymbol = 30 - maxMatchLengthSymbol = 52 -) - -// baseOffset is used for calculating transformations. -type baseOffset struct { - baseLine uint32 - addBits uint8 -} - -// fillBase will precalculate base offsets with the given bit distributions. 
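The helper below builds the baseOffset tables that map a symbol code to a half-open value range [base, base+2^bits); decoding a value is then baseline plus the raw extra bits. A self-contained sketch reusing the default literal-length layout from initPredefined below (the original helper additionally length-checks its arguments and guards against int32 overflow):

package main

import "fmt"

type baseOffset struct {
	baseLine uint32
	addBits  uint8
}

// fillBase mirrors the helper below: each code covers [base, base+2^bits).
func fillBase(dst []baseOffset, base uint32, bits ...uint8) {
	for i, bit := range bits {
		dst[i] = baseOffset{baseLine: base, addBits: bit}
		base += 1 << bit
	}
}

func main() {
	// Literal-length codes: 0-15 are literal values, higher codes carry
	// extra bits, as set up in initPredefined below.
	tmp := make([]baseOffset, 36)
	for i := range tmp[:16] {
		tmp[i] = baseOffset{baseLine: uint32(i)}
	}
	fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16)

	code, extra := 17, uint32(1) // code 17: baseline 18, 1 extra bit
	fmt.Println("literal length:", tmp[code].baseLine+extra) // 19
}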
-func fillBase(dst []baseOffset, base uint32, bits ...uint8) { - if len(bits) != len(dst) { - panic(fmt.Sprintf("len(dst) (%d) != len(bits) (%d)", len(dst), len(bits))) - } - for i, bit := range bits { - if base > math.MaxInt32 { - panic("invalid decoding table, base overflows int32") - } - - dst[i] = baseOffset{ - baseLine: base, - addBits: bit, - } - base += 1 << bit - } -} - -var predef sync.Once - -func initPredefined() { - predef.Do(func() { - // Literals length codes - tmp := make([]baseOffset, 36) - for i := range tmp[:16] { - tmp[i] = baseOffset{ - baseLine: uint32(i), - addBits: 0, - } - } - fillBase(tmp[16:], 16, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) - symbolTableX[tableLiteralLengths] = tmp - - // Match length codes - tmp = make([]baseOffset, 53) - for i := range tmp[:32] { - tmp[i] = baseOffset{ - // The transformation adds the 3 length. - baseLine: uint32(i) + 3, - addBits: 0, - } - } - fillBase(tmp[32:], 35, 1, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16) - symbolTableX[tableMatchLengths] = tmp - - // Offset codes - tmp = make([]baseOffset, maxOffsetBits+1) - tmp[1] = baseOffset{ - baseLine: 1, - addBits: 1, - } - fillBase(tmp[2:], 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30) - symbolTableX[tableOffsets] = tmp - - // Fill predefined tables and transform them. - // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#default-distributions - for i := range fsePredef[:] { - f := &fsePredef[i] - switch tableIndex(i) { - case tableLiteralLengths: - // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L243 - f.actualTableLog = 6 - copy(f.norm[:], []int16{4, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, - 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 2, 1, 1, 1, 1, 1, - -1, -1, -1, -1}) - f.symbolLen = 36 - case tableOffsets: - // https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L281 - f.actualTableLog = 5 - copy(f.norm[:], []int16{ - 1, 1, 1, 1, 1, 1, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, -1, -1, -1}) - f.symbolLen = 29 - case tableMatchLengths: - //https://github.com/facebook/zstd/blob/ededcfca57366461021c922720878c81a5854a0a/lib/decompress/zstd_decompress_block.c#L304 - f.actualTableLog = 6 - copy(f.norm[:], []int16{ - 1, 4, 3, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, -1, -1, - -1, -1, -1, -1, -1}) - f.symbolLen = 53 - } - if err := f.buildDtable(); err != nil { - panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) - } - if err := f.transform(symbolTableX[i]); err != nil { - panic(fmt.Errorf("building table %v: %v", tableIndex(i), err)) - } - f.preDefined = true - - // Create encoder as well - enc := &fsePredefEnc[i] - copy(enc.norm[:], f.norm[:]) - enc.symbolLen = f.symbolLen - enc.actualTableLog = f.actualTableLog - if err := enc.buildCTable(); err != nil { - panic(fmt.Errorf("building encoding table %v: %v", tableIndex(i), err)) - } - enc.setBits(bitTables[i]) - enc.preDefined = true - } - }) -} diff --git a/vendor/github.com/klauspost/compress/zstd/hash.go b/vendor/github.com/klauspost/compress/zstd/hash.go deleted file mode 100644 index 5d73c21ebd..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/hash.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. 
-// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -const ( - prime3bytes = 506832829 - prime4bytes = 2654435761 - prime5bytes = 889523592379 - prime6bytes = 227718039650203 - prime7bytes = 58295818150454627 - prime8bytes = 0xcf1bbcdcb7a56463 -) - -// hashLen returns a hash of the lowest mls bytes of with length output bits. -// mls must be >=3 and <=8. Any other value will return hash for 4 bytes. -// length should always be < 32. -// Preferably length and mls should be a constant for inlining. -func hashLen(u uint64, length, mls uint8) uint32 { - switch mls { - case 3: - return (uint32(u<<8) * prime3bytes) >> (32 - length) - case 5: - return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length)) - case 6: - return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length)) - case 7: - return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length)) - case 8: - return uint32((u * prime8bytes) >> (64 - length)) - default: - return (uint32(u) * prime4bytes) >> (32 - length) - } -} diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go deleted file mode 100644 index 09164856d2..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/history.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "github.com/klauspost/compress/huff0" -) - -// history contains the information transferred between blocks. -type history struct { - // Literal decompression - huffTree *huff0.Scratch - - // Sequence decompression - decoders sequenceDecs - recentOffsets [3]int - - // History buffer... - b []byte - - // ignoreBuffer is meant to ignore a number of bytes - // when checking for matches in history - ignoreBuffer int - - windowSize int - allocFrameBuffer int // needed? - error bool - dict *dict -} - -// reset will reset the history to initial state of a frame. -// The history must already have been initialized to the desired size. -func (h *history) reset() { - h.b = h.b[:0] - h.ignoreBuffer = 0 - h.error = false - h.recentOffsets = [3]int{1, 4, 8} - h.decoders.freeDecoders() - h.decoders = sequenceDecs{br: h.decoders.br} - h.freeHuffDecoder() - h.huffTree = nil - h.dict = nil - //printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b)) -} - -func (h *history) freeHuffDecoder() { - if h.huffTree != nil { - if h.dict == nil || h.dict.litEnc != h.huffTree { - huffDecoderPool.Put(h.huffTree) - h.huffTree = nil - } - } -} - -func (h *history) setDict(dict *dict) { - if dict == nil { - return - } - h.dict = dict - h.decoders.litLengths = dict.llDec - h.decoders.offsets = dict.ofDec - h.decoders.matchLengths = dict.mlDec - h.decoders.dict = dict.content - h.recentOffsets = dict.offsets - h.huffTree = dict.litEnc -} - -// append bytes to history. -// This function will make sure there is space for it, -// if the buffer has been allocated with enough extra space. -func (h *history) append(b []byte) { - if len(b) >= h.windowSize { - // Discard all history by simply overwriting - h.b = h.b[:h.windowSize] - copy(h.b, b[len(b)-h.windowSize:]) - return - } - - // If there is space, append it. - if len(b) < cap(h.b)-len(h.b) { - h.b = append(h.b, b...) - return - } - - // Move data down so we only have window size left. 
- // We know we have less than window size in b at this point. - discard := len(b) + len(h.b) - h.windowSize - copy(h.b, h.b[discard:]) - h.b = h.b[:h.windowSize] - copy(h.b[h.windowSize-len(b):], b) -} - -// ensureBlock will ensure there is space for at least one block... -func (h *history) ensureBlock() { - if cap(h.b) < h.allocFrameBuffer { - h.b = make([]byte, 0, h.allocFrameBuffer) - return - } - - avail := cap(h.b) - len(h.b) - if avail >= h.windowSize || avail > maxCompressedBlockSize { - return - } - // Move data down so we only have window size left. - // We know we have less than window size in b at this point. - discard := len(h.b) - h.windowSize - copy(h.b, h.b[discard:]) - h.b = h.b[:h.windowSize] -} - -// append bytes to history without ever discarding anything. -func (h *history) appendKeep(b []byte) { - h.b = append(h.b, b...) -} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt deleted file mode 100644 index 24b53065f4..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/LICENSE.txt +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2016 Caleb Spare - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md deleted file mode 100644 index 777290d44c..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md +++ /dev/null @@ -1,71 +0,0 @@ -# xxhash - -VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package. - -xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a -high-quality hashing algorithm that is much faster than anything in the Go -standard library. - -This package provides a straightforward API: - -``` -func Sum64(b []byte) uint64 -func Sum64String(s string) uint64 -type Digest struct{ ... } - func New() *Digest -``` - -The `Digest` type implements hash.Hash64. Its key methods are: - -``` -func (*Digest) Write([]byte) (int, error) -func (*Digest) WriteString(string) (int, error) -func (*Digest) Sum64() uint64 -``` - -The package is written with optimized pure Go and also contains even faster -assembly implementations for amd64 and arm64. If desired, the `purego` build tag -opts into using the Go code even on those architectures. 
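A minimal usage sketch of the API listed above, written against the original github.com/cespare/xxhash/v2 module that this vendored copy mirrors (the vendored package itself is internal to zstd):

package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing.
	fmt.Printf("%016x\n", xxhash.Sum64([]byte("hello, world")))

	// Streaming: several writes produce the same digest as one-shot.
	d := xxhash.New()
	d.WriteString("hello, ")
	d.WriteString("world")
	fmt.Printf("%016x\n", d.Sum64())
}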
- -[xxHash]: http://cyan4973.github.io/xxHash/ - -## Compatibility - -This package is in a module and the latest code is in version 2 of the module. -You need a version of Go with at least "minimal module compatibility" to use -github.com/cespare/xxhash/v2: - -* 1.9.7+ for Go 1.9 -* 1.10.3+ for Go 1.10 -* Go 1.11 or later - -I recommend using the latest release of Go. - -## Benchmarks - -Here are some quick benchmarks comparing the pure-Go and assembly -implementations of Sum64. - -| input size | purego | asm | -| ---------- | --------- | --------- | -| 4 B | 1.3 GB/s | 1.2 GB/s | -| 16 B | 2.9 GB/s | 3.5 GB/s | -| 100 B | 6.9 GB/s | 8.1 GB/s | -| 4 KB | 11.7 GB/s | 16.7 GB/s | -| 10 MB | 12.0 GB/s | 17.3 GB/s | - -These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C -CPU using the following commands under Go 1.19.2: - -``` -benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$') -benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$') -``` - -## Projects using this package - -- [InfluxDB](https://github.com/influxdata/influxdb) -- [Prometheus](https://github.com/prometheus/prometheus) -- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics) -- [FreeCache](https://github.com/coocood/freecache) -- [FastCache](https://github.com/VictoriaMetrics/fastcache) diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go deleted file mode 100644 index fc40c82001..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go +++ /dev/null @@ -1,230 +0,0 @@ -// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described -// at http://cyan4973.github.io/xxHash/. -// THIS IS VENDORED: Go to github.com/cespare/xxhash for original package. - -package xxhash - -import ( - "encoding/binary" - "errors" - "math/bits" -) - -const ( - prime1 uint64 = 11400714785074694791 - prime2 uint64 = 14029467366897019727 - prime3 uint64 = 1609587929392839161 - prime4 uint64 = 9650029242287828579 - prime5 uint64 = 2870177450012600261 -) - -// Store the primes in an array as well. -// -// The consts are used when possible in Go code to avoid MOVs but we need a -// contiguous array of the assembly code. -var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5} - -// Digest implements hash.Hash64. -type Digest struct { - v1 uint64 - v2 uint64 - v3 uint64 - v4 uint64 - total uint64 - mem [32]byte - n int // how much of mem is used -} - -// New creates a new Digest that computes the 64-bit xxHash algorithm. -func New() *Digest { - var d Digest - d.Reset() - return &d -} - -// Reset clears the Digest's state so that it can be reused. -func (d *Digest) Reset() { - d.v1 = primes[0] + prime2 - d.v2 = prime2 - d.v3 = 0 - d.v4 = -primes[0] - d.total = 0 - d.n = 0 -} - -// Size always returns 8 bytes. -func (d *Digest) Size() int { return 8 } - -// BlockSize always returns 32 bytes. -func (d *Digest) BlockSize() int { return 32 } - -// Write adds more data to d. It always returns len(b), nil. -func (d *Digest) Write(b []byte) (n int, err error) { - n = len(b) - d.total += uint64(n) - - memleft := d.mem[d.n&(len(d.mem)-1):] - - if d.n+n < 32 { - // This new data doesn't even fill the current block. - copy(memleft, b) - d.n += n - return - } - - if d.n > 0 { - // Finish off the partial block. 
- c := copy(memleft, b) - d.v1 = round(d.v1, u64(d.mem[0:8])) - d.v2 = round(d.v2, u64(d.mem[8:16])) - d.v3 = round(d.v3, u64(d.mem[16:24])) - d.v4 = round(d.v4, u64(d.mem[24:32])) - b = b[c:] - d.n = 0 - } - - if len(b) >= 32 { - // One or more full blocks left. - nw := writeBlocks(d, b) - b = b[nw:] - } - - // Store any remaining partial block. - copy(d.mem[:], b) - d.n = len(b) - - return -} - -// Sum appends the current hash to b and returns the resulting slice. -func (d *Digest) Sum(b []byte) []byte { - s := d.Sum64() - return append( - b, - byte(s>>56), - byte(s>>48), - byte(s>>40), - byte(s>>32), - byte(s>>24), - byte(s>>16), - byte(s>>8), - byte(s), - ) -} - -// Sum64 returns the current hash. -func (d *Digest) Sum64() uint64 { - var h uint64 - - if d.total >= 32 { - v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = d.v3 + prime5 - } - - h += d.total - - b := d.mem[:d.n&(len(d.mem)-1)] - for ; len(b) >= 8; b = b[8:] { - k1 := round(0, u64(b[:8])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if len(b) >= 4 { - h ^= uint64(u32(b[:4])) * prime1 - h = rol23(h)*prime2 + prime3 - b = b[4:] - } - for ; len(b) > 0; b = b[1:] { - h ^= uint64(b[0]) * prime5 - h = rol11(h) * prime1 - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -const ( - magic = "xxh\x06" - marshaledSize = len(magic) + 8*5 + 32 -) - -// MarshalBinary implements the encoding.BinaryMarshaler interface. -func (d *Digest) MarshalBinary() ([]byte, error) { - b := make([]byte, 0, marshaledSize) - b = append(b, magic...) - b = appendUint64(b, d.v1) - b = appendUint64(b, d.v2) - b = appendUint64(b, d.v3) - b = appendUint64(b, d.v4) - b = appendUint64(b, d.total) - b = append(b, d.mem[:d.n]...) - b = b[:len(b)+len(d.mem)-d.n] - return b, nil -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. -func (d *Digest) UnmarshalBinary(b []byte) error { - if len(b) < len(magic) || string(b[:len(magic)]) != magic { - return errors.New("xxhash: invalid hash state identifier") - } - if len(b) != marshaledSize { - return errors.New("xxhash: invalid hash state size") - } - b = b[len(magic):] - b, d.v1 = consumeUint64(b) - b, d.v2 = consumeUint64(b) - b, d.v3 = consumeUint64(b) - b, d.v4 = consumeUint64(b) - b, d.total = consumeUint64(b) - copy(d.mem[:], b) - d.n = int(d.total % uint64(len(d.mem))) - return nil -} - -func appendUint64(b []byte, x uint64) []byte { - var a [8]byte - binary.LittleEndian.PutUint64(a[:], x) - return append(b, a[:]...) 
-} - -func consumeUint64(b []byte) ([]byte, uint64) { - x := u64(b) - return b[8:], x -} - -func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } -func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } - -func round(acc, input uint64) uint64 { - acc += input * prime2 - acc = rol31(acc) - acc *= prime1 - return acc -} - -func mergeRound(acc, val uint64) uint64 { - val = round(0, val) - acc ^= val - acc = acc*prime1 + prime4 - return acc -} - -func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } -func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } -func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } -func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } -func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } -func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } -func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } -func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s deleted file mode 100644 index ddb63aa91b..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s +++ /dev/null @@ -1,210 +0,0 @@ -//go:build !appengine && gc && !purego && !noasm -// +build !appengine -// +build gc -// +build !purego -// +build !noasm - -#include "textflag.h" - -// Registers: -#define h AX -#define d AX -#define p SI // pointer to advance through b -#define n DX -#define end BX // loop end -#define v1 R8 -#define v2 R9 -#define v3 R10 -#define v4 R11 -#define x R12 -#define prime1 R13 -#define prime2 R14 -#define prime4 DI - -#define round(acc, x) \ - IMULQ prime2, x \ - ADDQ x, acc \ - ROLQ $31, acc \ - IMULQ prime1, acc - -// round0 performs the operation x = round(0, x). -#define round0(x) \ - IMULQ prime2, x \ - ROLQ $31, x \ - IMULQ prime1, x - -// mergeRound applies a merge round on the two registers acc and x. -// It assumes that prime1, prime2, and prime4 have been loaded. -#define mergeRound(acc, x) \ - round0(x) \ - XORQ x, acc \ - IMULQ prime1, acc \ - ADDQ prime4, acc - -// blockLoop processes as many 32-byte blocks as possible, -// updating v1, v2, v3, and v4. It assumes that there is at least one block -// to process. -#define blockLoop() \ -loop: \ - MOVQ +0(p), x \ - round(v1, x) \ - MOVQ +8(p), x \ - round(v2, x) \ - MOVQ +16(p), x \ - round(v3, x) \ - MOVQ +24(p), x \ - round(v4, x) \ - ADDQ $32, p \ - CMPQ p, end \ - JLE loop - -// func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 - // Load fixed primes. - MOVQ ·primes+0(SB), prime1 - MOVQ ·primes+8(SB), prime2 - MOVQ ·primes+24(SB), prime4 - - // Load slice. - MOVQ b_base+0(FP), p - MOVQ b_len+8(FP), n - LEAQ (p)(n*1), end - - // The first loop limit will be len(b)-32. - SUBQ $32, end - - // Check whether we have at least one block. - CMPQ n, $32 - JLT noBlocks - - // Set up initial state (v1, v2, v3, v4). 
- MOVQ prime1, v1 - ADDQ prime2, v1 - MOVQ prime2, v2 - XORQ v3, v3 - XORQ v4, v4 - SUBQ prime1, v4 - - blockLoop() - - MOVQ v1, h - ROLQ $1, h - MOVQ v2, x - ROLQ $7, x - ADDQ x, h - MOVQ v3, x - ROLQ $12, x - ADDQ x, h - MOVQ v4, x - ROLQ $18, x - ADDQ x, h - - mergeRound(h, v1) - mergeRound(h, v2) - mergeRound(h, v3) - mergeRound(h, v4) - - JMP afterBlocks - -noBlocks: - MOVQ ·primes+32(SB), h - -afterBlocks: - ADDQ n, h - - ADDQ $24, end - CMPQ p, end - JG try4 - -loop8: - MOVQ (p), x - ADDQ $8, p - round0(x) - XORQ x, h - ROLQ $27, h - IMULQ prime1, h - ADDQ prime4, h - - CMPQ p, end - JLE loop8 - -try4: - ADDQ $4, end - CMPQ p, end - JG try1 - - MOVL (p), x - ADDQ $4, p - IMULQ prime1, x - XORQ x, h - - ROLQ $23, h - IMULQ prime2, h - ADDQ ·primes+16(SB), h - -try1: - ADDQ $4, end - CMPQ p, end - JGE finalize - -loop1: - MOVBQZX (p), x - ADDQ $1, p - IMULQ ·primes+32(SB), x - XORQ x, h - ROLQ $11, h - IMULQ prime1, h - - CMPQ p, end - JL loop1 - -finalize: - MOVQ h, x - SHRQ $33, x - XORQ x, h - IMULQ prime2, h - MOVQ h, x - SHRQ $29, x - XORQ x, h - IMULQ ·primes+16(SB), h - MOVQ h, x - SHRQ $32, x - XORQ x, h - - MOVQ h, ret+24(FP) - RET - -// func writeBlocks(d *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 - // Load fixed primes needed for round. - MOVQ ·primes+0(SB), prime1 - MOVQ ·primes+8(SB), prime2 - - // Load slice. - MOVQ b_base+8(FP), p - MOVQ b_len+16(FP), n - LEAQ (p)(n*1), end - SUBQ $32, end - - // Load vN from d. - MOVQ s+0(FP), d - MOVQ 0(d), v1 - MOVQ 8(d), v2 - MOVQ 16(d), v3 - MOVQ 24(d), v4 - - // We don't need to check the loop condition here; this function is - // always called with at least one block of data to process. - blockLoop() - - // Copy vN back to d. - MOVQ v1, 0(d) - MOVQ v2, 8(d) - MOVQ v3, 16(d) - MOVQ v4, 24(d) - - // The number of bytes written is p minus the old base pointer. - SUBQ b_base+8(FP), p - MOVQ p, ret+32(FP) - - RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s deleted file mode 100644 index ae7d4d3295..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s +++ /dev/null @@ -1,184 +0,0 @@ -//go:build !appengine && gc && !purego && !noasm -// +build !appengine -// +build gc -// +build !purego -// +build !noasm - -#include "textflag.h" - -// Registers: -#define digest R1 -#define h R2 // return value -#define p R3 // input pointer -#define n R4 // input length -#define nblocks R5 // n / 32 -#define prime1 R7 -#define prime2 R8 -#define prime3 R9 -#define prime4 R10 -#define prime5 R11 -#define v1 R12 -#define v2 R13 -#define v3 R14 -#define v4 R15 -#define x1 R20 -#define x2 R21 -#define x3 R22 -#define x4 R23 - -#define round(acc, x) \ - MADD prime2, acc, x, acc \ - ROR $64-31, acc \ - MUL prime1, acc - -// round0 performs the operation x = round(0, x). -#define round0(x) \ - MUL prime2, x \ - ROR $64-31, x \ - MUL prime1, x - -#define mergeRound(acc, x) \ - round0(x) \ - EOR x, acc \ - MADD acc, prime4, prime1, acc - -// blockLoop processes as many 32-byte blocks as possible, -// updating v1, v2, v3, and v4. It assumes that n >= 32. 
-#define blockLoop() \ - LSR $5, n, nblocks \ - PCALIGN $16 \ - loop: \ - LDP.P 16(p), (x1, x2) \ - LDP.P 16(p), (x3, x4) \ - round(v1, x1) \ - round(v2, x2) \ - round(v3, x3) \ - round(v4, x4) \ - SUB $1, nblocks \ - CBNZ nblocks, loop - -// func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32 - LDP b_base+0(FP), (p, n) - - LDP ·primes+0(SB), (prime1, prime2) - LDP ·primes+16(SB), (prime3, prime4) - MOVD ·primes+32(SB), prime5 - - CMP $32, n - CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 } - BLT afterLoop - - ADD prime1, prime2, v1 - MOVD prime2, v2 - MOVD $0, v3 - NEG prime1, v4 - - blockLoop() - - ROR $64-1, v1, x1 - ROR $64-7, v2, x2 - ADD x1, x2 - ROR $64-12, v3, x3 - ROR $64-18, v4, x4 - ADD x3, x4 - ADD x2, x4, h - - mergeRound(h, v1) - mergeRound(h, v2) - mergeRound(h, v3) - mergeRound(h, v4) - -afterLoop: - ADD n, h - - TBZ $4, n, try8 - LDP.P 16(p), (x1, x2) - - round0(x1) - - // NOTE: here and below, sequencing the EOR after the ROR (using a - // rotated register) is worth a small but measurable speedup for small - // inputs. - ROR $64-27, h - EOR x1 @> 64-27, h, h - MADD h, prime4, prime1, h - - round0(x2) - ROR $64-27, h - EOR x2 @> 64-27, h, h - MADD h, prime4, prime1, h - -try8: - TBZ $3, n, try4 - MOVD.P 8(p), x1 - - round0(x1) - ROR $64-27, h - EOR x1 @> 64-27, h, h - MADD h, prime4, prime1, h - -try4: - TBZ $2, n, try2 - MOVWU.P 4(p), x2 - - MUL prime1, x2 - ROR $64-23, h - EOR x2 @> 64-23, h, h - MADD h, prime3, prime2, h - -try2: - TBZ $1, n, try1 - MOVHU.P 2(p), x3 - AND $255, x3, x1 - LSR $8, x3, x2 - - MUL prime5, x1 - ROR $64-11, h - EOR x1 @> 64-11, h, h - MUL prime1, h - - MUL prime5, x2 - ROR $64-11, h - EOR x2 @> 64-11, h, h - MUL prime1, h - -try1: - TBZ $0, n, finalize - MOVBU (p), x4 - - MUL prime5, x4 - ROR $64-11, h - EOR x4 @> 64-11, h, h - MUL prime1, h - -finalize: - EOR h >> 33, h - MUL prime2, h - EOR h >> 29, h - MUL prime3, h - EOR h >> 32, h - - MOVD h, ret+24(FP) - RET - -// func writeBlocks(s *Digest, b []byte) int -TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40 - LDP ·primes+0(SB), (prime1, prime2) - - // Load state. Assume v[1-4] are stored contiguously. - MOVD s+0(FP), digest - LDP 0(digest), (v1, v2) - LDP 16(digest), (v3, v4) - - LDP b_base+8(FP), (p, n) - - blockLoop() - - // Store updated state. - STP (v1, v2), 0(digest) - STP (v3, v4), 16(digest) - - BIC $31, n - MOVD n, ret+32(FP) - RET diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go deleted file mode 100644 index d4221edf4f..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build (amd64 || arm64) && !appengine && gc && !purego && !noasm -// +build amd64 arm64 -// +build !appengine -// +build gc -// +build !purego -// +build !noasm - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. 
-// -//go:noescape -func Sum64(b []byte) uint64 - -//go:noescape -func writeBlocks(s *Digest, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go deleted file mode 100644 index 0be16cefc7..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go +++ /dev/null @@ -1,76 +0,0 @@ -//go:build (!amd64 && !arm64) || appengine || !gc || purego || noasm -// +build !amd64,!arm64 appengine !gc purego noasm - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. -func Sum64(b []byte) uint64 { - // A simpler version would be - // d := New() - // d.Write(b) - // return d.Sum64() - // but this is faster, particularly for small inputs. - - n := len(b) - var h uint64 - - if n >= 32 { - v1 := primes[0] + prime2 - v2 := prime2 - v3 := uint64(0) - v4 := -primes[0] - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = prime5 - } - - h += uint64(n) - - for ; len(b) >= 8; b = b[8:] { - k1 := round(0, u64(b[:8])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if len(b) >= 4 { - h ^= uint64(u32(b[:4])) * prime1 - h = rol23(h)*prime2 + prime3 - b = b[4:] - } - for ; len(b) > 0; b = b[1:] { - h ^= uint64(b[0]) * prime5 - h = rol11(h) * prime1 - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -func writeBlocks(d *Digest, b []byte) int { - v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4 - n := len(b) - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4 - return n - len(b) -} diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go deleted file mode 100644 index 6f3b0cb102..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_safe.go +++ /dev/null @@ -1,11 +0,0 @@ -package xxhash - -// Sum64String computes the 64-bit xxHash digest of s. -func Sum64String(s string) uint64 { - return Sum64([]byte(s)) -} - -// WriteString adds more data to d. It always returns len(s), nil. -func (d *Digest) WriteString(s string) (n int, err error) { - return d.Write([]byte(s)) -} diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go deleted file mode 100644 index f41932b7a4..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc - -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. 
- -package zstd - -// matchLen returns how many bytes match in a and b -// -// It assumes that: -// -// len(a) <= len(b) and len(a) > 0 -// -//go:noescape -func matchLen(a []byte, b []byte) int diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s deleted file mode 100644 index 0782b86e3d..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s +++ /dev/null @@ -1,66 +0,0 @@ -// Copied from S2 implementation. - -//go:build !appengine && !noasm && gc && !noasm - -#include "textflag.h" - -// func matchLen(a []byte, b []byte) int -TEXT ·matchLen(SB), NOSPLIT, $0-56 - MOVQ a_base+0(FP), AX - MOVQ b_base+24(FP), CX - MOVQ a_len+8(FP), DX - - // matchLen - XORL SI, SI - CMPL DX, $0x08 - JB matchlen_match4_standalone - -matchlen_loopback_standalone: - MOVQ (AX)(SI*1), BX - XORQ (CX)(SI*1), BX - JZ matchlen_loop_standalone - -#ifdef GOAMD64_v3 - TZCNTQ BX, BX -#else - BSFQ BX, BX -#endif - SHRL $0x03, BX - LEAL (SI)(BX*1), SI - JMP gen_match_len_end - -matchlen_loop_standalone: - LEAL -8(DX), DX - LEAL 8(SI), SI - CMPL DX, $0x08 - JAE matchlen_loopback_standalone - -matchlen_match4_standalone: - CMPL DX, $0x04 - JB matchlen_match2_standalone - MOVL (AX)(SI*1), BX - CMPL (CX)(SI*1), BX - JNE matchlen_match2_standalone - LEAL -4(DX), DX - LEAL 4(SI), SI - -matchlen_match2_standalone: - CMPL DX, $0x02 - JB matchlen_match1_standalone - MOVW (AX)(SI*1), BX - CMPW (CX)(SI*1), BX - JNE matchlen_match1_standalone - LEAL -2(DX), DX - LEAL 2(SI), SI - -matchlen_match1_standalone: - CMPL DX, $0x01 - JB gen_match_len_end - MOVB (AX)(SI*1), BL - CMPB (CX)(SI*1), BL - JNE gen_match_len_end - INCL SI - -gen_match_len_end: - MOVQ SI, ret+48(FP) - RET diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go deleted file mode 100644 index 57b9c31c02..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go +++ /dev/null @@ -1,33 +0,0 @@ -//go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm - -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. - -package zstd - -import ( - "encoding/binary" - "math/bits" -) - -// matchLen returns the maximum common prefix length of a and b. -// a must be the shortest of the two. -func matchLen(a, b []byte) (n int) { - for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { - diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) - if diff != 0 { - return n + bits.TrailingZeros64(diff)>>3 - } - n += 8 - } - - for i := range a { - if a[i] != b[i] { - break - } - n++ - } - return n - -} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go deleted file mode 100644 index d7fe6d82d9..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/seqdec.go +++ /dev/null @@ -1,503 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "errors" - "fmt" - "io" -) - -type seq struct { - litLen uint32 - matchLen uint32 - offset uint32 - - // Codes are stored here for the encoder - // so they only have to be looked up once. 
-	llCode, mlCode, ofCode uint8
-}
-
-type seqVals struct {
-	ll, ml, mo int
-}
-
-func (s seq) String() string {
-	if s.offset <= 3 {
-		if s.offset == 0 {
-			return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset: INVALID (0)")
-		}
-		return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset, " (repeat)")
-	}
-	return fmt.Sprint("litLen:", s.litLen, ", matchLen:", s.matchLen+zstdMinMatch, ", offset:", s.offset-3, " (new)")
-}
-
-type seqCompMode uint8
-
-const (
-	compModePredefined seqCompMode = iota
-	compModeRLE
-	compModeFSE
-	compModeRepeat
-)
-
-type sequenceDec struct {
-	// decoder keeps track of the current state and updates it from the bitstream.
-	fse    *fseDecoder
-	state  fseState
-	repeat bool
-}
-
-// init the state of the decoder with input from stream.
-func (s *sequenceDec) init(br *bitReader) error {
-	if s.fse == nil {
-		return errors.New("sequence decoder not defined")
-	}
-	s.state.init(br, s.fse.actualTableLog, s.fse.dt[:1<<s.fse.actualTableLog])
-	return nil
-}
-
-// execute will execute the decoded sequences on the provided history.
-func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
-	// Ensure we have enough output size...
-	if len(s.out)+s.seqSize > cap(s.out) {
-		addBytes := s.seqSize + len(s.out)
-		s.out = append(s.out, make([]byte, addBytes)...)
-		s.out = s.out[:len(s.out)-addBytes]
-	}
-
-	if debugDecoder {
-		printf("Execute %d seqs with hist %d, dict %d, literals: %d into %d bytes\n", len(seqs), len(hist), len(s.dict), len(s.literals), s.seqSize)
-	}
-
-	var t = len(s.out)
-	out := s.out[:t+s.seqSize]
-
-	for _, seq := range seqs {
-		// Add literals
-		copy(out[t:], s.literals[:seq.ll])
-		t += seq.ll
-		s.literals = s.literals[seq.ll:]
-
-		// Copy from dictionary...
-		if seq.mo > t+len(hist) || seq.mo > s.windowSize {
-			if len(s.dict) == 0 {
-				return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist))
-			}
-
-			// we may be in dictionary.
-			dictO := len(s.dict) - (seq.mo - (t + len(hist)))
-			if dictO < 0 || dictO >= len(s.dict) {
-				return fmt.Errorf("match offset (%d) bigger than current history+dict (%d)", seq.mo, t+len(hist)+len(s.dict))
-			}
-			end := dictO + seq.ml
-			if end > len(s.dict) {
-				n := len(s.dict) - dictO
-				copy(out[t:], s.dict[dictO:])
-				t += n
-				seq.ml -= n
-			} else {
-				copy(out[t:], s.dict[dictO:end])
-				t += end - dictO
-				continue
-			}
-		}
-
-		// Copy from history.
-		if v := seq.mo - t; v > 0 {
-			// v is the start position in history from end.
-			start := len(hist) - v
-			if seq.ml > v {
-				// Some goes into current block.
-				// Copy remainder of history
-				copy(out[t:], hist[start:])
-				t += v
-				seq.ml -= v
-			} else {
-				copy(out[t:], hist[start:start+seq.ml])
-				t += seq.ml
-				continue
-			}
-		}
-		// We must be in current buffer now
-		if seq.ml > 0 {
-			start := t - seq.mo
-			if seq.ml <= t-start {
-				// No overlap
-				copy(out[t:], out[start:start+seq.ml])
-				t += seq.ml
-				continue
-			} else {
-				// Overlapping copy
-				// Extend destination slice and copy one byte at a time.
-				src := out[start : start+seq.ml]
-				dst := out[t:]
-				dst = dst[:len(src)]
-				t += len(src)
-				// Destination is the space we just added.
-				for i := range src {
-					dst[i] = src[i]
-				}
-			}
-		}
-	}
-
-	// Add final literals
-	copy(out[t:], s.literals)
-	if debugDecoder {
-		t += len(s.literals)
-		if t != len(out) {
-			panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize))
-		}
-	}
-	s.out = out
-
-	return nil
-}
-
-// decode sequences from the stream with the provided history.
-func (s *sequenceDecs) decodeSync(hist []byte) error { - supported, err := s.decodeSyncSimple(hist) - if supported { - return err - } - - br := s.br - seqs := s.nSeqs - startSize := len(s.out) - // Grab full sizes tables, to avoid bounds checks. - llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] - llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state - out := s.out - maxBlockSize := maxCompressedBlockSize - if s.windowSize < maxBlockSize { - maxBlockSize = s.windowSize - } - - if debugDecoder { - println("decodeSync: decoding", seqs, "sequences", br.remain(), "bits remain on stream") - } - for i := seqs - 1; i >= 0; i-- { - if br.overread() { - printf("reading sequence %d, exceeded available data. Overread by %d\n", seqs-i, -br.remain()) - return io.ErrUnexpectedEOF - } - var ll, mo, ml int - if len(br.in) > 4+((maxOffsetBits+16+16)>>3) { - // inlined function: - // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) - - // Final will not read from stream. - var llB, mlB, moB uint8 - ll, llB = llState.final() - ml, mlB = mlState.final() - mo, moB = ofState.final() - - // extra bits are stored in reverse order. - br.fillFast() - mo += br.getBits(moB) - if s.maxBits > 32 { - br.fillFast() - } - ml += br.getBits(mlB) - ll += br.getBits(llB) - - if moB > 1 { - s.prevOffset[2] = s.prevOffset[1] - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = mo - } else { - // mo = s.adjustOffset(mo, ll, moB) - // Inlined for rather big speedup - if ll == 0 { - // There is an exception though, when current sequence's literals_length = 0. - // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, - // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. - mo++ - } - - if mo == 0 { - mo = s.prevOffset[0] - } else { - var temp int - if mo == 3 { - temp = s.prevOffset[0] - 1 - } else { - temp = s.prevOffset[mo] - } - - if temp == 0 { - // 0 is not valid; input is corrupted; force offset to 1 - println("WARNING: temp was 0") - temp = 1 - } - - if mo != 1 { - s.prevOffset[2] = s.prevOffset[1] - } - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = temp - mo = temp - } - } - br.fillFast() - } else { - ll, mo, ml = s.next(br, llState, mlState, ofState) - br.fill() - } - - if debugSequences { - println("Seq", seqs-i-1, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) - } - - if ll > len(s.literals) { - return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals)) - } - size := ll + ml + len(out) - if size-startSize > maxBlockSize { - return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - } - if size > cap(out) { - // Not enough size, which can happen under high volume block streaming conditions - // but could be if destination slice is too small for sync operations. - // over-allocating here can create a large amount of GC pressure so we try to keep - // it as contained as possible - used := len(out) - startSize - addBytes := 256 + ll + ml + used>>2 - // Clamp to max block size. - if used+addBytes > maxBlockSize { - addBytes = maxBlockSize - used - } - out = append(out, make([]byte, addBytes)...) - out = out[:len(out)-addBytes] - } - if ml > maxMatchLen { - return fmt.Errorf("match len (%d) bigger than max allowed length", ml) - } - - // Add literals - out = append(out, s.literals[:ll]...) 
-		s.literals = s.literals[ll:]
-
-		if mo == 0 && ml > 0 {
-			return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
-		}
-
-		if mo > len(out)+len(hist) || mo > s.windowSize {
-			if len(s.dict) == 0 {
-				return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize)
-			}
-
-			// we may be in dictionary.
-			dictO := len(s.dict) - (mo - (len(out) + len(hist)))
-			if dictO < 0 || dictO >= len(s.dict) {
-				return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)-startSize)
-			}
-			end := dictO + ml
-			if end > len(s.dict) {
-				out = append(out, s.dict[dictO:]...)
-				ml -= len(s.dict) - dictO
-			} else {
-				out = append(out, s.dict[dictO:end]...)
-				mo = 0
-				ml = 0
-			}
-		}
-
-		// Copy from history.
-		// TODO: Blocks without history could be made to ignore this completely.
-		if v := mo - len(out); v > 0 {
-			// v is the start position in history from end.
-			start := len(hist) - v
-			if ml > v {
-				// Some goes into current block.
-				// Copy remainder of history
-				out = append(out, hist[start:]...)
-				ml -= v
-			} else {
-				out = append(out, hist[start:start+ml]...)
-				ml = 0
-			}
-		}
-		// We must be in current buffer now
-		if ml > 0 {
-			start := len(out) - mo
-			if ml <= len(out)-start {
-				// No overlap
-				out = append(out, out[start:start+ml]...)
-			} else {
-				// Overlapping copy
-				// Extend destination slice and copy one byte at a time.
-				out = out[:len(out)+ml]
-				src := out[start : start+ml]
-				// Destination is the space we just added.
-				dst := out[len(out)-ml:]
-				dst = dst[:len(src)]
-				for i := range src {
-					dst[i] = src[i]
-				}
-			}
-		}
-		if i == 0 {
-			// This is the last sequence, so we shouldn't update state.
-			break
-		}
-
-		// Manually inlined, ~ 5-20% faster
-		// Update all 3 states at once. Approx 20% faster.
-		nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits()
-		if nBits == 0 {
-			llState = llTable[llState.newState()&maxTableMask]
-			mlState = mlTable[mlState.newState()&maxTableMask]
-			ofState = ofTable[ofState.newState()&maxTableMask]
-		} else {
-			bits := br.get32BitsFast(nBits)
-
-			lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
-			llState = llTable[(llState.newState()+lowBits)&maxTableMask]
-
-			lowBits = uint16(bits >> (ofState.nbBits() & 31))
-			lowBits &= bitMask[mlState.nbBits()&15]
-			mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask]
-
-			lowBits = uint16(bits) & bitMask[ofState.nbBits()&15]
-			ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask]
-		}
-	}
-
-	if size := len(s.literals) + len(out) - startSize; size > maxBlockSize {
-		return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
-	}
-
-	// Add final literals
-	s.out = append(out, s.literals...)
-	return br.close()
-}
-
-var bitMask [16]uint16
-
-func init() {
-	for i := range bitMask[:] {
-		bitMask[i] = uint16((1 << uint(i)) - 1)
-	}
-}
-
-func (s *sequenceDecs) next(br *bitReader, llState, mlState, ofState decSymbol) (ll, mo, ml int) {
-	// Final will not read from stream.
-	ll, llB := llState.final()
-	ml, mlB := mlState.final()
-	mo, moB := ofState.final()
-
-	// extra bits are stored in reverse order.
-	br.fill()
-	mo += br.getBits(moB)
-	if s.maxBits > 32 {
-		br.fill()
-	}
-	// matchlength+literal length, max 32 bits
-	ml += br.getBits(mlB)
-	ll += br.getBits(llB)
-	mo = s.adjustOffset(mo, ll, moB)
-	return
-}
-
-func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int {
-	if offsetB > 1 {
-		s.prevOffset[2] = s.prevOffset[1]
-		s.prevOffset[1] = s.prevOffset[0]
-		s.prevOffset[0] = offset
-		return offset
-	}
-
-	if litLen == 0 {
-		// There is an exception though, when current sequence's literals_length = 0.
-		// In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
-		// an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
-		offset++
-	}
-
-	if offset == 0 {
-		return s.prevOffset[0]
-	}
-	var temp int
-	if offset == 3 {
-		temp = s.prevOffset[0] - 1
-	} else {
-		temp = s.prevOffset[offset]
-	}
-
-	if temp == 0 {
-		// 0 is not valid; input is corrupted; force offset to 1
-		println("temp was 0")
-		temp = 1
-	}
-
-	if offset != 1 {
-		s.prevOffset[2] = s.prevOffset[1]
-	}
-	s.prevOffset[1] = s.prevOffset[0]
-	s.prevOffset[0] = temp
-	return temp
-}
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
deleted file mode 100644
index c59f17e07a..0000000000
--- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
+++ /dev/null
@@ -1,394 +0,0 @@
-//go:build amd64 && !appengine && !noasm && gc
-// +build amd64,!appengine,!noasm,gc
-
-package zstd
-
-import (
-	"fmt"
-	"io"
-
-	"github.com/klauspost/compress/internal/cpuinfo"
-)
-
-type decodeSyncAsmContext struct {
-	llTable     []decSymbol
-	mlTable     []decSymbol
-	ofTable     []decSymbol
-	llState     uint64
-	mlState     uint64
-	ofState     uint64
-	iteration   int
-	litRemain   int
-	out         []byte
-	outPosition int
-	literals    []byte
-	litPosition int
-	history     []byte
-	windowSize  int
-	ll          int // set on error (not for all errors, please refer to _generate/gen.go)
-	ml          int // set on error (not for all errors, please refer to _generate/gen.go)
-	mo          int // set on error (not for all errors, please refer to _generate/gen.go)
-}
-
-// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm.
-//
-// Please refer to seqdec_generic.go for the reference implementation.
-//
-//go:noescape
-func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
-
-// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions.
-//
-//go:noescape
-func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
-
-// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than the output buffer.
-//
-//go:noescape
-func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
-
-// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than the output buffer.
-//
-//go:noescape
-func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
-
-// decode sequences from the stream with the provided history but without a dictionary.
-func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { - if len(s.dict) > 0 { - return false, nil - } - if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize { - return false, nil - } - - // FIXME: Using unsafe memory copies leads to rare, random crashes - // with fuzz testing. It is therefore disabled for now. - const useSafe = true - /* - useSafe := false - if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc { - useSafe = true - } - if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) { - useSafe = true - } - if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { - useSafe = true - } - */ - - br := s.br - - maxBlockSize := maxCompressedBlockSize - if s.windowSize < maxBlockSize { - maxBlockSize = s.windowSize - } - - ctx := decodeSyncAsmContext{ - llTable: s.litLengths.fse.dt[:maxTablesize], - mlTable: s.matchLengths.fse.dt[:maxTablesize], - ofTable: s.offsets.fse.dt[:maxTablesize], - llState: uint64(s.litLengths.state.state), - mlState: uint64(s.matchLengths.state.state), - ofState: uint64(s.offsets.state.state), - iteration: s.nSeqs - 1, - litRemain: len(s.literals), - out: s.out, - outPosition: len(s.out), - literals: s.literals, - windowSize: s.windowSize, - history: hist, - } - - s.seqSize = 0 - startSize := len(s.out) - - var errCode int - if cpuinfo.HasBMI2() { - if useSafe { - errCode = sequenceDecs_decodeSync_safe_bmi2(s, br, &ctx) - } else { - errCode = sequenceDecs_decodeSync_bmi2(s, br, &ctx) - } - } else { - if useSafe { - errCode = sequenceDecs_decodeSync_safe_amd64(s, br, &ctx) - } else { - errCode = sequenceDecs_decodeSync_amd64(s, br, &ctx) - } - } - switch errCode { - case noError: - break - - case errorMatchLenOfsMismatch: - return true, fmt.Errorf("zero matchoff and matchlen (%d) > 0", ctx.ml) - - case errorMatchLenTooBig: - return true, fmt.Errorf("match len (%d) bigger than max allowed length", ctx.ml) - - case errorMatchOffTooBig: - return true, fmt.Errorf("match offset (%d) bigger than current history (%d)", - ctx.mo, ctx.outPosition+len(hist)-startSize) - - case errorNotEnoughLiterals: - return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", - ctx.ll, ctx.litRemain+ctx.ll) - - case errorOverread: - return true, io.ErrUnexpectedEOF - - case errorNotEnoughSpace: - size := ctx.outPosition + ctx.ll + ctx.ml - if debugDecoder { - println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize) - } - return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - - default: - return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode) - } - - s.seqSize += ctx.litRemain - if s.seqSize > maxBlockSize { - return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - } - err := br.close() - if err != nil { - printf("Closing sequences: %v, %+v\n", err, *br) - return true, err - } - - s.literals = s.literals[ctx.litPosition:] - t := ctx.outPosition - s.out = s.out[:t] - - // Add final literals - s.out = append(s.out, s.literals...) 
-	if debugDecoder {
-		t += len(s.literals)
-		if t != len(s.out) {
-			panic(fmt.Errorf("length mismatch, want %d, got %d", len(s.out), t))
-		}
-	}
-
-	return true, nil
-}
-
-// --------------------------------------------------------------------------------
-
-type decodeAsmContext struct {
-	llTable   []decSymbol
-	mlTable   []decSymbol
-	ofTable   []decSymbol
-	llState   uint64
-	mlState   uint64
-	ofState   uint64
-	iteration int
-	seqs      []seqVals
-	litRemain int
-}
-
-const noError = 0
-
-// error reported when mo == 0 && ml > 0
-const errorMatchLenOfsMismatch = 1
-
-// error reported when ml > maxMatchLen
-const errorMatchLenTooBig = 2
-
-// error reported when mo > available history or mo > s.windowSize
-const errorMatchOffTooBig = 3
-
-// error reported when the sum of literal lengths exceeds the literal buffer size
-const errorNotEnoughLiterals = 4
-
-// error reported when capacity of `out` is too small
-const errorNotEnoughSpace = 5
-
-// error reported when bits are overread.
-const errorOverread = 6
-
-// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
-//
-// Please refer to seqdec_generic.go for the reference implementation.
-//
-//go:noescape
-func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
-
-// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
-//
-// Please refer to seqdec_generic.go for the reference implementation.
-//
-//go:noescape
-func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
-
-// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
-//
-//go:noescape
-func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
-
-// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
-//
-//go:noescape
-func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
-
-// decode sequences from the stream without the provided history.
-func (s *sequenceDecs) decode(seqs []seqVals) error { - br := s.br - - maxBlockSize := maxCompressedBlockSize - if s.windowSize < maxBlockSize { - maxBlockSize = s.windowSize - } - - ctx := decodeAsmContext{ - llTable: s.litLengths.fse.dt[:maxTablesize], - mlTable: s.matchLengths.fse.dt[:maxTablesize], - ofTable: s.offsets.fse.dt[:maxTablesize], - llState: uint64(s.litLengths.state.state), - mlState: uint64(s.matchLengths.state.state), - ofState: uint64(s.offsets.state.state), - seqs: seqs, - iteration: len(seqs) - 1, - litRemain: len(s.literals), - } - - if debugDecoder { - println("decode: decoding", len(seqs), "sequences", br.remain(), "bits remain on stream") - } - - s.seqSize = 0 - lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56 - var errCode int - if cpuinfo.HasBMI2() { - if lte56bits { - errCode = sequenceDecs_decode_56_bmi2(s, br, &ctx) - } else { - errCode = sequenceDecs_decode_bmi2(s, br, &ctx) - } - } else { - if lte56bits { - errCode = sequenceDecs_decode_56_amd64(s, br, &ctx) - } else { - errCode = sequenceDecs_decode_amd64(s, br, &ctx) - } - } - if errCode != 0 { - i := len(seqs) - ctx.iteration - 1 - switch errCode { - case errorMatchLenOfsMismatch: - ml := ctx.seqs[i].ml - return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) - - case errorMatchLenTooBig: - ml := ctx.seqs[i].ml - return fmt.Errorf("match len (%d) bigger than max allowed length", ml) - - case errorNotEnoughLiterals: - ll := ctx.seqs[i].ll - return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll) - case errorOverread: - return io.ErrUnexpectedEOF - } - - return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode) - } - - if ctx.litRemain < 0 { - return fmt.Errorf("literal count is too big: total available %d, total requested %d", - len(s.literals), len(s.literals)-ctx.litRemain) - } - - s.seqSize += ctx.litRemain - if s.seqSize > maxBlockSize { - return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - } - if debugDecoder { - println("decode: ", br.remain(), "bits remain on stream. code:", errCode) - } - err := br.close() - if err != nil { - printf("Closing sequences: %v, %+v\n", err, *br) - } - return err -} - -// -------------------------------------------------------------------------------- - -type executeAsmContext struct { - seqs []seqVals - seqIndex int - out []byte - history []byte - literals []byte - outPosition int - litPosition int - windowSize int -} - -// sequenceDecs_executeSimple_amd64 implements the main loop of sequenceDecs.executeSimple in x86 asm. -// -// Returns false if a match offset is too big. -// -// Please refer to seqdec_generic.go for the reference implementation. -// -//go:noescape -func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool - -// Same as above, but with safe memcopies -// -//go:noescape -func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool - -// executeSimple handles cases when dictionary is not used. -func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { - // Ensure we have enough output size... - if len(s.out)+s.seqSize+compressedBlockOverAlloc > cap(s.out) { - addBytes := s.seqSize + len(s.out) + compressedBlockOverAlloc - s.out = append(s.out, make([]byte, addBytes)...) 
- s.out = s.out[:len(s.out)-addBytes] - } - - if debugDecoder { - printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) - } - - var t = len(s.out) - out := s.out[:t+s.seqSize] - - ctx := executeAsmContext{ - seqs: seqs, - seqIndex: 0, - out: out, - history: hist, - outPosition: t, - litPosition: 0, - literals: s.literals, - windowSize: s.windowSize, - } - var ok bool - if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc { - ok = sequenceDecs_executeSimple_safe_amd64(&ctx) - } else { - ok = sequenceDecs_executeSimple_amd64(&ctx) - } - if !ok { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", - seqs[ctx.seqIndex].mo, ctx.outPosition+len(hist)) - } - s.literals = s.literals[ctx.litPosition:] - t = ctx.outPosition - - // Add final literals - copy(out[t:], s.literals) - if debugDecoder { - t += len(s.literals) - if t != len(out) { - panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) - } - } - s.out = out - - return nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s deleted file mode 100644 index f5591fa1e8..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s +++ /dev/null @@ -1,4151 +0,0 @@ -// Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT. - -//go:build !appengine && !noasm && gc && !noasm - -// func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int -// Requires: CMOV -TEXT ·sequenceDecs_decode_amd64(SB), $8-32 - MOVQ br+8(FP), CX - MOVQ 24(CX), DX - MOVBQZX 32(CX), BX - MOVQ (CX), AX - MOVQ 8(CX), SI - ADDQ SI, AX - MOVQ AX, (SP) - MOVQ ctx+16(FP), AX - MOVQ 72(AX), DI - MOVQ 80(AX), R8 - MOVQ 88(AX), R9 - MOVQ 104(AX), R10 - MOVQ s+0(FP), AX - MOVQ 144(AX), R11 - MOVQ 152(AX), R12 - MOVQ 160(AX), R13 - -sequenceDecs_decode_amd64_main_loop: - MOVQ (SP), R14 - - // Fill bitreader to have enough for the offset and match length. 
- CMPQ SI, $0x08 - JL sequenceDecs_decode_amd64_fill_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R14 - MOVQ (R14), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decode_amd64_fill_end - -sequenceDecs_decode_amd64_fill_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decode_amd64_fill_check_overread - CMPQ BX, $0x07 - JLE sequenceDecs_decode_amd64_fill_end - SHLQ $0x08, DX - SUBQ $0x01, R14 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R14), AX - ORQ AX, DX - JMP sequenceDecs_decode_amd64_fill_byte_by_byte - -sequenceDecs_decode_amd64_fill_check_overread: - CMPQ BX, $0x40 - JA error_overread - -sequenceDecs_decode_amd64_fill_end: - // Update offset - MOVQ R9, AX - MOVQ BX, CX - MOVQ DX, R15 - SHLQ CL, R15 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decode_amd64_of_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decode_amd64_of_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decode_amd64_of_update_zero - NEGQ CX - SHRQ CL, R15 - ADDQ R15, AX - -sequenceDecs_decode_amd64_of_update_zero: - MOVQ AX, 16(R10) - - // Update match length - MOVQ R8, AX - MOVQ BX, CX - MOVQ DX, R15 - SHLQ CL, R15 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decode_amd64_ml_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decode_amd64_ml_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decode_amd64_ml_update_zero - NEGQ CX - SHRQ CL, R15 - ADDQ R15, AX - -sequenceDecs_decode_amd64_ml_update_zero: - MOVQ AX, 8(R10) - - // Fill bitreader to have enough for the remaining - CMPQ SI, $0x08 - JL sequenceDecs_decode_amd64_fill_2_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R14 - MOVQ (R14), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decode_amd64_fill_2_end - -sequenceDecs_decode_amd64_fill_2_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decode_amd64_fill_2_check_overread - CMPQ BX, $0x07 - JLE sequenceDecs_decode_amd64_fill_2_end - SHLQ $0x08, DX - SUBQ $0x01, R14 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R14), AX - ORQ AX, DX - JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte - -sequenceDecs_decode_amd64_fill_2_check_overread: - CMPQ BX, $0x40 - JA error_overread - -sequenceDecs_decode_amd64_fill_2_end: - // Update literal length - MOVQ DI, AX - MOVQ BX, CX - MOVQ DX, R15 - SHLQ CL, R15 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decode_amd64_ll_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decode_amd64_ll_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decode_amd64_ll_update_zero - NEGQ CX - SHRQ CL, R15 - ADDQ R15, AX - -sequenceDecs_decode_amd64_ll_update_zero: - MOVQ AX, (R10) - - // Fill bitreader for state updates - MOVQ R14, (SP) - MOVQ R9, AX - SHRQ $0x08, AX - MOVBQZX AL, AX - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decode_amd64_skip_update - - // Update Literal Length State - MOVBQZX DI, R14 - SHRL $0x10, DI - LEAQ (BX)(R14*1), CX - MOVQ DX, R15 - MOVQ CX, BX - ROLQ CL, R15 - MOVL $0x00000001, BP - MOVB R14, CL - SHLL CL, BP - DECL BP - ANDQ BP, R15 - ADDQ R15, DI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(DI*8), DI - - // Update Match Length State - MOVBQZX R8, R14 - SHRL $0x10, R8 - LEAQ (BX)(R14*1), CX - MOVQ DX, R15 - MOVQ CX, BX - ROLQ CL, R15 - MOVL $0x00000001, BP - MOVB R14, CL - SHLL CL, BP - DECL BP - ANDQ BP, R15 - ADDQ R15, R8 - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Offset State - MOVBQZX R9, R14 - SHRL $0x10, R9 - LEAQ (BX)(R14*1), CX - MOVQ DX, R15 - 
MOVQ CX, BX - ROLQ CL, R15 - MOVL $0x00000001, BP - MOVB R14, CL - SHLL CL, BP - DECL BP - ANDQ BP, R15 - ADDQ R15, R9 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R9*8), R9 - -sequenceDecs_decode_amd64_skip_update: - // Adjust offset - MOVQ 16(R10), CX - CMPQ AX, $0x01 - JBE sequenceDecs_decode_amd64_adjust_offsetB_1_or_0 - MOVQ R12, R13 - MOVQ R11, R12 - MOVQ CX, R11 - JMP sequenceDecs_decode_amd64_after_adjust - -sequenceDecs_decode_amd64_adjust_offsetB_1_or_0: - CMPQ (R10), $0x00000000 - JNE sequenceDecs_decode_amd64_adjust_offset_maybezero - INCQ CX - JMP sequenceDecs_decode_amd64_adjust_offset_nonzero - -sequenceDecs_decode_amd64_adjust_offset_maybezero: - TESTQ CX, CX - JNZ sequenceDecs_decode_amd64_adjust_offset_nonzero - MOVQ R11, CX - JMP sequenceDecs_decode_amd64_after_adjust - -sequenceDecs_decode_amd64_adjust_offset_nonzero: - CMPQ CX, $0x01 - JB sequenceDecs_decode_amd64_adjust_zero - JEQ sequenceDecs_decode_amd64_adjust_one - CMPQ CX, $0x02 - JA sequenceDecs_decode_amd64_adjust_three - JMP sequenceDecs_decode_amd64_adjust_two - -sequenceDecs_decode_amd64_adjust_zero: - MOVQ R11, AX - JMP sequenceDecs_decode_amd64_adjust_test_temp_valid - -sequenceDecs_decode_amd64_adjust_one: - MOVQ R12, AX - JMP sequenceDecs_decode_amd64_adjust_test_temp_valid - -sequenceDecs_decode_amd64_adjust_two: - MOVQ R13, AX - JMP sequenceDecs_decode_amd64_adjust_test_temp_valid - -sequenceDecs_decode_amd64_adjust_three: - LEAQ -1(R11), AX - -sequenceDecs_decode_amd64_adjust_test_temp_valid: - TESTQ AX, AX - JNZ sequenceDecs_decode_amd64_adjust_temp_valid - MOVQ $0x00000001, AX - -sequenceDecs_decode_amd64_adjust_temp_valid: - CMPQ CX, $0x01 - CMOVQNE R12, R13 - MOVQ R11, R12 - MOVQ AX, R11 - MOVQ AX, CX - -sequenceDecs_decode_amd64_after_adjust: - MOVQ CX, 16(R10) - - // Check values - MOVQ 8(R10), AX - MOVQ (R10), R14 - LEAQ (AX)(R14*1), R15 - MOVQ s+0(FP), BP - ADDQ R15, 256(BP) - MOVQ ctx+16(FP), R15 - SUBQ R14, 128(R15) - JS error_not_enough_literals - CMPQ AX, $0x00020002 - JA sequenceDecs_decode_amd64_error_match_len_too_big - TESTQ CX, CX - JNZ sequenceDecs_decode_amd64_match_len_ofs_ok - TESTQ AX, AX - JNZ sequenceDecs_decode_amd64_error_match_len_ofs_mismatch - -sequenceDecs_decode_amd64_match_len_ofs_ok: - ADDQ $0x18, R10 - MOVQ ctx+16(FP), AX - DECQ 96(AX) - JNS sequenceDecs_decode_amd64_main_loop - MOVQ s+0(FP), AX - MOVQ R11, 144(AX) - MOVQ R12, 152(AX) - MOVQ R13, 160(AX) - MOVQ br+8(FP), AX - MOVQ DX, 24(AX) - MOVB BL, 32(AX) - MOVQ SI, 8(AX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decode_amd64_error_match_len_ofs_mismatch: - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decode_amd64_error_match_len_too_big: - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - -// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int -// Requires: CMOV -TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 - MOVQ br+8(FP), CX - MOVQ 24(CX), DX - MOVBQZX 32(CX), BX - MOVQ (CX), AX - MOVQ 8(CX), SI - ADDQ SI, AX - MOVQ AX, (SP) - MOVQ ctx+16(FP), AX - MOVQ 72(AX), DI - MOVQ 80(AX), R8 - MOVQ 88(AX), R9 - MOVQ 104(AX), R10 - MOVQ s+0(FP), AX - MOVQ 144(AX), R11 - MOVQ 
152(AX), R12 - MOVQ 160(AX), R13 - -sequenceDecs_decode_56_amd64_main_loop: - MOVQ (SP), R14 - - // Fill bitreader to have enough for the offset and match length. - CMPQ SI, $0x08 - JL sequenceDecs_decode_56_amd64_fill_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R14 - MOVQ (R14), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decode_56_amd64_fill_end - -sequenceDecs_decode_56_amd64_fill_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decode_56_amd64_fill_check_overread - CMPQ BX, $0x07 - JLE sequenceDecs_decode_56_amd64_fill_end - SHLQ $0x08, DX - SUBQ $0x01, R14 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R14), AX - ORQ AX, DX - JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte - -sequenceDecs_decode_56_amd64_fill_check_overread: - CMPQ BX, $0x40 - JA error_overread - -sequenceDecs_decode_56_amd64_fill_end: - // Update offset - MOVQ R9, AX - MOVQ BX, CX - MOVQ DX, R15 - SHLQ CL, R15 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decode_56_amd64_of_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decode_56_amd64_of_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decode_56_amd64_of_update_zero - NEGQ CX - SHRQ CL, R15 - ADDQ R15, AX - -sequenceDecs_decode_56_amd64_of_update_zero: - MOVQ AX, 16(R10) - - // Update match length - MOVQ R8, AX - MOVQ BX, CX - MOVQ DX, R15 - SHLQ CL, R15 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decode_56_amd64_ml_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decode_56_amd64_ml_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decode_56_amd64_ml_update_zero - NEGQ CX - SHRQ CL, R15 - ADDQ R15, AX - -sequenceDecs_decode_56_amd64_ml_update_zero: - MOVQ AX, 8(R10) - - // Update literal length - MOVQ DI, AX - MOVQ BX, CX - MOVQ DX, R15 - SHLQ CL, R15 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decode_56_amd64_ll_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decode_56_amd64_ll_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decode_56_amd64_ll_update_zero - NEGQ CX - SHRQ CL, R15 - ADDQ R15, AX - -sequenceDecs_decode_56_amd64_ll_update_zero: - MOVQ AX, (R10) - - // Fill bitreader for state updates - MOVQ R14, (SP) - MOVQ R9, AX - SHRQ $0x08, AX - MOVBQZX AL, AX - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decode_56_amd64_skip_update - - // Update Literal Length State - MOVBQZX DI, R14 - SHRL $0x10, DI - LEAQ (BX)(R14*1), CX - MOVQ DX, R15 - MOVQ CX, BX - ROLQ CL, R15 - MOVL $0x00000001, BP - MOVB R14, CL - SHLL CL, BP - DECL BP - ANDQ BP, R15 - ADDQ R15, DI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(DI*8), DI - - // Update Match Length State - MOVBQZX R8, R14 - SHRL $0x10, R8 - LEAQ (BX)(R14*1), CX - MOVQ DX, R15 - MOVQ CX, BX - ROLQ CL, R15 - MOVL $0x00000001, BP - MOVB R14, CL - SHLL CL, BP - DECL BP - ANDQ BP, R15 - ADDQ R15, R8 - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Offset State - MOVBQZX R9, R14 - SHRL $0x10, R9 - LEAQ (BX)(R14*1), CX - MOVQ DX, R15 - MOVQ CX, BX - ROLQ CL, R15 - MOVL $0x00000001, BP - MOVB R14, CL - SHLL CL, BP - DECL BP - ANDQ BP, R15 - ADDQ R15, R9 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R9*8), R9 - -sequenceDecs_decode_56_amd64_skip_update: - // Adjust offset - MOVQ 16(R10), CX - CMPQ AX, $0x01 - JBE sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0 - MOVQ R12, R13 - MOVQ R11, R12 - MOVQ CX, R11 - JMP sequenceDecs_decode_56_amd64_after_adjust - 
-sequenceDecs_decode_56_amd64_adjust_offsetB_1_or_0: - CMPQ (R10), $0x00000000 - JNE sequenceDecs_decode_56_amd64_adjust_offset_maybezero - INCQ CX - JMP sequenceDecs_decode_56_amd64_adjust_offset_nonzero - -sequenceDecs_decode_56_amd64_adjust_offset_maybezero: - TESTQ CX, CX - JNZ sequenceDecs_decode_56_amd64_adjust_offset_nonzero - MOVQ R11, CX - JMP sequenceDecs_decode_56_amd64_after_adjust - -sequenceDecs_decode_56_amd64_adjust_offset_nonzero: - CMPQ CX, $0x01 - JB sequenceDecs_decode_56_amd64_adjust_zero - JEQ sequenceDecs_decode_56_amd64_adjust_one - CMPQ CX, $0x02 - JA sequenceDecs_decode_56_amd64_adjust_three - JMP sequenceDecs_decode_56_amd64_adjust_two - -sequenceDecs_decode_56_amd64_adjust_zero: - MOVQ R11, AX - JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid - -sequenceDecs_decode_56_amd64_adjust_one: - MOVQ R12, AX - JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid - -sequenceDecs_decode_56_amd64_adjust_two: - MOVQ R13, AX - JMP sequenceDecs_decode_56_amd64_adjust_test_temp_valid - -sequenceDecs_decode_56_amd64_adjust_three: - LEAQ -1(R11), AX - -sequenceDecs_decode_56_amd64_adjust_test_temp_valid: - TESTQ AX, AX - JNZ sequenceDecs_decode_56_amd64_adjust_temp_valid - MOVQ $0x00000001, AX - -sequenceDecs_decode_56_amd64_adjust_temp_valid: - CMPQ CX, $0x01 - CMOVQNE R12, R13 - MOVQ R11, R12 - MOVQ AX, R11 - MOVQ AX, CX - -sequenceDecs_decode_56_amd64_after_adjust: - MOVQ CX, 16(R10) - - // Check values - MOVQ 8(R10), AX - MOVQ (R10), R14 - LEAQ (AX)(R14*1), R15 - MOVQ s+0(FP), BP - ADDQ R15, 256(BP) - MOVQ ctx+16(FP), R15 - SUBQ R14, 128(R15) - JS error_not_enough_literals - CMPQ AX, $0x00020002 - JA sequenceDecs_decode_56_amd64_error_match_len_too_big - TESTQ CX, CX - JNZ sequenceDecs_decode_56_amd64_match_len_ofs_ok - TESTQ AX, AX - JNZ sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch - -sequenceDecs_decode_56_amd64_match_len_ofs_ok: - ADDQ $0x18, R10 - MOVQ ctx+16(FP), AX - DECQ 96(AX) - JNS sequenceDecs_decode_56_amd64_main_loop - MOVQ s+0(FP), AX - MOVQ R11, 144(AX) - MOVQ R12, 152(AX) - MOVQ R13, 160(AX) - MOVQ br+8(FP), AX - MOVQ DX, 24(AX) - MOVB BL, 32(AX) - MOVQ SI, 8(AX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decode_56_amd64_error_match_len_ofs_mismatch: - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decode_56_amd64_error_match_len_too_big: - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - -// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int -// Requires: BMI, BMI2, CMOV -TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 - MOVQ br+8(FP), BX - MOVQ 24(BX), AX - MOVBQZX 32(BX), DX - MOVQ (BX), CX - MOVQ 8(BX), BX - ADDQ BX, CX - MOVQ CX, (SP) - MOVQ ctx+16(FP), CX - MOVQ 72(CX), SI - MOVQ 80(CX), DI - MOVQ 88(CX), R8 - MOVQ 104(CX), R9 - MOVQ s+0(FP), CX - MOVQ 144(CX), R10 - MOVQ 152(CX), R11 - MOVQ 160(CX), R12 - -sequenceDecs_decode_bmi2_main_loop: - MOVQ (SP), R13 - - // Fill bitreader to have enough for the offset and match length. 
- CMPQ BX, $0x08 - JL sequenceDecs_decode_bmi2_fill_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R13 - MOVQ (R13), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decode_bmi2_fill_end - -sequenceDecs_decode_bmi2_fill_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decode_bmi2_fill_check_overread - CMPQ DX, $0x07 - JLE sequenceDecs_decode_bmi2_fill_end - SHLQ $0x08, AX - SUBQ $0x01, R13 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R13), CX - ORQ CX, AX - JMP sequenceDecs_decode_bmi2_fill_byte_by_byte - -sequenceDecs_decode_bmi2_fill_check_overread: - CMPQ DX, $0x40 - JA error_overread - -sequenceDecs_decode_bmi2_fill_end: - // Update offset - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R14 - MOVQ AX, R15 - LEAQ (DX)(R14*1), CX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - MOVQ CX, DX - MOVQ R8, CX - SHRQ $0x20, CX - ADDQ R15, CX - MOVQ CX, 16(R9) - - // Update match length - MOVQ $0x00000808, CX - BEXTRQ CX, DI, R14 - MOVQ AX, R15 - LEAQ (DX)(R14*1), CX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - MOVQ CX, DX - MOVQ DI, CX - SHRQ $0x20, CX - ADDQ R15, CX - MOVQ CX, 8(R9) - - // Fill bitreader to have enough for the remaining - CMPQ BX, $0x08 - JL sequenceDecs_decode_bmi2_fill_2_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R13 - MOVQ (R13), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decode_bmi2_fill_2_end - -sequenceDecs_decode_bmi2_fill_2_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decode_bmi2_fill_2_check_overread - CMPQ DX, $0x07 - JLE sequenceDecs_decode_bmi2_fill_2_end - SHLQ $0x08, AX - SUBQ $0x01, R13 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R13), CX - ORQ CX, AX - JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte - -sequenceDecs_decode_bmi2_fill_2_check_overread: - CMPQ DX, $0x40 - JA error_overread - -sequenceDecs_decode_bmi2_fill_2_end: - // Update literal length - MOVQ $0x00000808, CX - BEXTRQ CX, SI, R14 - MOVQ AX, R15 - LEAQ (DX)(R14*1), CX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - MOVQ CX, DX - MOVQ SI, CX - SHRQ $0x20, CX - ADDQ R15, CX - MOVQ CX, (R9) - - // Fill bitreader for state updates - MOVQ R13, (SP) - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R13 - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decode_bmi2_skip_update - LEAQ (SI)(DI*1), R14 - ADDQ R8, R14 - MOVBQZX R14, R14 - LEAQ (DX)(R14*1), CX - MOVQ AX, R15 - MOVQ CX, DX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - - // Update Offset State - BZHIQ R8, R15, CX - SHRXQ R8, R15, R15 - SHRL $0x10, R8 - ADDQ CX, R8 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Match Length State - BZHIQ DI, R15, CX - SHRXQ DI, R15, R15 - SHRL $0x10, DI - ADDQ CX, DI - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(DI*8), DI - - // Update Literal Length State - BZHIQ SI, R15, CX - SHRL $0x10, SI - ADDQ CX, SI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(SI*8), SI - -sequenceDecs_decode_bmi2_skip_update: - // Adjust offset - MOVQ 16(R9), CX - CMPQ R13, $0x01 - JBE sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0 - MOVQ R11, R12 - MOVQ R10, R11 - MOVQ CX, R10 - JMP sequenceDecs_decode_bmi2_after_adjust - -sequenceDecs_decode_bmi2_adjust_offsetB_1_or_0: - CMPQ (R9), $0x00000000 - JNE sequenceDecs_decode_bmi2_adjust_offset_maybezero - INCQ CX - JMP sequenceDecs_decode_bmi2_adjust_offset_nonzero - -sequenceDecs_decode_bmi2_adjust_offset_maybezero: - TESTQ CX, CX - JNZ sequenceDecs_decode_bmi2_adjust_offset_nonzero - MOVQ R10, CX - JMP sequenceDecs_decode_bmi2_after_adjust - 
-sequenceDecs_decode_bmi2_adjust_offset_nonzero: - CMPQ CX, $0x01 - JB sequenceDecs_decode_bmi2_adjust_zero - JEQ sequenceDecs_decode_bmi2_adjust_one - CMPQ CX, $0x02 - JA sequenceDecs_decode_bmi2_adjust_three - JMP sequenceDecs_decode_bmi2_adjust_two - -sequenceDecs_decode_bmi2_adjust_zero: - MOVQ R10, R13 - JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid - -sequenceDecs_decode_bmi2_adjust_one: - MOVQ R11, R13 - JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid - -sequenceDecs_decode_bmi2_adjust_two: - MOVQ R12, R13 - JMP sequenceDecs_decode_bmi2_adjust_test_temp_valid - -sequenceDecs_decode_bmi2_adjust_three: - LEAQ -1(R10), R13 - -sequenceDecs_decode_bmi2_adjust_test_temp_valid: - TESTQ R13, R13 - JNZ sequenceDecs_decode_bmi2_adjust_temp_valid - MOVQ $0x00000001, R13 - -sequenceDecs_decode_bmi2_adjust_temp_valid: - CMPQ CX, $0x01 - CMOVQNE R11, R12 - MOVQ R10, R11 - MOVQ R13, R10 - MOVQ R13, CX - -sequenceDecs_decode_bmi2_after_adjust: - MOVQ CX, 16(R9) - - // Check values - MOVQ 8(R9), R13 - MOVQ (R9), R14 - LEAQ (R13)(R14*1), R15 - MOVQ s+0(FP), BP - ADDQ R15, 256(BP) - MOVQ ctx+16(FP), R15 - SUBQ R14, 128(R15) - JS error_not_enough_literals - CMPQ R13, $0x00020002 - JA sequenceDecs_decode_bmi2_error_match_len_too_big - TESTQ CX, CX - JNZ sequenceDecs_decode_bmi2_match_len_ofs_ok - TESTQ R13, R13 - JNZ sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch - -sequenceDecs_decode_bmi2_match_len_ofs_ok: - ADDQ $0x18, R9 - MOVQ ctx+16(FP), CX - DECQ 96(CX) - JNS sequenceDecs_decode_bmi2_main_loop - MOVQ s+0(FP), CX - MOVQ R10, 144(CX) - MOVQ R11, 152(CX) - MOVQ R12, 160(CX) - MOVQ br+8(FP), CX - MOVQ AX, 24(CX) - MOVB DL, 32(CX) - MOVQ BX, 8(CX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decode_bmi2_error_match_len_ofs_mismatch: - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decode_bmi2_error_match_len_too_big: - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - -// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int -// Requires: BMI, BMI2, CMOV -TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 - MOVQ br+8(FP), BX - MOVQ 24(BX), AX - MOVBQZX 32(BX), DX - MOVQ (BX), CX - MOVQ 8(BX), BX - ADDQ BX, CX - MOVQ CX, (SP) - MOVQ ctx+16(FP), CX - MOVQ 72(CX), SI - MOVQ 80(CX), DI - MOVQ 88(CX), R8 - MOVQ 104(CX), R9 - MOVQ s+0(FP), CX - MOVQ 144(CX), R10 - MOVQ 152(CX), R11 - MOVQ 160(CX), R12 - -sequenceDecs_decode_56_bmi2_main_loop: - MOVQ (SP), R13 - - // Fill bitreader to have enough for the offset and match length. 
- CMPQ BX, $0x08 - JL sequenceDecs_decode_56_bmi2_fill_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R13 - MOVQ (R13), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decode_56_bmi2_fill_end - -sequenceDecs_decode_56_bmi2_fill_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decode_56_bmi2_fill_check_overread - CMPQ DX, $0x07 - JLE sequenceDecs_decode_56_bmi2_fill_end - SHLQ $0x08, AX - SUBQ $0x01, R13 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R13), CX - ORQ CX, AX - JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte - -sequenceDecs_decode_56_bmi2_fill_check_overread: - CMPQ DX, $0x40 - JA error_overread - -sequenceDecs_decode_56_bmi2_fill_end: - // Update offset - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R14 - MOVQ AX, R15 - LEAQ (DX)(R14*1), CX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - MOVQ CX, DX - MOVQ R8, CX - SHRQ $0x20, CX - ADDQ R15, CX - MOVQ CX, 16(R9) - - // Update match length - MOVQ $0x00000808, CX - BEXTRQ CX, DI, R14 - MOVQ AX, R15 - LEAQ (DX)(R14*1), CX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - MOVQ CX, DX - MOVQ DI, CX - SHRQ $0x20, CX - ADDQ R15, CX - MOVQ CX, 8(R9) - - // Update literal length - MOVQ $0x00000808, CX - BEXTRQ CX, SI, R14 - MOVQ AX, R15 - LEAQ (DX)(R14*1), CX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - MOVQ CX, DX - MOVQ SI, CX - SHRQ $0x20, CX - ADDQ R15, CX - MOVQ CX, (R9) - - // Fill bitreader for state updates - MOVQ R13, (SP) - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R13 - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decode_56_bmi2_skip_update - LEAQ (SI)(DI*1), R14 - ADDQ R8, R14 - MOVBQZX R14, R14 - LEAQ (DX)(R14*1), CX - MOVQ AX, R15 - MOVQ CX, DX - ROLQ CL, R15 - BZHIQ R14, R15, R15 - - // Update Offset State - BZHIQ R8, R15, CX - SHRXQ R8, R15, R15 - SHRL $0x10, R8 - ADDQ CX, R8 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Match Length State - BZHIQ DI, R15, CX - SHRXQ DI, R15, R15 - SHRL $0x10, DI - ADDQ CX, DI - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(DI*8), DI - - // Update Literal Length State - BZHIQ SI, R15, CX - SHRL $0x10, SI - ADDQ CX, SI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(SI*8), SI - -sequenceDecs_decode_56_bmi2_skip_update: - // Adjust offset - MOVQ 16(R9), CX - CMPQ R13, $0x01 - JBE sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0 - MOVQ R11, R12 - MOVQ R10, R11 - MOVQ CX, R10 - JMP sequenceDecs_decode_56_bmi2_after_adjust - -sequenceDecs_decode_56_bmi2_adjust_offsetB_1_or_0: - CMPQ (R9), $0x00000000 - JNE sequenceDecs_decode_56_bmi2_adjust_offset_maybezero - INCQ CX - JMP sequenceDecs_decode_56_bmi2_adjust_offset_nonzero - -sequenceDecs_decode_56_bmi2_adjust_offset_maybezero: - TESTQ CX, CX - JNZ sequenceDecs_decode_56_bmi2_adjust_offset_nonzero - MOVQ R10, CX - JMP sequenceDecs_decode_56_bmi2_after_adjust - -sequenceDecs_decode_56_bmi2_adjust_offset_nonzero: - CMPQ CX, $0x01 - JB sequenceDecs_decode_56_bmi2_adjust_zero - JEQ sequenceDecs_decode_56_bmi2_adjust_one - CMPQ CX, $0x02 - JA sequenceDecs_decode_56_bmi2_adjust_three - JMP sequenceDecs_decode_56_bmi2_adjust_two - -sequenceDecs_decode_56_bmi2_adjust_zero: - MOVQ R10, R13 - JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid - -sequenceDecs_decode_56_bmi2_adjust_one: - MOVQ R11, R13 - JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid - -sequenceDecs_decode_56_bmi2_adjust_two: - MOVQ R12, R13 - JMP sequenceDecs_decode_56_bmi2_adjust_test_temp_valid - -sequenceDecs_decode_56_bmi2_adjust_three: - LEAQ -1(R10), R13 - 
-sequenceDecs_decode_56_bmi2_adjust_test_temp_valid: - TESTQ R13, R13 - JNZ sequenceDecs_decode_56_bmi2_adjust_temp_valid - MOVQ $0x00000001, R13 - -sequenceDecs_decode_56_bmi2_adjust_temp_valid: - CMPQ CX, $0x01 - CMOVQNE R11, R12 - MOVQ R10, R11 - MOVQ R13, R10 - MOVQ R13, CX - -sequenceDecs_decode_56_bmi2_after_adjust: - MOVQ CX, 16(R9) - - // Check values - MOVQ 8(R9), R13 - MOVQ (R9), R14 - LEAQ (R13)(R14*1), R15 - MOVQ s+0(FP), BP - ADDQ R15, 256(BP) - MOVQ ctx+16(FP), R15 - SUBQ R14, 128(R15) - JS error_not_enough_literals - CMPQ R13, $0x00020002 - JA sequenceDecs_decode_56_bmi2_error_match_len_too_big - TESTQ CX, CX - JNZ sequenceDecs_decode_56_bmi2_match_len_ofs_ok - TESTQ R13, R13 - JNZ sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch - -sequenceDecs_decode_56_bmi2_match_len_ofs_ok: - ADDQ $0x18, R9 - MOVQ ctx+16(FP), CX - DECQ 96(CX) - JNS sequenceDecs_decode_56_bmi2_main_loop - MOVQ s+0(FP), CX - MOVQ R10, 144(CX) - MOVQ R11, 152(CX) - MOVQ R12, 160(CX) - MOVQ br+8(FP), CX - MOVQ AX, 24(CX) - MOVB DL, 32(CX) - MOVQ BX, 8(CX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decode_56_bmi2_error_match_len_ofs_mismatch: - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decode_56_bmi2_error_match_len_too_big: - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - -// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool -// Requires: SSE -TEXT ·sequenceDecs_executeSimple_amd64(SB), $8-9 - MOVQ ctx+0(FP), R10 - MOVQ 8(R10), CX - TESTQ CX, CX - JZ empty_seqs - MOVQ (R10), AX - MOVQ 24(R10), DX - MOVQ 32(R10), BX - MOVQ 80(R10), SI - MOVQ 104(R10), DI - MOVQ 120(R10), R8 - MOVQ 56(R10), R9 - MOVQ 64(R10), R10 - ADDQ R10, R9 - - // seqsBase += 24 * seqIndex - LEAQ (DX)(DX*2), R11 - SHLQ $0x03, R11 - ADDQ R11, AX - - // outBase += outPosition - ADDQ DI, BX - -main_loop: - MOVQ (AX), R11 - MOVQ 16(AX), R12 - MOVQ 8(AX), R13 - - // Copy literals - TESTQ R11, R11 - JZ check_offset - XORQ R14, R14 - -copy_1: - MOVUPS (SI)(R14*1), X0 - MOVUPS X0, (BX)(R14*1) - ADDQ $0x10, R14 - CMPQ R14, R11 - JB copy_1 - ADDQ R11, SI - ADDQ R11, BX - ADDQ R11, DI - - // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) -check_offset: - LEAQ (DI)(R10*1), R11 - CMPQ R12, R11 - JG error_match_off_too_big - CMPQ R12, R8 - JG error_match_off_too_big - - // Copy match from history - MOVQ R12, R11 - SUBQ DI, R11 - JLS copy_match - MOVQ R9, R14 - SUBQ R11, R14 - CMPQ R13, R11 - JG copy_all_from_history - MOVQ R13, R11 - SUBQ $0x10, R11 - JB copy_4_small - -copy_4_loop: - MOVUPS (R14), X0 - MOVUPS X0, (BX) - ADDQ $0x10, R14 - ADDQ $0x10, BX - SUBQ $0x10, R11 - JAE copy_4_loop - LEAQ 16(R14)(R11*1), R14 - LEAQ 16(BX)(R11*1), BX - MOVUPS -16(R14), X0 - MOVUPS X0, -16(BX) - JMP copy_4_end - -copy_4_small: - CMPQ R13, $0x03 - JE copy_4_move_3 - CMPQ R13, $0x08 - JB copy_4_move_4through7 - JMP copy_4_move_8through16 - -copy_4_move_3: - MOVW (R14), R11 - MOVB 2(R14), R12 - MOVW R11, (BX) - MOVB R12, 2(BX) - ADDQ R13, R14 - ADDQ R13, BX - JMP copy_4_end - -copy_4_move_4through7: - MOVL (R14), R11 - MOVL -4(R14)(R13*1), R12 - MOVL R11, (BX) - MOVL R12, -4(BX)(R13*1) - ADDQ R13, R14 - ADDQ R13, BX - JMP copy_4_end - 
-copy_4_move_8through16: - MOVQ (R14), R11 - MOVQ -8(R14)(R13*1), R12 - MOVQ R11, (BX) - MOVQ R12, -8(BX)(R13*1) - ADDQ R13, R14 - ADDQ R13, BX - -copy_4_end: - ADDQ R13, DI - ADDQ $0x18, AX - INCQ DX - CMPQ DX, CX - JB main_loop - JMP loop_finished - -copy_all_from_history: - MOVQ R11, R15 - SUBQ $0x10, R15 - JB copy_5_small - -copy_5_loop: - MOVUPS (R14), X0 - MOVUPS X0, (BX) - ADDQ $0x10, R14 - ADDQ $0x10, BX - SUBQ $0x10, R15 - JAE copy_5_loop - LEAQ 16(R14)(R15*1), R14 - LEAQ 16(BX)(R15*1), BX - MOVUPS -16(R14), X0 - MOVUPS X0, -16(BX) - JMP copy_5_end - -copy_5_small: - CMPQ R11, $0x03 - JE copy_5_move_3 - JB copy_5_move_1or2 - CMPQ R11, $0x08 - JB copy_5_move_4through7 - JMP copy_5_move_8through16 - -copy_5_move_1or2: - MOVB (R14), R15 - MOVB -1(R14)(R11*1), BP - MOVB R15, (BX) - MOVB BP, -1(BX)(R11*1) - ADDQ R11, R14 - ADDQ R11, BX - JMP copy_5_end - -copy_5_move_3: - MOVW (R14), R15 - MOVB 2(R14), BP - MOVW R15, (BX) - MOVB BP, 2(BX) - ADDQ R11, R14 - ADDQ R11, BX - JMP copy_5_end - -copy_5_move_4through7: - MOVL (R14), R15 - MOVL -4(R14)(R11*1), BP - MOVL R15, (BX) - MOVL BP, -4(BX)(R11*1) - ADDQ R11, R14 - ADDQ R11, BX - JMP copy_5_end - -copy_5_move_8through16: - MOVQ (R14), R15 - MOVQ -8(R14)(R11*1), BP - MOVQ R15, (BX) - MOVQ BP, -8(BX)(R11*1) - ADDQ R11, R14 - ADDQ R11, BX - -copy_5_end: - ADDQ R11, DI - SUBQ R11, R13 - - // Copy match from the current buffer -copy_match: - MOVQ BX, R11 - SUBQ R12, R11 - - // ml <= mo - CMPQ R13, R12 - JA copy_overlapping_match - - // Copy non-overlapping match - ADDQ R13, DI - MOVQ BX, R12 - ADDQ R13, BX - -copy_2: - MOVUPS (R11), X0 - MOVUPS X0, (R12) - ADDQ $0x10, R11 - ADDQ $0x10, R12 - SUBQ $0x10, R13 - JHI copy_2 - JMP handle_loop - - // Copy overlapping match -copy_overlapping_match: - ADDQ R13, DI - -copy_slow_3: - MOVB (R11), R12 - MOVB R12, (BX) - INCQ R11 - INCQ BX - DECQ R13 - JNZ copy_slow_3 - -handle_loop: - ADDQ $0x18, AX - INCQ DX - CMPQ DX, CX - JB main_loop - -loop_finished: - // Return value - MOVB $0x01, ret+8(FP) - - // Update the context - MOVQ ctx+0(FP), AX - MOVQ DX, 24(AX) - MOVQ DI, 104(AX) - SUBQ 80(AX), SI - MOVQ SI, 112(AX) - RET - -error_match_off_too_big: - // Return value - MOVB $0x00, ret+8(FP) - - // Update the context - MOVQ ctx+0(FP), AX - MOVQ DX, 24(AX) - MOVQ DI, 104(AX) - SUBQ 80(AX), SI - MOVQ SI, 112(AX) - RET - -empty_seqs: - // Return value - MOVB $0x01, ret+8(FP) - RET - -// func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool -// Requires: SSE -TEXT ·sequenceDecs_executeSimple_safe_amd64(SB), $8-9 - MOVQ ctx+0(FP), R10 - MOVQ 8(R10), CX - TESTQ CX, CX - JZ empty_seqs - MOVQ (R10), AX - MOVQ 24(R10), DX - MOVQ 32(R10), BX - MOVQ 80(R10), SI - MOVQ 104(R10), DI - MOVQ 120(R10), R8 - MOVQ 56(R10), R9 - MOVQ 64(R10), R10 - ADDQ R10, R9 - - // seqsBase += 24 * seqIndex - LEAQ (DX)(DX*2), R11 - SHLQ $0x03, R11 - ADDQ R11, AX - - // outBase += outPosition - ADDQ DI, BX - -main_loop: - MOVQ (AX), R11 - MOVQ 16(AX), R12 - MOVQ 8(AX), R13 - - // Copy literals - TESTQ R11, R11 - JZ check_offset - MOVQ R11, R14 - SUBQ $0x10, R14 - JB copy_1_small - -copy_1_loop: - MOVUPS (SI), X0 - MOVUPS X0, (BX) - ADDQ $0x10, SI - ADDQ $0x10, BX - SUBQ $0x10, R14 - JAE copy_1_loop - LEAQ 16(SI)(R14*1), SI - LEAQ 16(BX)(R14*1), BX - MOVUPS -16(SI), X0 - MOVUPS X0, -16(BX) - JMP copy_1_end - -copy_1_small: - CMPQ R11, $0x03 - JE copy_1_move_3 - JB copy_1_move_1or2 - CMPQ R11, $0x08 - JB copy_1_move_4through7 - JMP copy_1_move_8through16 - -copy_1_move_1or2: - MOVB (SI), R14 - MOVB -1(SI)(R11*1), R15 - MOVB 
R14, (BX) - MOVB R15, -1(BX)(R11*1) - ADDQ R11, SI - ADDQ R11, BX - JMP copy_1_end - -copy_1_move_3: - MOVW (SI), R14 - MOVB 2(SI), R15 - MOVW R14, (BX) - MOVB R15, 2(BX) - ADDQ R11, SI - ADDQ R11, BX - JMP copy_1_end - -copy_1_move_4through7: - MOVL (SI), R14 - MOVL -4(SI)(R11*1), R15 - MOVL R14, (BX) - MOVL R15, -4(BX)(R11*1) - ADDQ R11, SI - ADDQ R11, BX - JMP copy_1_end - -copy_1_move_8through16: - MOVQ (SI), R14 - MOVQ -8(SI)(R11*1), R15 - MOVQ R14, (BX) - MOVQ R15, -8(BX)(R11*1) - ADDQ R11, SI - ADDQ R11, BX - -copy_1_end: - ADDQ R11, DI - - // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) -check_offset: - LEAQ (DI)(R10*1), R11 - CMPQ R12, R11 - JG error_match_off_too_big - CMPQ R12, R8 - JG error_match_off_too_big - - // Copy match from history - MOVQ R12, R11 - SUBQ DI, R11 - JLS copy_match - MOVQ R9, R14 - SUBQ R11, R14 - CMPQ R13, R11 - JG copy_all_from_history - MOVQ R13, R11 - SUBQ $0x10, R11 - JB copy_4_small - -copy_4_loop: - MOVUPS (R14), X0 - MOVUPS X0, (BX) - ADDQ $0x10, R14 - ADDQ $0x10, BX - SUBQ $0x10, R11 - JAE copy_4_loop - LEAQ 16(R14)(R11*1), R14 - LEAQ 16(BX)(R11*1), BX - MOVUPS -16(R14), X0 - MOVUPS X0, -16(BX) - JMP copy_4_end - -copy_4_small: - CMPQ R13, $0x03 - JE copy_4_move_3 - CMPQ R13, $0x08 - JB copy_4_move_4through7 - JMP copy_4_move_8through16 - -copy_4_move_3: - MOVW (R14), R11 - MOVB 2(R14), R12 - MOVW R11, (BX) - MOVB R12, 2(BX) - ADDQ R13, R14 - ADDQ R13, BX - JMP copy_4_end - -copy_4_move_4through7: - MOVL (R14), R11 - MOVL -4(R14)(R13*1), R12 - MOVL R11, (BX) - MOVL R12, -4(BX)(R13*1) - ADDQ R13, R14 - ADDQ R13, BX - JMP copy_4_end - -copy_4_move_8through16: - MOVQ (R14), R11 - MOVQ -8(R14)(R13*1), R12 - MOVQ R11, (BX) - MOVQ R12, -8(BX)(R13*1) - ADDQ R13, R14 - ADDQ R13, BX - -copy_4_end: - ADDQ R13, DI - ADDQ $0x18, AX - INCQ DX - CMPQ DX, CX - JB main_loop - JMP loop_finished - -copy_all_from_history: - MOVQ R11, R15 - SUBQ $0x10, R15 - JB copy_5_small - -copy_5_loop: - MOVUPS (R14), X0 - MOVUPS X0, (BX) - ADDQ $0x10, R14 - ADDQ $0x10, BX - SUBQ $0x10, R15 - JAE copy_5_loop - LEAQ 16(R14)(R15*1), R14 - LEAQ 16(BX)(R15*1), BX - MOVUPS -16(R14), X0 - MOVUPS X0, -16(BX) - JMP copy_5_end - -copy_5_small: - CMPQ R11, $0x03 - JE copy_5_move_3 - JB copy_5_move_1or2 - CMPQ R11, $0x08 - JB copy_5_move_4through7 - JMP copy_5_move_8through16 - -copy_5_move_1or2: - MOVB (R14), R15 - MOVB -1(R14)(R11*1), BP - MOVB R15, (BX) - MOVB BP, -1(BX)(R11*1) - ADDQ R11, R14 - ADDQ R11, BX - JMP copy_5_end - -copy_5_move_3: - MOVW (R14), R15 - MOVB 2(R14), BP - MOVW R15, (BX) - MOVB BP, 2(BX) - ADDQ R11, R14 - ADDQ R11, BX - JMP copy_5_end - -copy_5_move_4through7: - MOVL (R14), R15 - MOVL -4(R14)(R11*1), BP - MOVL R15, (BX) - MOVL BP, -4(BX)(R11*1) - ADDQ R11, R14 - ADDQ R11, BX - JMP copy_5_end - -copy_5_move_8through16: - MOVQ (R14), R15 - MOVQ -8(R14)(R11*1), BP - MOVQ R15, (BX) - MOVQ BP, -8(BX)(R11*1) - ADDQ R11, R14 - ADDQ R11, BX - -copy_5_end: - ADDQ R11, DI - SUBQ R11, R13 - - // Copy match from the current buffer -copy_match: - MOVQ BX, R11 - SUBQ R12, R11 - - // ml <= mo - CMPQ R13, R12 - JA copy_overlapping_match - - // Copy non-overlapping match - ADDQ R13, DI - MOVQ R13, R12 - SUBQ $0x10, R12 - JB copy_2_small - -copy_2_loop: - MOVUPS (R11), X0 - MOVUPS X0, (BX) - ADDQ $0x10, R11 - ADDQ $0x10, BX - SUBQ $0x10, R12 - JAE copy_2_loop - LEAQ 16(R11)(R12*1), R11 - LEAQ 16(BX)(R12*1), BX - MOVUPS -16(R11), X0 - MOVUPS X0, -16(BX) - JMP copy_2_end - -copy_2_small: - CMPQ R13, $0x03 - JE copy_2_move_3 - JB copy_2_move_1or2 - CMPQ R13, $0x08 
- JB copy_2_move_4through7 - JMP copy_2_move_8through16 - -copy_2_move_1or2: - MOVB (R11), R12 - MOVB -1(R11)(R13*1), R14 - MOVB R12, (BX) - MOVB R14, -1(BX)(R13*1) - ADDQ R13, R11 - ADDQ R13, BX - JMP copy_2_end - -copy_2_move_3: - MOVW (R11), R12 - MOVB 2(R11), R14 - MOVW R12, (BX) - MOVB R14, 2(BX) - ADDQ R13, R11 - ADDQ R13, BX - JMP copy_2_end - -copy_2_move_4through7: - MOVL (R11), R12 - MOVL -4(R11)(R13*1), R14 - MOVL R12, (BX) - MOVL R14, -4(BX)(R13*1) - ADDQ R13, R11 - ADDQ R13, BX - JMP copy_2_end - -copy_2_move_8through16: - MOVQ (R11), R12 - MOVQ -8(R11)(R13*1), R14 - MOVQ R12, (BX) - MOVQ R14, -8(BX)(R13*1) - ADDQ R13, R11 - ADDQ R13, BX - -copy_2_end: - JMP handle_loop - - // Copy overlapping match -copy_overlapping_match: - ADDQ R13, DI - -copy_slow_3: - MOVB (R11), R12 - MOVB R12, (BX) - INCQ R11 - INCQ BX - DECQ R13 - JNZ copy_slow_3 - -handle_loop: - ADDQ $0x18, AX - INCQ DX - CMPQ DX, CX - JB main_loop - -loop_finished: - // Return value - MOVB $0x01, ret+8(FP) - - // Update the context - MOVQ ctx+0(FP), AX - MOVQ DX, 24(AX) - MOVQ DI, 104(AX) - SUBQ 80(AX), SI - MOVQ SI, 112(AX) - RET - -error_match_off_too_big: - // Return value - MOVB $0x00, ret+8(FP) - - // Update the context - MOVQ ctx+0(FP), AX - MOVQ DX, 24(AX) - MOVQ DI, 104(AX) - SUBQ 80(AX), SI - MOVQ SI, 112(AX) - RET - -empty_seqs: - // Return value - MOVB $0x01, ret+8(FP) - RET - -// func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int -// Requires: CMOV, SSE -TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 - MOVQ br+8(FP), CX - MOVQ 24(CX), DX - MOVBQZX 32(CX), BX - MOVQ (CX), AX - MOVQ 8(CX), SI - ADDQ SI, AX - MOVQ AX, (SP) - MOVQ ctx+16(FP), AX - MOVQ 72(AX), DI - MOVQ 80(AX), R8 - MOVQ 88(AX), R9 - XORQ CX, CX - MOVQ CX, 8(SP) - MOVQ CX, 16(SP) - MOVQ CX, 24(SP) - MOVQ 112(AX), R10 - MOVQ 128(AX), CX - MOVQ CX, 32(SP) - MOVQ 144(AX), R11 - MOVQ 136(AX), R12 - MOVQ 200(AX), CX - MOVQ CX, 56(SP) - MOVQ 176(AX), CX - MOVQ CX, 48(SP) - MOVQ 184(AX), AX - MOVQ AX, 40(SP) - MOVQ 40(SP), AX - ADDQ AX, 48(SP) - - // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) - ADDQ R10, 32(SP) - - // outBase += outPosition - ADDQ R12, R10 - -sequenceDecs_decodeSync_amd64_main_loop: - MOVQ (SP), R13 - - // Fill bitreader to have enough for the offset and match length. 
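(Editorial note on the refill step named just above: this decoder, like the others in this file, walks the input backwards, since zstd FSE bitstreams are consumed from the last byte toward the first. A rough plain-Go rendition of the refill follows; the fast path reloads a whole 64-bit window while at least eight bytes remain, and the slow path shifts single bytes in near the start of the buffer. The struct and field names here are illustrative, not the package's actual bitReader, which differs in detail.)

package zstdsketch

import "encoding/binary"

// bitReaderSketch models a backward bit reader: value caches a 64-bit
// window ending at cursor, and bitsRead counts how many of its top bits
// have already been consumed.
type bitReaderSketch struct {
	in       []byte
	cursor   int    // read position within in, moving toward 0
	value    uint64 // bit buffer; bits are consumed from the top
	bitsRead uint8
}

// fill mirrors the fast/slow refill paths of the assembly and reports
// false on overread, corresponding to the error_overread exits used by
// these functions.
func (b *bitReaderSketch) fill() bool {
	if n := int(b.bitsRead >> 3); b.cursor-n >= 8 {
		// Fast path: give back the whole bytes already consumed and
		// reload a full 64-bit window ending at the cursor.
		b.cursor -= n
		b.value = binary.LittleEndian.Uint64(b.in[b.cursor-8 : b.cursor])
		b.bitsRead &= 7
		return true
	}
	// Slow path near the start of the input: one byte at a time.
	for b.cursor > 0 && b.bitsRead >= 8 {
		b.cursor--
		b.value = b.value<<8 | uint64(b.in[b.cursor])
		b.bitsRead -= 8
	}
	// Consuming more than 64 bits with nothing left to back them means
	// the input was truncated.
	return b.bitsRead <= 64
}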
- CMPQ SI, $0x08 - JL sequenceDecs_decodeSync_amd64_fill_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R13 - MOVQ (R13), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decodeSync_amd64_fill_end - -sequenceDecs_decodeSync_amd64_fill_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_amd64_fill_check_overread - CMPQ BX, $0x07 - JLE sequenceDecs_decodeSync_amd64_fill_end - SHLQ $0x08, DX - SUBQ $0x01, R13 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R13), AX - ORQ AX, DX - JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte - -sequenceDecs_decodeSync_amd64_fill_check_overread: - CMPQ BX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_amd64_fill_end: - // Update offset - MOVQ R9, AX - MOVQ BX, CX - MOVQ DX, R14 - SHLQ CL, R14 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decodeSync_amd64_of_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decodeSync_amd64_of_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decodeSync_amd64_of_update_zero - NEGQ CX - SHRQ CL, R14 - ADDQ R14, AX - -sequenceDecs_decodeSync_amd64_of_update_zero: - MOVQ AX, 8(SP) - - // Update match length - MOVQ R8, AX - MOVQ BX, CX - MOVQ DX, R14 - SHLQ CL, R14 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decodeSync_amd64_ml_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decodeSync_amd64_ml_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decodeSync_amd64_ml_update_zero - NEGQ CX - SHRQ CL, R14 - ADDQ R14, AX - -sequenceDecs_decodeSync_amd64_ml_update_zero: - MOVQ AX, 16(SP) - - // Fill bitreader to have enough for the remaining - CMPQ SI, $0x08 - JL sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R13 - MOVQ (R13), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decodeSync_amd64_fill_2_end - -sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_amd64_fill_2_check_overread - CMPQ BX, $0x07 - JLE sequenceDecs_decodeSync_amd64_fill_2_end - SHLQ $0x08, DX - SUBQ $0x01, R13 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R13), AX - ORQ AX, DX - JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte - -sequenceDecs_decodeSync_amd64_fill_2_check_overread: - CMPQ BX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_amd64_fill_2_end: - // Update literal length - MOVQ DI, AX - MOVQ BX, CX - MOVQ DX, R14 - SHLQ CL, R14 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decodeSync_amd64_ll_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decodeSync_amd64_ll_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decodeSync_amd64_ll_update_zero - NEGQ CX - SHRQ CL, R14 - ADDQ R14, AX - -sequenceDecs_decodeSync_amd64_ll_update_zero: - MOVQ AX, 24(SP) - - // Fill bitreader for state updates - MOVQ R13, (SP) - MOVQ R9, AX - SHRQ $0x08, AX - MOVBQZX AL, AX - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decodeSync_amd64_skip_update - - // Update Literal Length State - MOVBQZX DI, R13 - SHRL $0x10, DI - LEAQ (BX)(R13*1), CX - MOVQ DX, R14 - MOVQ CX, BX - ROLQ CL, R14 - MOVL $0x00000001, R15 - MOVB R13, CL - SHLL CL, R15 - DECL R15 - ANDQ R15, R14 - ADDQ R14, DI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(DI*8), DI - - // Update Match Length State - MOVBQZX R8, R13 - SHRL $0x10, R8 - LEAQ (BX)(R13*1), CX - MOVQ DX, R14 - MOVQ CX, BX - ROLQ CL, R14 - MOVL $0x00000001, R15 - MOVB R13, CL - SHLL CL, R15 - DECL R15 - ANDQ R15, R14 - ADDQ R14, R8 - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - 
MOVQ (CX)(R8*8), R8 - - // Update Offset State - MOVBQZX R9, R13 - SHRL $0x10, R9 - LEAQ (BX)(R13*1), CX - MOVQ DX, R14 - MOVQ CX, BX - ROLQ CL, R14 - MOVL $0x00000001, R15 - MOVB R13, CL - SHLL CL, R15 - DECL R15 - ANDQ R15, R14 - ADDQ R14, R9 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R9*8), R9 - -sequenceDecs_decodeSync_amd64_skip_update: - // Adjust offset - MOVQ s+0(FP), CX - MOVQ 8(SP), R13 - CMPQ AX, $0x01 - JBE sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0 - MOVUPS 144(CX), X0 - MOVQ R13, 144(CX) - MOVUPS X0, 152(CX) - JMP sequenceDecs_decodeSync_amd64_after_adjust - -sequenceDecs_decodeSync_amd64_adjust_offsetB_1_or_0: - CMPQ 24(SP), $0x00000000 - JNE sequenceDecs_decodeSync_amd64_adjust_offset_maybezero - INCQ R13 - JMP sequenceDecs_decodeSync_amd64_adjust_offset_nonzero - -sequenceDecs_decodeSync_amd64_adjust_offset_maybezero: - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_amd64_adjust_offset_nonzero - MOVQ 144(CX), R13 - JMP sequenceDecs_decodeSync_amd64_after_adjust - -sequenceDecs_decodeSync_amd64_adjust_offset_nonzero: - MOVQ R13, AX - XORQ R14, R14 - MOVQ $-1, R15 - CMPQ R13, $0x03 - CMOVQEQ R14, AX - CMOVQEQ R15, R14 - ADDQ 144(CX)(AX*8), R14 - JNZ sequenceDecs_decodeSync_amd64_adjust_temp_valid - MOVQ $0x00000001, R14 - -sequenceDecs_decodeSync_amd64_adjust_temp_valid: - CMPQ R13, $0x01 - JZ sequenceDecs_decodeSync_amd64_adjust_skip - MOVQ 152(CX), AX - MOVQ AX, 160(CX) - -sequenceDecs_decodeSync_amd64_adjust_skip: - MOVQ 144(CX), AX - MOVQ AX, 152(CX) - MOVQ R14, 144(CX) - MOVQ R14, R13 - -sequenceDecs_decodeSync_amd64_after_adjust: - MOVQ R13, 8(SP) - - // Check values - MOVQ 16(SP), AX - MOVQ 24(SP), CX - LEAQ (AX)(CX*1), R14 - MOVQ s+0(FP), R15 - ADDQ R14, 256(R15) - MOVQ ctx+16(FP), R14 - SUBQ CX, 104(R14) - JS error_not_enough_literals - CMPQ AX, $0x00020002 - JA sequenceDecs_decodeSync_amd64_error_match_len_too_big - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_amd64_match_len_ofs_ok - TESTQ AX, AX - JNZ sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch - -sequenceDecs_decodeSync_amd64_match_len_ofs_ok: - MOVQ 24(SP), AX - MOVQ 8(SP), CX - MOVQ 16(SP), R13 - - // Check if we have enough space in s.out - LEAQ (AX)(R13*1), R14 - ADDQ R10, R14 - CMPQ R14, 32(SP) - JA error_not_enough_space - - // Copy literals - TESTQ AX, AX - JZ check_offset - XORQ R14, R14 - -copy_1: - MOVUPS (R11)(R14*1), X0 - MOVUPS X0, (R10)(R14*1) - ADDQ $0x10, R14 - CMPQ R14, AX - JB copy_1 - ADDQ AX, R11 - ADDQ AX, R10 - ADDQ AX, R12 - - // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) -check_offset: - MOVQ R12, AX - ADDQ 40(SP), AX - CMPQ CX, AX - JG error_match_off_too_big - CMPQ CX, 56(SP) - JG error_match_off_too_big - - // Copy match from history - MOVQ CX, AX - SUBQ R12, AX - JLS copy_match - MOVQ 48(SP), R14 - SUBQ AX, R14 - CMPQ R13, AX - JG copy_all_from_history - MOVQ R13, AX - SUBQ $0x10, AX - JB copy_4_small - -copy_4_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R10) - ADDQ $0x10, R14 - ADDQ $0x10, R10 - SUBQ $0x10, AX - JAE copy_4_loop - LEAQ 16(R14)(AX*1), R14 - LEAQ 16(R10)(AX*1), R10 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R10) - JMP copy_4_end - -copy_4_small: - CMPQ R13, $0x03 - JE copy_4_move_3 - CMPQ R13, $0x08 - JB copy_4_move_4through7 - JMP copy_4_move_8through16 - -copy_4_move_3: - MOVW (R14), AX - MOVB 2(R14), CL - MOVW AX, (R10) - MOVB CL, 2(R10) - ADDQ R13, R14 - ADDQ R13, R10 - JMP copy_4_end - -copy_4_move_4through7: - MOVL (R14), AX - MOVL -4(R14)(R13*1), CX - MOVL AX, (R10) - MOVL CX, -4(R10)(R13*1) - 
ADDQ R13, R14 - ADDQ R13, R10 - JMP copy_4_end - -copy_4_move_8through16: - MOVQ (R14), AX - MOVQ -8(R14)(R13*1), CX - MOVQ AX, (R10) - MOVQ CX, -8(R10)(R13*1) - ADDQ R13, R14 - ADDQ R13, R10 - -copy_4_end: - ADDQ R13, R12 - JMP handle_loop - JMP loop_finished - -copy_all_from_history: - MOVQ AX, R15 - SUBQ $0x10, R15 - JB copy_5_small - -copy_5_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R10) - ADDQ $0x10, R14 - ADDQ $0x10, R10 - SUBQ $0x10, R15 - JAE copy_5_loop - LEAQ 16(R14)(R15*1), R14 - LEAQ 16(R10)(R15*1), R10 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R10) - JMP copy_5_end - -copy_5_small: - CMPQ AX, $0x03 - JE copy_5_move_3 - JB copy_5_move_1or2 - CMPQ AX, $0x08 - JB copy_5_move_4through7 - JMP copy_5_move_8through16 - -copy_5_move_1or2: - MOVB (R14), R15 - MOVB -1(R14)(AX*1), BP - MOVB R15, (R10) - MOVB BP, -1(R10)(AX*1) - ADDQ AX, R14 - ADDQ AX, R10 - JMP copy_5_end - -copy_5_move_3: - MOVW (R14), R15 - MOVB 2(R14), BP - MOVW R15, (R10) - MOVB BP, 2(R10) - ADDQ AX, R14 - ADDQ AX, R10 - JMP copy_5_end - -copy_5_move_4through7: - MOVL (R14), R15 - MOVL -4(R14)(AX*1), BP - MOVL R15, (R10) - MOVL BP, -4(R10)(AX*1) - ADDQ AX, R14 - ADDQ AX, R10 - JMP copy_5_end - -copy_5_move_8through16: - MOVQ (R14), R15 - MOVQ -8(R14)(AX*1), BP - MOVQ R15, (R10) - MOVQ BP, -8(R10)(AX*1) - ADDQ AX, R14 - ADDQ AX, R10 - -copy_5_end: - ADDQ AX, R12 - SUBQ AX, R13 - - // Copy match from the current buffer -copy_match: - MOVQ R10, AX - SUBQ CX, AX - - // ml <= mo - CMPQ R13, CX - JA copy_overlapping_match - - // Copy non-overlapping match - ADDQ R13, R12 - MOVQ R10, CX - ADDQ R13, R10 - -copy_2: - MOVUPS (AX), X0 - MOVUPS X0, (CX) - ADDQ $0x10, AX - ADDQ $0x10, CX - SUBQ $0x10, R13 - JHI copy_2 - JMP handle_loop - - // Copy overlapping match -copy_overlapping_match: - ADDQ R13, R12 - -copy_slow_3: - MOVB (AX), CL - MOVB CL, (R10) - INCQ AX - INCQ R10 - DECQ R13 - JNZ copy_slow_3 - -handle_loop: - MOVQ ctx+16(FP), AX - DECQ 96(AX) - JNS sequenceDecs_decodeSync_amd64_main_loop - -loop_finished: - MOVQ br+8(FP), AX - MOVQ DX, 24(AX) - MOVB BL, 32(AX) - MOVQ SI, 8(AX) - - // Update the context - MOVQ ctx+16(FP), AX - MOVQ R12, 136(AX) - MOVQ 144(AX), CX - SUBQ CX, R11 - MOVQ R11, 168(AX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decodeSync_amd64_error_match_len_ofs_mismatch: - MOVQ 16(SP), AX - MOVQ ctx+16(FP), CX - MOVQ AX, 216(CX) - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decodeSync_amd64_error_match_len_too_big: - MOVQ ctx+16(FP), AX - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error -error_match_off_too_big: - MOVQ ctx+16(FP), AX - MOVQ 8(SP), CX - MOVQ CX, 224(AX) - MOVQ R12, 136(AX) - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - - // Return with not enough output space error -error_not_enough_space: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ R12, 136(AX) - MOVQ $0x00000005, ret+24(FP) - RET - -// func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int -// Requires: BMI, BMI2, CMOV, SSE -TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 - MOVQ br+8(FP), BX - MOVQ 24(BX), AX - MOVBQZX 
32(BX), DX - MOVQ (BX), CX - MOVQ 8(BX), BX - ADDQ BX, CX - MOVQ CX, (SP) - MOVQ ctx+16(FP), CX - MOVQ 72(CX), SI - MOVQ 80(CX), DI - MOVQ 88(CX), R8 - XORQ R9, R9 - MOVQ R9, 8(SP) - MOVQ R9, 16(SP) - MOVQ R9, 24(SP) - MOVQ 112(CX), R9 - MOVQ 128(CX), R10 - MOVQ R10, 32(SP) - MOVQ 144(CX), R10 - MOVQ 136(CX), R11 - MOVQ 200(CX), R12 - MOVQ R12, 56(SP) - MOVQ 176(CX), R12 - MOVQ R12, 48(SP) - MOVQ 184(CX), CX - MOVQ CX, 40(SP) - MOVQ 40(SP), CX - ADDQ CX, 48(SP) - - // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) - ADDQ R9, 32(SP) - - // outBase += outPosition - ADDQ R11, R9 - -sequenceDecs_decodeSync_bmi2_main_loop: - MOVQ (SP), R12 - - // Fill bitreader to have enough for the offset and match length. - CMPQ BX, $0x08 - JL sequenceDecs_decodeSync_bmi2_fill_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R12 - MOVQ (R12), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decodeSync_bmi2_fill_end - -sequenceDecs_decodeSync_bmi2_fill_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_bmi2_fill_check_overread - CMPQ DX, $0x07 - JLE sequenceDecs_decodeSync_bmi2_fill_end - SHLQ $0x08, AX - SUBQ $0x01, R12 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R12), CX - ORQ CX, AX - JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte - -sequenceDecs_decodeSync_bmi2_fill_check_overread: - CMPQ DX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_bmi2_fill_end: - // Update offset - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R13 - MOVQ AX, R14 - LEAQ (DX)(R13*1), CX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - MOVQ CX, DX - MOVQ R8, CX - SHRQ $0x20, CX - ADDQ R14, CX - MOVQ CX, 8(SP) - - // Update match length - MOVQ $0x00000808, CX - BEXTRQ CX, DI, R13 - MOVQ AX, R14 - LEAQ (DX)(R13*1), CX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - MOVQ CX, DX - MOVQ DI, CX - SHRQ $0x20, CX - ADDQ R14, CX - MOVQ CX, 16(SP) - - // Fill bitreader to have enough for the remaining - CMPQ BX, $0x08 - JL sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R12 - MOVQ (R12), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decodeSync_bmi2_fill_2_end - -sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_bmi2_fill_2_check_overread - CMPQ DX, $0x07 - JLE sequenceDecs_decodeSync_bmi2_fill_2_end - SHLQ $0x08, AX - SUBQ $0x01, R12 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R12), CX - ORQ CX, AX - JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte - -sequenceDecs_decodeSync_bmi2_fill_2_check_overread: - CMPQ DX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_bmi2_fill_2_end: - // Update literal length - MOVQ $0x00000808, CX - BEXTRQ CX, SI, R13 - MOVQ AX, R14 - LEAQ (DX)(R13*1), CX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - MOVQ CX, DX - MOVQ SI, CX - SHRQ $0x20, CX - ADDQ R14, CX - MOVQ CX, 24(SP) - - // Fill bitreader for state updates - MOVQ R12, (SP) - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R12 - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decodeSync_bmi2_skip_update - LEAQ (SI)(DI*1), R13 - ADDQ R8, R13 - MOVBQZX R13, R13 - LEAQ (DX)(R13*1), CX - MOVQ AX, R14 - MOVQ CX, DX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - - // Update Offset State - BZHIQ R8, R14, CX - SHRXQ R8, R14, R14 - SHRL $0x10, R8 - ADDQ CX, R8 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Match Length State - BZHIQ DI, R14, CX - SHRXQ DI, R14, R14 - SHRL $0x10, DI - ADDQ CX, DI - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(DI*8), DI - - // Update Literal 
Length State - BZHIQ SI, R14, CX - SHRL $0x10, SI - ADDQ CX, SI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(SI*8), SI - -sequenceDecs_decodeSync_bmi2_skip_update: - // Adjust offset - MOVQ s+0(FP), CX - MOVQ 8(SP), R13 - CMPQ R12, $0x01 - JBE sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0 - MOVUPS 144(CX), X0 - MOVQ R13, 144(CX) - MOVUPS X0, 152(CX) - JMP sequenceDecs_decodeSync_bmi2_after_adjust - -sequenceDecs_decodeSync_bmi2_adjust_offsetB_1_or_0: - CMPQ 24(SP), $0x00000000 - JNE sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero - INCQ R13 - JMP sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero - -sequenceDecs_decodeSync_bmi2_adjust_offset_maybezero: - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero - MOVQ 144(CX), R13 - JMP sequenceDecs_decodeSync_bmi2_after_adjust - -sequenceDecs_decodeSync_bmi2_adjust_offset_nonzero: - MOVQ R13, R12 - XORQ R14, R14 - MOVQ $-1, R15 - CMPQ R13, $0x03 - CMOVQEQ R14, R12 - CMOVQEQ R15, R14 - ADDQ 144(CX)(R12*8), R14 - JNZ sequenceDecs_decodeSync_bmi2_adjust_temp_valid - MOVQ $0x00000001, R14 - -sequenceDecs_decodeSync_bmi2_adjust_temp_valid: - CMPQ R13, $0x01 - JZ sequenceDecs_decodeSync_bmi2_adjust_skip - MOVQ 152(CX), R12 - MOVQ R12, 160(CX) - -sequenceDecs_decodeSync_bmi2_adjust_skip: - MOVQ 144(CX), R12 - MOVQ R12, 152(CX) - MOVQ R14, 144(CX) - MOVQ R14, R13 - -sequenceDecs_decodeSync_bmi2_after_adjust: - MOVQ R13, 8(SP) - - // Check values - MOVQ 16(SP), CX - MOVQ 24(SP), R12 - LEAQ (CX)(R12*1), R14 - MOVQ s+0(FP), R15 - ADDQ R14, 256(R15) - MOVQ ctx+16(FP), R14 - SUBQ R12, 104(R14) - JS error_not_enough_literals - CMPQ CX, $0x00020002 - JA sequenceDecs_decodeSync_bmi2_error_match_len_too_big - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_bmi2_match_len_ofs_ok - TESTQ CX, CX - JNZ sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch - -sequenceDecs_decodeSync_bmi2_match_len_ofs_ok: - MOVQ 24(SP), CX - MOVQ 8(SP), R12 - MOVQ 16(SP), R13 - - // Check if we have enough space in s.out - LEAQ (CX)(R13*1), R14 - ADDQ R9, R14 - CMPQ R14, 32(SP) - JA error_not_enough_space - - // Copy literals - TESTQ CX, CX - JZ check_offset - XORQ R14, R14 - -copy_1: - MOVUPS (R10)(R14*1), X0 - MOVUPS X0, (R9)(R14*1) - ADDQ $0x10, R14 - CMPQ R14, CX - JB copy_1 - ADDQ CX, R10 - ADDQ CX, R9 - ADDQ CX, R11 - - // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) -check_offset: - MOVQ R11, CX - ADDQ 40(SP), CX - CMPQ R12, CX - JG error_match_off_too_big - CMPQ R12, 56(SP) - JG error_match_off_too_big - - // Copy match from history - MOVQ R12, CX - SUBQ R11, CX - JLS copy_match - MOVQ 48(SP), R14 - SUBQ CX, R14 - CMPQ R13, CX - JG copy_all_from_history - MOVQ R13, CX - SUBQ $0x10, CX - JB copy_4_small - -copy_4_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R9) - ADDQ $0x10, R14 - ADDQ $0x10, R9 - SUBQ $0x10, CX - JAE copy_4_loop - LEAQ 16(R14)(CX*1), R14 - LEAQ 16(R9)(CX*1), R9 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R9) - JMP copy_4_end - -copy_4_small: - CMPQ R13, $0x03 - JE copy_4_move_3 - CMPQ R13, $0x08 - JB copy_4_move_4through7 - JMP copy_4_move_8through16 - -copy_4_move_3: - MOVW (R14), CX - MOVB 2(R14), R12 - MOVW CX, (R9) - MOVB R12, 2(R9) - ADDQ R13, R14 - ADDQ R13, R9 - JMP copy_4_end - -copy_4_move_4through7: - MOVL (R14), CX - MOVL -4(R14)(R13*1), R12 - MOVL CX, (R9) - MOVL R12, -4(R9)(R13*1) - ADDQ R13, R14 - ADDQ R13, R9 - JMP copy_4_end - -copy_4_move_8through16: - MOVQ (R14), CX - MOVQ -8(R14)(R13*1), R12 - MOVQ CX, (R9) - MOVQ R12, -8(R9)(R13*1) - ADDQ R13, R14 - ADDQ R13, R9 - 
-copy_4_end: - ADDQ R13, R11 - JMP handle_loop - JMP loop_finished - -copy_all_from_history: - MOVQ CX, R15 - SUBQ $0x10, R15 - JB copy_5_small - -copy_5_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R9) - ADDQ $0x10, R14 - ADDQ $0x10, R9 - SUBQ $0x10, R15 - JAE copy_5_loop - LEAQ 16(R14)(R15*1), R14 - LEAQ 16(R9)(R15*1), R9 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R9) - JMP copy_5_end - -copy_5_small: - CMPQ CX, $0x03 - JE copy_5_move_3 - JB copy_5_move_1or2 - CMPQ CX, $0x08 - JB copy_5_move_4through7 - JMP copy_5_move_8through16 - -copy_5_move_1or2: - MOVB (R14), R15 - MOVB -1(R14)(CX*1), BP - MOVB R15, (R9) - MOVB BP, -1(R9)(CX*1) - ADDQ CX, R14 - ADDQ CX, R9 - JMP copy_5_end - -copy_5_move_3: - MOVW (R14), R15 - MOVB 2(R14), BP - MOVW R15, (R9) - MOVB BP, 2(R9) - ADDQ CX, R14 - ADDQ CX, R9 - JMP copy_5_end - -copy_5_move_4through7: - MOVL (R14), R15 - MOVL -4(R14)(CX*1), BP - MOVL R15, (R9) - MOVL BP, -4(R9)(CX*1) - ADDQ CX, R14 - ADDQ CX, R9 - JMP copy_5_end - -copy_5_move_8through16: - MOVQ (R14), R15 - MOVQ -8(R14)(CX*1), BP - MOVQ R15, (R9) - MOVQ BP, -8(R9)(CX*1) - ADDQ CX, R14 - ADDQ CX, R9 - -copy_5_end: - ADDQ CX, R11 - SUBQ CX, R13 - - // Copy match from the current buffer -copy_match: - MOVQ R9, CX - SUBQ R12, CX - - // ml <= mo - CMPQ R13, R12 - JA copy_overlapping_match - - // Copy non-overlapping match - ADDQ R13, R11 - MOVQ R9, R12 - ADDQ R13, R9 - -copy_2: - MOVUPS (CX), X0 - MOVUPS X0, (R12) - ADDQ $0x10, CX - ADDQ $0x10, R12 - SUBQ $0x10, R13 - JHI copy_2 - JMP handle_loop - - // Copy overlapping match -copy_overlapping_match: - ADDQ R13, R11 - -copy_slow_3: - MOVB (CX), R12 - MOVB R12, (R9) - INCQ CX - INCQ R9 - DECQ R13 - JNZ copy_slow_3 - -handle_loop: - MOVQ ctx+16(FP), CX - DECQ 96(CX) - JNS sequenceDecs_decodeSync_bmi2_main_loop - -loop_finished: - MOVQ br+8(FP), CX - MOVQ AX, 24(CX) - MOVB DL, 32(CX) - MOVQ BX, 8(CX) - - // Update the context - MOVQ ctx+16(FP), AX - MOVQ R11, 136(AX) - MOVQ 144(AX), CX - SUBQ CX, R10 - MOVQ R10, 168(AX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decodeSync_bmi2_error_match_len_ofs_mismatch: - MOVQ 16(SP), AX - MOVQ ctx+16(FP), CX - MOVQ AX, 216(CX) - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decodeSync_bmi2_error_match_len_too_big: - MOVQ ctx+16(FP), AX - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error -error_match_off_too_big: - MOVQ ctx+16(FP), AX - MOVQ 8(SP), CX - MOVQ CX, 224(AX) - MOVQ R11, 136(AX) - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - - // Return with not enough output space error -error_not_enough_space: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ R11, 136(AX) - MOVQ $0x00000005, ret+24(FP) - RET - -// func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int -// Requires: CMOV, SSE -TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 - MOVQ br+8(FP), CX - MOVQ 24(CX), DX - MOVBQZX 32(CX), BX - MOVQ (CX), AX - MOVQ 8(CX), SI - ADDQ SI, AX - MOVQ AX, (SP) - MOVQ ctx+16(FP), AX - MOVQ 72(AX), DI - MOVQ 80(AX), R8 - MOVQ 88(AX), R9 - XORQ CX, CX - MOVQ CX, 8(SP) - MOVQ CX, 16(SP) - MOVQ CX, 
24(SP) - MOVQ 112(AX), R10 - MOVQ 128(AX), CX - MOVQ CX, 32(SP) - MOVQ 144(AX), R11 - MOVQ 136(AX), R12 - MOVQ 200(AX), CX - MOVQ CX, 56(SP) - MOVQ 176(AX), CX - MOVQ CX, 48(SP) - MOVQ 184(AX), AX - MOVQ AX, 40(SP) - MOVQ 40(SP), AX - ADDQ AX, 48(SP) - - // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) - ADDQ R10, 32(SP) - - // outBase += outPosition - ADDQ R12, R10 - -sequenceDecs_decodeSync_safe_amd64_main_loop: - MOVQ (SP), R13 - - // Fill bitreader to have enough for the offset and match length. - CMPQ SI, $0x08 - JL sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R13 - MOVQ (R13), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decodeSync_safe_amd64_fill_end - -sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_safe_amd64_fill_check_overread - CMPQ BX, $0x07 - JLE sequenceDecs_decodeSync_safe_amd64_fill_end - SHLQ $0x08, DX - SUBQ $0x01, R13 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R13), AX - ORQ AX, DX - JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte - -sequenceDecs_decodeSync_safe_amd64_fill_check_overread: - CMPQ BX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_safe_amd64_fill_end: - // Update offset - MOVQ R9, AX - MOVQ BX, CX - MOVQ DX, R14 - SHLQ CL, R14 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decodeSync_safe_amd64_of_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decodeSync_safe_amd64_of_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decodeSync_safe_amd64_of_update_zero - NEGQ CX - SHRQ CL, R14 - ADDQ R14, AX - -sequenceDecs_decodeSync_safe_amd64_of_update_zero: - MOVQ AX, 8(SP) - - // Update match length - MOVQ R8, AX - MOVQ BX, CX - MOVQ DX, R14 - SHLQ CL, R14 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decodeSync_safe_amd64_ml_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decodeSync_safe_amd64_ml_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decodeSync_safe_amd64_ml_update_zero - NEGQ CX - SHRQ CL, R14 - ADDQ R14, AX - -sequenceDecs_decodeSync_safe_amd64_ml_update_zero: - MOVQ AX, 16(SP) - - // Fill bitreader to have enough for the remaining - CMPQ SI, $0x08 - JL sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte - MOVQ BX, AX - SHRQ $0x03, AX - SUBQ AX, R13 - MOVQ (R13), DX - SUBQ AX, SI - ANDQ $0x07, BX - JMP sequenceDecs_decodeSync_safe_amd64_fill_2_end - -sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte: - CMPQ SI, $0x00 - JLE sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread - CMPQ BX, $0x07 - JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end - SHLQ $0x08, DX - SUBQ $0x01, R13 - SUBQ $0x01, SI - SUBQ $0x08, BX - MOVBQZX (R13), AX - ORQ AX, DX - JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte - -sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread: - CMPQ BX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_safe_amd64_fill_2_end: - // Update literal length - MOVQ DI, AX - MOVQ BX, CX - MOVQ DX, R14 - SHLQ CL, R14 - MOVB AH, CL - SHRQ $0x20, AX - TESTQ CX, CX - JZ sequenceDecs_decodeSync_safe_amd64_ll_update_zero - ADDQ CX, BX - CMPQ BX, $0x40 - JA sequenceDecs_decodeSync_safe_amd64_ll_update_zero - CMPQ CX, $0x40 - JAE sequenceDecs_decodeSync_safe_amd64_ll_update_zero - NEGQ CX - SHRQ CL, R14 - ADDQ R14, AX - -sequenceDecs_decodeSync_safe_amd64_ll_update_zero: - MOVQ AX, 24(SP) - - // Fill bitreader for state updates - MOVQ R13, (SP) - MOVQ R9, AX - SHRQ $0x08, AX - MOVBQZX AL, AX - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - 
JZ sequenceDecs_decodeSync_safe_amd64_skip_update - - // Update Literal Length State - MOVBQZX DI, R13 - SHRL $0x10, DI - LEAQ (BX)(R13*1), CX - MOVQ DX, R14 - MOVQ CX, BX - ROLQ CL, R14 - MOVL $0x00000001, R15 - MOVB R13, CL - SHLL CL, R15 - DECL R15 - ANDQ R15, R14 - ADDQ R14, DI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(DI*8), DI - - // Update Match Length State - MOVBQZX R8, R13 - SHRL $0x10, R8 - LEAQ (BX)(R13*1), CX - MOVQ DX, R14 - MOVQ CX, BX - ROLQ CL, R14 - MOVL $0x00000001, R15 - MOVB R13, CL - SHLL CL, R15 - DECL R15 - ANDQ R15, R14 - ADDQ R14, R8 - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Offset State - MOVBQZX R9, R13 - SHRL $0x10, R9 - LEAQ (BX)(R13*1), CX - MOVQ DX, R14 - MOVQ CX, BX - ROLQ CL, R14 - MOVL $0x00000001, R15 - MOVB R13, CL - SHLL CL, R15 - DECL R15 - ANDQ R15, R14 - ADDQ R14, R9 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R9*8), R9 - -sequenceDecs_decodeSync_safe_amd64_skip_update: - // Adjust offset - MOVQ s+0(FP), CX - MOVQ 8(SP), R13 - CMPQ AX, $0x01 - JBE sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0 - MOVUPS 144(CX), X0 - MOVQ R13, 144(CX) - MOVUPS X0, 152(CX) - JMP sequenceDecs_decodeSync_safe_amd64_after_adjust - -sequenceDecs_decodeSync_safe_amd64_adjust_offsetB_1_or_0: - CMPQ 24(SP), $0x00000000 - JNE sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero - INCQ R13 - JMP sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero - -sequenceDecs_decodeSync_safe_amd64_adjust_offset_maybezero: - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero - MOVQ 144(CX), R13 - JMP sequenceDecs_decodeSync_safe_amd64_after_adjust - -sequenceDecs_decodeSync_safe_amd64_adjust_offset_nonzero: - MOVQ R13, AX - XORQ R14, R14 - MOVQ $-1, R15 - CMPQ R13, $0x03 - CMOVQEQ R14, AX - CMOVQEQ R15, R14 - ADDQ 144(CX)(AX*8), R14 - JNZ sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid - MOVQ $0x00000001, R14 - -sequenceDecs_decodeSync_safe_amd64_adjust_temp_valid: - CMPQ R13, $0x01 - JZ sequenceDecs_decodeSync_safe_amd64_adjust_skip - MOVQ 152(CX), AX - MOVQ AX, 160(CX) - -sequenceDecs_decodeSync_safe_amd64_adjust_skip: - MOVQ 144(CX), AX - MOVQ AX, 152(CX) - MOVQ R14, 144(CX) - MOVQ R14, R13 - -sequenceDecs_decodeSync_safe_amd64_after_adjust: - MOVQ R13, 8(SP) - - // Check values - MOVQ 16(SP), AX - MOVQ 24(SP), CX - LEAQ (AX)(CX*1), R14 - MOVQ s+0(FP), R15 - ADDQ R14, 256(R15) - MOVQ ctx+16(FP), R14 - SUBQ CX, 104(R14) - JS error_not_enough_literals - CMPQ AX, $0x00020002 - JA sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok - TESTQ AX, AX - JNZ sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch - -sequenceDecs_decodeSync_safe_amd64_match_len_ofs_ok: - MOVQ 24(SP), AX - MOVQ 8(SP), CX - MOVQ 16(SP), R13 - - // Check if we have enough space in s.out - LEAQ (AX)(R13*1), R14 - ADDQ R10, R14 - CMPQ R14, 32(SP) - JA error_not_enough_space - - // Copy literals - TESTQ AX, AX - JZ check_offset - MOVQ AX, R14 - SUBQ $0x10, R14 - JB copy_1_small - -copy_1_loop: - MOVUPS (R11), X0 - MOVUPS X0, (R10) - ADDQ $0x10, R11 - ADDQ $0x10, R10 - SUBQ $0x10, R14 - JAE copy_1_loop - LEAQ 16(R11)(R14*1), R11 - LEAQ 16(R10)(R14*1), R10 - MOVUPS -16(R11), X0 - MOVUPS X0, -16(R10) - JMP copy_1_end - -copy_1_small: - CMPQ AX, $0x03 - JE copy_1_move_3 - JB copy_1_move_1or2 - CMPQ AX, $0x08 - JB copy_1_move_4through7 - JMP copy_1_move_8through16 - 
-copy_1_move_1or2: - MOVB (R11), R14 - MOVB -1(R11)(AX*1), R15 - MOVB R14, (R10) - MOVB R15, -1(R10)(AX*1) - ADDQ AX, R11 - ADDQ AX, R10 - JMP copy_1_end - -copy_1_move_3: - MOVW (R11), R14 - MOVB 2(R11), R15 - MOVW R14, (R10) - MOVB R15, 2(R10) - ADDQ AX, R11 - ADDQ AX, R10 - JMP copy_1_end - -copy_1_move_4through7: - MOVL (R11), R14 - MOVL -4(R11)(AX*1), R15 - MOVL R14, (R10) - MOVL R15, -4(R10)(AX*1) - ADDQ AX, R11 - ADDQ AX, R10 - JMP copy_1_end - -copy_1_move_8through16: - MOVQ (R11), R14 - MOVQ -8(R11)(AX*1), R15 - MOVQ R14, (R10) - MOVQ R15, -8(R10)(AX*1) - ADDQ AX, R11 - ADDQ AX, R10 - -copy_1_end: - ADDQ AX, R12 - - // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) -check_offset: - MOVQ R12, AX - ADDQ 40(SP), AX - CMPQ CX, AX - JG error_match_off_too_big - CMPQ CX, 56(SP) - JG error_match_off_too_big - - // Copy match from history - MOVQ CX, AX - SUBQ R12, AX - JLS copy_match - MOVQ 48(SP), R14 - SUBQ AX, R14 - CMPQ R13, AX - JG copy_all_from_history - MOVQ R13, AX - SUBQ $0x10, AX - JB copy_4_small - -copy_4_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R10) - ADDQ $0x10, R14 - ADDQ $0x10, R10 - SUBQ $0x10, AX - JAE copy_4_loop - LEAQ 16(R14)(AX*1), R14 - LEAQ 16(R10)(AX*1), R10 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R10) - JMP copy_4_end - -copy_4_small: - CMPQ R13, $0x03 - JE copy_4_move_3 - CMPQ R13, $0x08 - JB copy_4_move_4through7 - JMP copy_4_move_8through16 - -copy_4_move_3: - MOVW (R14), AX - MOVB 2(R14), CL - MOVW AX, (R10) - MOVB CL, 2(R10) - ADDQ R13, R14 - ADDQ R13, R10 - JMP copy_4_end - -copy_4_move_4through7: - MOVL (R14), AX - MOVL -4(R14)(R13*1), CX - MOVL AX, (R10) - MOVL CX, -4(R10)(R13*1) - ADDQ R13, R14 - ADDQ R13, R10 - JMP copy_4_end - -copy_4_move_8through16: - MOVQ (R14), AX - MOVQ -8(R14)(R13*1), CX - MOVQ AX, (R10) - MOVQ CX, -8(R10)(R13*1) - ADDQ R13, R14 - ADDQ R13, R10 - -copy_4_end: - ADDQ R13, R12 - JMP handle_loop - JMP loop_finished - -copy_all_from_history: - MOVQ AX, R15 - SUBQ $0x10, R15 - JB copy_5_small - -copy_5_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R10) - ADDQ $0x10, R14 - ADDQ $0x10, R10 - SUBQ $0x10, R15 - JAE copy_5_loop - LEAQ 16(R14)(R15*1), R14 - LEAQ 16(R10)(R15*1), R10 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R10) - JMP copy_5_end - -copy_5_small: - CMPQ AX, $0x03 - JE copy_5_move_3 - JB copy_5_move_1or2 - CMPQ AX, $0x08 - JB copy_5_move_4through7 - JMP copy_5_move_8through16 - -copy_5_move_1or2: - MOVB (R14), R15 - MOVB -1(R14)(AX*1), BP - MOVB R15, (R10) - MOVB BP, -1(R10)(AX*1) - ADDQ AX, R14 - ADDQ AX, R10 - JMP copy_5_end - -copy_5_move_3: - MOVW (R14), R15 - MOVB 2(R14), BP - MOVW R15, (R10) - MOVB BP, 2(R10) - ADDQ AX, R14 - ADDQ AX, R10 - JMP copy_5_end - -copy_5_move_4through7: - MOVL (R14), R15 - MOVL -4(R14)(AX*1), BP - MOVL R15, (R10) - MOVL BP, -4(R10)(AX*1) - ADDQ AX, R14 - ADDQ AX, R10 - JMP copy_5_end - -copy_5_move_8through16: - MOVQ (R14), R15 - MOVQ -8(R14)(AX*1), BP - MOVQ R15, (R10) - MOVQ BP, -8(R10)(AX*1) - ADDQ AX, R14 - ADDQ AX, R10 - -copy_5_end: - ADDQ AX, R12 - SUBQ AX, R13 - - // Copy match from the current buffer -copy_match: - MOVQ R10, AX - SUBQ CX, AX - - // ml <= mo - CMPQ R13, CX - JA copy_overlapping_match - - // Copy non-overlapping match - ADDQ R13, R12 - MOVQ R13, CX - SUBQ $0x10, CX - JB copy_2_small - -copy_2_loop: - MOVUPS (AX), X0 - MOVUPS X0, (R10) - ADDQ $0x10, AX - ADDQ $0x10, R10 - SUBQ $0x10, CX - JAE copy_2_loop - LEAQ 16(AX)(CX*1), AX - LEAQ 16(R10)(CX*1), R10 - MOVUPS -16(AX), X0 - MOVUPS X0, -16(R10) - JMP copy_2_end - -copy_2_small: - CMPQ R13, $0x03 - JE copy_2_move_3 
- JB copy_2_move_1or2 - CMPQ R13, $0x08 - JB copy_2_move_4through7 - JMP copy_2_move_8through16 - -copy_2_move_1or2: - MOVB (AX), CL - MOVB -1(AX)(R13*1), R14 - MOVB CL, (R10) - MOVB R14, -1(R10)(R13*1) - ADDQ R13, AX - ADDQ R13, R10 - JMP copy_2_end - -copy_2_move_3: - MOVW (AX), CX - MOVB 2(AX), R14 - MOVW CX, (R10) - MOVB R14, 2(R10) - ADDQ R13, AX - ADDQ R13, R10 - JMP copy_2_end - -copy_2_move_4through7: - MOVL (AX), CX - MOVL -4(AX)(R13*1), R14 - MOVL CX, (R10) - MOVL R14, -4(R10)(R13*1) - ADDQ R13, AX - ADDQ R13, R10 - JMP copy_2_end - -copy_2_move_8through16: - MOVQ (AX), CX - MOVQ -8(AX)(R13*1), R14 - MOVQ CX, (R10) - MOVQ R14, -8(R10)(R13*1) - ADDQ R13, AX - ADDQ R13, R10 - -copy_2_end: - JMP handle_loop - - // Copy overlapping match -copy_overlapping_match: - ADDQ R13, R12 - -copy_slow_3: - MOVB (AX), CL - MOVB CL, (R10) - INCQ AX - INCQ R10 - DECQ R13 - JNZ copy_slow_3 - -handle_loop: - MOVQ ctx+16(FP), AX - DECQ 96(AX) - JNS sequenceDecs_decodeSync_safe_amd64_main_loop - -loop_finished: - MOVQ br+8(FP), AX - MOVQ DX, 24(AX) - MOVB BL, 32(AX) - MOVQ SI, 8(AX) - - // Update the context - MOVQ ctx+16(FP), AX - MOVQ R12, 136(AX) - MOVQ 144(AX), CX - SUBQ CX, R11 - MOVQ R11, 168(AX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decodeSync_safe_amd64_error_match_len_ofs_mismatch: - MOVQ 16(SP), AX - MOVQ ctx+16(FP), CX - MOVQ AX, 216(CX) - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error -sequenceDecs_decodeSync_safe_amd64_error_match_len_too_big: - MOVQ ctx+16(FP), AX - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error -error_match_off_too_big: - MOVQ ctx+16(FP), AX - MOVQ 8(SP), CX - MOVQ CX, 224(AX) - MOVQ R12, 136(AX) - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - - // Return with not enough output space error -error_not_enough_space: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ R12, 136(AX) - MOVQ $0x00000005, ret+24(FP) - RET - -// func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int -// Requires: BMI, BMI2, CMOV, SSE -TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 - MOVQ br+8(FP), BX - MOVQ 24(BX), AX - MOVBQZX 32(BX), DX - MOVQ (BX), CX - MOVQ 8(BX), BX - ADDQ BX, CX - MOVQ CX, (SP) - MOVQ ctx+16(FP), CX - MOVQ 72(CX), SI - MOVQ 80(CX), DI - MOVQ 88(CX), R8 - XORQ R9, R9 - MOVQ R9, 8(SP) - MOVQ R9, 16(SP) - MOVQ R9, 24(SP) - MOVQ 112(CX), R9 - MOVQ 128(CX), R10 - MOVQ R10, 32(SP) - MOVQ 144(CX), R10 - MOVQ 136(CX), R11 - MOVQ 200(CX), R12 - MOVQ R12, 56(SP) - MOVQ 176(CX), R12 - MOVQ R12, 48(SP) - MOVQ 184(CX), CX - MOVQ CX, 40(SP) - MOVQ 40(SP), CX - ADDQ CX, 48(SP) - - // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) - ADDQ R9, 32(SP) - - // outBase += outPosition - ADDQ R11, R9 - -sequenceDecs_decodeSync_safe_bmi2_main_loop: - MOVQ (SP), R12 - - // Fill bitreader to have enough for the offset and match length. 
- CMPQ BX, $0x08 - JL sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R12 - MOVQ (R12), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decodeSync_safe_bmi2_fill_end - -sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_safe_bmi2_fill_check_overread - CMPQ DX, $0x07 - JLE sequenceDecs_decodeSync_safe_bmi2_fill_end - SHLQ $0x08, AX - SUBQ $0x01, R12 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R12), CX - ORQ CX, AX - JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte - -sequenceDecs_decodeSync_safe_bmi2_fill_check_overread: - CMPQ DX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_safe_bmi2_fill_end: - // Update offset - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R13 - MOVQ AX, R14 - LEAQ (DX)(R13*1), CX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - MOVQ CX, DX - MOVQ R8, CX - SHRQ $0x20, CX - ADDQ R14, CX - MOVQ CX, 8(SP) - - // Update match length - MOVQ $0x00000808, CX - BEXTRQ CX, DI, R13 - MOVQ AX, R14 - LEAQ (DX)(R13*1), CX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - MOVQ CX, DX - MOVQ DI, CX - SHRQ $0x20, CX - ADDQ R14, CX - MOVQ CX, 16(SP) - - // Fill bitreader to have enough for the remaining - CMPQ BX, $0x08 - JL sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte - MOVQ DX, CX - SHRQ $0x03, CX - SUBQ CX, R12 - MOVQ (R12), AX - SUBQ CX, BX - ANDQ $0x07, DX - JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_end - -sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte: - CMPQ BX, $0x00 - JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread - CMPQ DX, $0x07 - JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end - SHLQ $0x08, AX - SUBQ $0x01, R12 - SUBQ $0x01, BX - SUBQ $0x08, DX - MOVBQZX (R12), CX - ORQ CX, AX - JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte - -sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread: - CMPQ DX, $0x40 - JA error_overread - -sequenceDecs_decodeSync_safe_bmi2_fill_2_end: - // Update literal length - MOVQ $0x00000808, CX - BEXTRQ CX, SI, R13 - MOVQ AX, R14 - LEAQ (DX)(R13*1), CX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - MOVQ CX, DX - MOVQ SI, CX - SHRQ $0x20, CX - ADDQ R14, CX - MOVQ CX, 24(SP) - - // Fill bitreader for state updates - MOVQ R12, (SP) - MOVQ $0x00000808, CX - BEXTRQ CX, R8, R12 - MOVQ ctx+16(FP), CX - CMPQ 96(CX), $0x00 - JZ sequenceDecs_decodeSync_safe_bmi2_skip_update - LEAQ (SI)(DI*1), R13 - ADDQ R8, R13 - MOVBQZX R13, R13 - LEAQ (DX)(R13*1), CX - MOVQ AX, R14 - MOVQ CX, DX - ROLQ CL, R14 - BZHIQ R13, R14, R14 - - // Update Offset State - BZHIQ R8, R14, CX - SHRXQ R8, R14, R14 - SHRL $0x10, R8 - ADDQ CX, R8 - - // Load ctx.ofTable - MOVQ ctx+16(FP), CX - MOVQ 48(CX), CX - MOVQ (CX)(R8*8), R8 - - // Update Match Length State - BZHIQ DI, R14, CX - SHRXQ DI, R14, R14 - SHRL $0x10, DI - ADDQ CX, DI - - // Load ctx.mlTable - MOVQ ctx+16(FP), CX - MOVQ 24(CX), CX - MOVQ (CX)(DI*8), DI - - // Update Literal Length State - BZHIQ SI, R14, CX - SHRL $0x10, SI - ADDQ CX, SI - - // Load ctx.llTable - MOVQ ctx+16(FP), CX - MOVQ (CX), CX - MOVQ (CX)(SI*8), SI - -sequenceDecs_decodeSync_safe_bmi2_skip_update: - // Adjust offset - MOVQ s+0(FP), CX - MOVQ 8(SP), R13 - CMPQ R12, $0x01 - JBE sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0 - MOVUPS 144(CX), X0 - MOVQ R13, 144(CX) - MOVUPS X0, 152(CX) - JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust - -sequenceDecs_decodeSync_safe_bmi2_adjust_offsetB_1_or_0: - CMPQ 24(SP), $0x00000000 - JNE sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero - INCQ R13 - JMP 
sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero - -sequenceDecs_decodeSync_safe_bmi2_adjust_offset_maybezero: - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero - MOVQ 144(CX), R13 - JMP sequenceDecs_decodeSync_safe_bmi2_after_adjust - -sequenceDecs_decodeSync_safe_bmi2_adjust_offset_nonzero: - MOVQ R13, R12 - XORQ R14, R14 - MOVQ $-1, R15 - CMPQ R13, $0x03 - CMOVQEQ R14, R12 - CMOVQEQ R15, R14 - ADDQ 144(CX)(R12*8), R14 - JNZ sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid - MOVQ $0x00000001, R14 - -sequenceDecs_decodeSync_safe_bmi2_adjust_temp_valid: - CMPQ R13, $0x01 - JZ sequenceDecs_decodeSync_safe_bmi2_adjust_skip - MOVQ 152(CX), R12 - MOVQ R12, 160(CX) - -sequenceDecs_decodeSync_safe_bmi2_adjust_skip: - MOVQ 144(CX), R12 - MOVQ R12, 152(CX) - MOVQ R14, 144(CX) - MOVQ R14, R13 - -sequenceDecs_decodeSync_safe_bmi2_after_adjust: - MOVQ R13, 8(SP) - - // Check values - MOVQ 16(SP), CX - MOVQ 24(SP), R12 - LEAQ (CX)(R12*1), R14 - MOVQ s+0(FP), R15 - ADDQ R14, 256(R15) - MOVQ ctx+16(FP), R14 - SUBQ R12, 104(R14) - JS error_not_enough_literals - CMPQ CX, $0x00020002 - JA sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big - TESTQ R13, R13 - JNZ sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok - TESTQ CX, CX - JNZ sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch - -sequenceDecs_decodeSync_safe_bmi2_match_len_ofs_ok: - MOVQ 24(SP), CX - MOVQ 8(SP), R12 - MOVQ 16(SP), R13 - - // Check if we have enough space in s.out - LEAQ (CX)(R13*1), R14 - ADDQ R9, R14 - CMPQ R14, 32(SP) - JA error_not_enough_space - - // Copy literals - TESTQ CX, CX - JZ check_offset - MOVQ CX, R14 - SUBQ $0x10, R14 - JB copy_1_small - -copy_1_loop: - MOVUPS (R10), X0 - MOVUPS X0, (R9) - ADDQ $0x10, R10 - ADDQ $0x10, R9 - SUBQ $0x10, R14 - JAE copy_1_loop - LEAQ 16(R10)(R14*1), R10 - LEAQ 16(R9)(R14*1), R9 - MOVUPS -16(R10), X0 - MOVUPS X0, -16(R9) - JMP copy_1_end - -copy_1_small: - CMPQ CX, $0x03 - JE copy_1_move_3 - JB copy_1_move_1or2 - CMPQ CX, $0x08 - JB copy_1_move_4through7 - JMP copy_1_move_8through16 - -copy_1_move_1or2: - MOVB (R10), R14 - MOVB -1(R10)(CX*1), R15 - MOVB R14, (R9) - MOVB R15, -1(R9)(CX*1) - ADDQ CX, R10 - ADDQ CX, R9 - JMP copy_1_end - -copy_1_move_3: - MOVW (R10), R14 - MOVB 2(R10), R15 - MOVW R14, (R9) - MOVB R15, 2(R9) - ADDQ CX, R10 - ADDQ CX, R9 - JMP copy_1_end - -copy_1_move_4through7: - MOVL (R10), R14 - MOVL -4(R10)(CX*1), R15 - MOVL R14, (R9) - MOVL R15, -4(R9)(CX*1) - ADDQ CX, R10 - ADDQ CX, R9 - JMP copy_1_end - -copy_1_move_8through16: - MOVQ (R10), R14 - MOVQ -8(R10)(CX*1), R15 - MOVQ R14, (R9) - MOVQ R15, -8(R9)(CX*1) - ADDQ CX, R10 - ADDQ CX, R9 - -copy_1_end: - ADDQ CX, R11 - - // Malformed input if seq.mo > t+len(hist) || seq.mo > s.windowSize) -check_offset: - MOVQ R11, CX - ADDQ 40(SP), CX - CMPQ R12, CX - JG error_match_off_too_big - CMPQ R12, 56(SP) - JG error_match_off_too_big - - // Copy match from history - MOVQ R12, CX - SUBQ R11, CX - JLS copy_match - MOVQ 48(SP), R14 - SUBQ CX, R14 - CMPQ R13, CX - JG copy_all_from_history - MOVQ R13, CX - SUBQ $0x10, CX - JB copy_4_small - -copy_4_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R9) - ADDQ $0x10, R14 - ADDQ $0x10, R9 - SUBQ $0x10, CX - JAE copy_4_loop - LEAQ 16(R14)(CX*1), R14 - LEAQ 16(R9)(CX*1), R9 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R9) - JMP copy_4_end - -copy_4_small: - CMPQ R13, $0x03 - JE copy_4_move_3 - CMPQ R13, $0x08 - JB copy_4_move_4through7 - JMP copy_4_move_8through16 - -copy_4_move_3: - MOVW (R14), CX - MOVB 2(R14), R12 - MOVW CX, (R9) - MOVB R12, 
2(R9) - ADDQ R13, R14 - ADDQ R13, R9 - JMP copy_4_end - -copy_4_move_4through7: - MOVL (R14), CX - MOVL -4(R14)(R13*1), R12 - MOVL CX, (R9) - MOVL R12, -4(R9)(R13*1) - ADDQ R13, R14 - ADDQ R13, R9 - JMP copy_4_end - -copy_4_move_8through16: - MOVQ (R14), CX - MOVQ -8(R14)(R13*1), R12 - MOVQ CX, (R9) - MOVQ R12, -8(R9)(R13*1) - ADDQ R13, R14 - ADDQ R13, R9 - -copy_4_end: - ADDQ R13, R11 - JMP handle_loop - JMP loop_finished - -copy_all_from_history: - MOVQ CX, R15 - SUBQ $0x10, R15 - JB copy_5_small - -copy_5_loop: - MOVUPS (R14), X0 - MOVUPS X0, (R9) - ADDQ $0x10, R14 - ADDQ $0x10, R9 - SUBQ $0x10, R15 - JAE copy_5_loop - LEAQ 16(R14)(R15*1), R14 - LEAQ 16(R9)(R15*1), R9 - MOVUPS -16(R14), X0 - MOVUPS X0, -16(R9) - JMP copy_5_end - -copy_5_small: - CMPQ CX, $0x03 - JE copy_5_move_3 - JB copy_5_move_1or2 - CMPQ CX, $0x08 - JB copy_5_move_4through7 - JMP copy_5_move_8through16 - -copy_5_move_1or2: - MOVB (R14), R15 - MOVB -1(R14)(CX*1), BP - MOVB R15, (R9) - MOVB BP, -1(R9)(CX*1) - ADDQ CX, R14 - ADDQ CX, R9 - JMP copy_5_end - -copy_5_move_3: - MOVW (R14), R15 - MOVB 2(R14), BP - MOVW R15, (R9) - MOVB BP, 2(R9) - ADDQ CX, R14 - ADDQ CX, R9 - JMP copy_5_end - -copy_5_move_4through7: - MOVL (R14), R15 - MOVL -4(R14)(CX*1), BP - MOVL R15, (R9) - MOVL BP, -4(R9)(CX*1) - ADDQ CX, R14 - ADDQ CX, R9 - JMP copy_5_end - -copy_5_move_8through16: - MOVQ (R14), R15 - MOVQ -8(R14)(CX*1), BP - MOVQ R15, (R9) - MOVQ BP, -8(R9)(CX*1) - ADDQ CX, R14 - ADDQ CX, R9 - -copy_5_end: - ADDQ CX, R11 - SUBQ CX, R13 - - // Copy match from the current buffer -copy_match: - MOVQ R9, CX - SUBQ R12, CX - - // ml <= mo - CMPQ R13, R12 - JA copy_overlapping_match - - // Copy non-overlapping match - ADDQ R13, R11 - MOVQ R13, R12 - SUBQ $0x10, R12 - JB copy_2_small - -copy_2_loop: - MOVUPS (CX), X0 - MOVUPS X0, (R9) - ADDQ $0x10, CX - ADDQ $0x10, R9 - SUBQ $0x10, R12 - JAE copy_2_loop - LEAQ 16(CX)(R12*1), CX - LEAQ 16(R9)(R12*1), R9 - MOVUPS -16(CX), X0 - MOVUPS X0, -16(R9) - JMP copy_2_end - -copy_2_small: - CMPQ R13, $0x03 - JE copy_2_move_3 - JB copy_2_move_1or2 - CMPQ R13, $0x08 - JB copy_2_move_4through7 - JMP copy_2_move_8through16 - -copy_2_move_1or2: - MOVB (CX), R12 - MOVB -1(CX)(R13*1), R14 - MOVB R12, (R9) - MOVB R14, -1(R9)(R13*1) - ADDQ R13, CX - ADDQ R13, R9 - JMP copy_2_end - -copy_2_move_3: - MOVW (CX), R12 - MOVB 2(CX), R14 - MOVW R12, (R9) - MOVB R14, 2(R9) - ADDQ R13, CX - ADDQ R13, R9 - JMP copy_2_end - -copy_2_move_4through7: - MOVL (CX), R12 - MOVL -4(CX)(R13*1), R14 - MOVL R12, (R9) - MOVL R14, -4(R9)(R13*1) - ADDQ R13, CX - ADDQ R13, R9 - JMP copy_2_end - -copy_2_move_8through16: - MOVQ (CX), R12 - MOVQ -8(CX)(R13*1), R14 - MOVQ R12, (R9) - MOVQ R14, -8(R9)(R13*1) - ADDQ R13, CX - ADDQ R13, R9 - -copy_2_end: - JMP handle_loop - - // Copy overlapping match -copy_overlapping_match: - ADDQ R13, R11 - -copy_slow_3: - MOVB (CX), R12 - MOVB R12, (R9) - INCQ CX - INCQ R9 - DECQ R13 - JNZ copy_slow_3 - -handle_loop: - MOVQ ctx+16(FP), CX - DECQ 96(CX) - JNS sequenceDecs_decodeSync_safe_bmi2_main_loop - -loop_finished: - MOVQ br+8(FP), CX - MOVQ AX, 24(CX) - MOVB DL, 32(CX) - MOVQ BX, 8(CX) - - // Update the context - MOVQ ctx+16(FP), AX - MOVQ R11, 136(AX) - MOVQ 144(AX), CX - SUBQ CX, R10 - MOVQ R10, 168(AX) - - // Return success - MOVQ $0x00000000, ret+24(FP) - RET - - // Return with match length error -sequenceDecs_decodeSync_safe_bmi2_error_match_len_ofs_mismatch: - MOVQ 16(SP), AX - MOVQ ctx+16(FP), CX - MOVQ AX, 216(CX) - MOVQ $0x00000001, ret+24(FP) - RET - - // Return with match too long error 
-sequenceDecs_decodeSync_safe_bmi2_error_match_len_too_big: - MOVQ ctx+16(FP), AX - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ $0x00000002, ret+24(FP) - RET - - // Return with match offset too long error -error_match_off_too_big: - MOVQ ctx+16(FP), AX - MOVQ 8(SP), CX - MOVQ CX, 224(AX) - MOVQ R11, 136(AX) - MOVQ $0x00000003, ret+24(FP) - RET - - // Return with not enough literals error -error_not_enough_literals: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ $0x00000004, ret+24(FP) - RET - - // Return with overread error -error_overread: - MOVQ $0x00000006, ret+24(FP) - RET - - // Return with not enough output space error -error_not_enough_space: - MOVQ ctx+16(FP), AX - MOVQ 24(SP), CX - MOVQ CX, 208(AX) - MOVQ 16(SP), CX - MOVQ CX, 216(AX) - MOVQ R11, 136(AX) - MOVQ $0x00000005, ret+24(FP) - RET diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go deleted file mode 100644 index 2fb35b788c..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go +++ /dev/null @@ -1,237 +0,0 @@ -//go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm - -package zstd - -import ( - "fmt" - "io" -) - -// decode sequences from the stream with the provided history but without dictionary. -func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { - return false, nil -} - -// decode sequences from the stream without the provided history. -func (s *sequenceDecs) decode(seqs []seqVals) error { - br := s.br - - // Grab full sizes tables, to avoid bounds checks. - llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] - llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state - s.seqSize = 0 - litRemain := len(s.literals) - - maxBlockSize := maxCompressedBlockSize - if s.windowSize < maxBlockSize { - maxBlockSize = s.windowSize - } - for i := range seqs { - var ll, mo, ml int - if len(br.in) > 4+((maxOffsetBits+16+16)>>3) { - // inlined function: - // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) - - // Final will not read from stream. - var llB, mlB, moB uint8 - ll, llB = llState.final() - ml, mlB = mlState.final() - mo, moB = ofState.final() - - // extra bits are stored in reverse order. - br.fillFast() - mo += br.getBits(moB) - if s.maxBits > 32 { - br.fillFast() - } - ml += br.getBits(mlB) - ll += br.getBits(llB) - - if moB > 1 { - s.prevOffset[2] = s.prevOffset[1] - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = mo - } else { - // mo = s.adjustOffset(mo, ll, moB) - // Inlined for rather big speedup - if ll == 0 { - // There is an exception though, when current sequence's literals_length = 0. - // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, - // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. 
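(Editorial note: restated as a standalone helper, the offset handling inlined around this point of the deleted Go code is equivalent to the sketch below. The function name is hypothetical; prevOffset is the decoder's three-entry repeat-offset history and moB is the number of extra offset bits read.)

// adjustOffsetSketch applies the zstd repeated-offset rules: an offset
// coded with more than one extra bit is literal and pushes onto the
// history; otherwise the value selects a history slot, shifted by one
// when the sequence has no literals.
func adjustOffsetSketch(prevOffset *[3]int, mo, litLen int, moB uint8) int {
	if moB > 1 {
		prevOffset[2] = prevOffset[1]
		prevOffset[1] = prevOffset[0]
		prevOffset[0] = mo
		return mo
	}
	if litLen == 0 {
		// literals_length == 0 shifts the repeat codes by one.
		mo++
	}
	if mo == 0 {
		// Repeated_Offset1: reuse the most recent offset; history unchanged.
		return prevOffset[0]
	}
	var temp int
	if mo == 3 {
		temp = prevOffset[0] - 1 // Repeated_Offset1 minus one
	} else {
		temp = prevOffset[mo]
	}
	if temp == 0 {
		// Zero is never a valid offset; the input is corrupt, so force 1.
		temp = 1
	}
	if mo != 1 {
		prevOffset[2] = prevOffset[1]
	}
	prevOffset[1] = prevOffset[0]
	prevOffset[0] = temp
	return temp
}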
- mo++ - } - - if mo == 0 { - mo = s.prevOffset[0] - } else { - var temp int - if mo == 3 { - temp = s.prevOffset[0] - 1 - } else { - temp = s.prevOffset[mo] - } - - if temp == 0 { - // 0 is not valid; input is corrupted; force offset to 1 - println("WARNING: temp was 0") - temp = 1 - } - - if mo != 1 { - s.prevOffset[2] = s.prevOffset[1] - } - s.prevOffset[1] = s.prevOffset[0] - s.prevOffset[0] = temp - mo = temp - } - } - br.fillFast() - } else { - if br.overread() { - if debugDecoder { - printf("reading sequence %d, exceeded available data\n", i) - } - return io.ErrUnexpectedEOF - } - ll, mo, ml = s.next(br, llState, mlState, ofState) - br.fill() - } - - if debugSequences { - println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) - } - // Evaluate. - // We might be doing this async, so do it early. - if mo == 0 && ml > 0 { - return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) - } - if ml > maxMatchLen { - return fmt.Errorf("match len (%d) bigger than max allowed length", ml) - } - s.seqSize += ll + ml - if s.seqSize > maxBlockSize { - return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - } - litRemain -= ll - if litRemain < 0 { - return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll) - } - seqs[i] = seqVals{ - ll: ll, - ml: ml, - mo: mo, - } - if i == len(seqs)-1 { - // This is the last sequence, so we shouldn't update state. - break - } - - // Manually inlined, ~ 5-20% faster - // Update all 3 states at once. Approx 20% faster. - nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() - if nBits == 0 { - llState = llTable[llState.newState()&maxTableMask] - mlState = mlTable[mlState.newState()&maxTableMask] - ofState = ofTable[ofState.newState()&maxTableMask] - } else { - bits := br.get32BitsFast(nBits) - lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) - llState = llTable[(llState.newState()+lowBits)&maxTableMask] - - lowBits = uint16(bits >> (ofState.nbBits() & 31)) - lowBits &= bitMask[mlState.nbBits()&15] - mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] - - lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] - ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] - } - } - s.seqSize += litRemain - if s.seqSize > maxBlockSize { - return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) - } - err := br.close() - if err != nil { - printf("Closing sequences: %v, %+v\n", err, *br) - } - return err -} - -// executeSimple handles cases when a dictionary is not used. -func (s *sequenceDecs) executeSimple(seqs []seqVals, hist []byte) error { - // Ensure we have enough output size... - if len(s.out)+s.seqSize > cap(s.out) { - addBytes := s.seqSize + len(s.out) - s.out = append(s.out, make([]byte, addBytes)...) - s.out = s.out[:len(s.out)-addBytes] - } - - if debugDecoder { - printf("Execute %d seqs with literals: %d into %d bytes\n", len(seqs), len(s.literals), s.seqSize) - } - - var t = len(s.out) - out := s.out[:t+s.seqSize] - - for _, seq := range seqs { - // Add literals - copy(out[t:], s.literals[:seq.ll]) - t += seq.ll - s.literals = s.literals[seq.ll:] - - // Malformed input - if seq.mo > t+len(hist) || seq.mo > s.windowSize { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) - } - - // Copy from history. - if v := seq.mo - t; v > 0 { - // v is the start position in history from end. - start := len(hist) - v - if seq.ml > v { - // Some goes into the current block. 
- // Copy remainder of history - copy(out[t:], hist[start:]) - t += v - seq.ml -= v - } else { - copy(out[t:], hist[start:start+seq.ml]) - t += seq.ml - continue - } - } - - // We must be in the current buffer now - if seq.ml > 0 { - start := t - seq.mo - if seq.ml <= t-start { - // No overlap - copy(out[t:], out[start:start+seq.ml]) - t += seq.ml - } else { - // Overlapping copy - // Extend destination slice and copy one byte at the time. - src := out[start : start+seq.ml] - dst := out[t:] - dst = dst[:len(src)] - t += len(src) - // Destination is the space we just added. - for i := range src { - dst[i] = src[i] - } - } - } - } - // Add final literals - copy(out[t:], s.literals) - if debugDecoder { - t += len(s.literals) - if t != len(out) { - panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) - } - } - s.out = out - - return nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/seqenc.go b/vendor/github.com/klauspost/compress/zstd/seqenc.go deleted file mode 100644 index 8014174a77..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/seqenc.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import "math/bits" - -type seqCoders struct { - llEnc, ofEnc, mlEnc *fseEncoder - llPrev, ofPrev, mlPrev *fseEncoder -} - -// swap coders with another (block). -func (s *seqCoders) swap(other *seqCoders) { - *s, *other = *other, *s -} - -// setPrev will update the previous encoders to the actually used ones -// and make sure a fresh one is in the main slot. -func (s *seqCoders) setPrev(ll, ml, of *fseEncoder) { - compareSwap := func(used *fseEncoder, current, prev **fseEncoder) { - // We used the new one, more current to history and reuse the previous history - if *current == used { - *prev, *current = *current, *prev - c := *current - p := *prev - c.reUsed = false - p.reUsed = true - return - } - if used == *prev { - return - } - // Ensure we cannot reuse by accident - prevEnc := *prev - prevEnc.symbolLen = 0 - } - compareSwap(ll, &s.llEnc, &s.llPrev) - compareSwap(ml, &s.mlEnc, &s.mlPrev) - compareSwap(of, &s.ofEnc, &s.ofPrev) -} - -func highBit(val uint32) (n uint32) { - return uint32(bits.Len32(val) - 1) -} - -var llCodeTable = [64]byte{0, 1, 2, 3, 4, 5, 6, 7, - 8, 9, 10, 11, 12, 13, 14, 15, - 16, 16, 17, 17, 18, 18, 19, 19, - 20, 20, 20, 20, 21, 21, 21, 21, - 22, 22, 22, 22, 22, 22, 22, 22, - 23, 23, 23, 23, 23, 23, 23, 23, - 24, 24, 24, 24, 24, 24, 24, 24, - 24, 24, 24, 24, 24, 24, 24, 24} - -// Up to 6 bits -const maxLLCode = 35 - -// llBitsTable translates from ll code to number of bits. -var llBitsTable = [maxLLCode + 1]byte{ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 1, 1, 1, 1, 2, 2, 3, 3, - 4, 6, 7, 8, 9, 10, 11, 12, - 13, 14, 15, 16} - -// llCode returns the code that represents the literal length requested. 
-func llCode(litLength uint32) uint8 { - const llDeltaCode = 19 - if litLength <= 63 { - // Compiler insists on bounds check (Go 1.12) - return llCodeTable[litLength&63] - } - return uint8(highBit(litLength)) + llDeltaCode -} - -var mlCodeTable = [128]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, - 32, 32, 33, 33, 34, 34, 35, 35, 36, 36, 36, 36, 37, 37, 37, 37, - 38, 38, 38, 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, 39, 39, 39, - 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, - 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, - 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, 42} - -// Up to 6 bits -const maxMLCode = 52 - -// mlBitsTable translates from ml code to number of bits. -var mlBitsTable = [maxMLCode + 1]byte{ - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 1, 1, 1, 1, 2, 2, 3, 3, - 4, 4, 5, 7, 8, 9, 10, 11, - 12, 13, 14, 15, 16} - -// note : mlBase = matchLength - MINMATCH; -// because it's the format it's stored in seqStore->sequences -func mlCode(mlBase uint32) uint8 { - const mlDeltaCode = 36 - if mlBase <= 127 { - // Compiler insists on bounds check (Go 1.12) - return mlCodeTable[mlBase&127] - } - return uint8(highBit(mlBase)) + mlDeltaCode -} - -func ofCode(offset uint32) uint8 { - // A valid offset will always be > 0. - return uint8(bits.Len32(offset) - 1) -} diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go deleted file mode 100644 index ec13594e89..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/snappy.go +++ /dev/null @@ -1,434 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. -// Based on work by Yann Collet, released under BSD License. - -package zstd - -import ( - "encoding/binary" - "errors" - "hash/crc32" - "io" - - "github.com/klauspost/compress/huff0" - snappy "github.com/klauspost/compress/internal/snapref" -) - -const ( - snappyTagLiteral = 0x00 - snappyTagCopy1 = 0x01 - snappyTagCopy2 = 0x02 - snappyTagCopy4 = 0x03 -) - -const ( - snappyChecksumSize = 4 - snappyMagicBody = "sNaPpY" - - // snappyMaxBlockSize is the maximum size of the input to encodeBlock. It is not - // part of the wire format per se, but some parts of the encoder assume - // that an offset fits into a uint16. - // - // Also, for the framing format (Writer type instead of Encode function), - // https://github.com/google/snappy/blob/master/framing_format.txt says - // that "the uncompressed data in a chunk must be no longer than 65536 - // bytes". - snappyMaxBlockSize = 65536 - - // snappyMaxEncodedLenOfMaxBlockSize equals MaxEncodedLen(snappyMaxBlockSize), but is - // hard coded to be a const instead of a variable, so that obufLen can also - // be a const. Their equivalence is confirmed by - // TestMaxEncodedLenOfMaxBlockSize. - snappyMaxEncodedLenOfMaxBlockSize = 76490 -) - -const ( - chunkTypeCompressedData = 0x00 - chunkTypeUncompressedData = 0x01 - chunkTypePadding = 0xfe - chunkTypeStreamIdentifier = 0xff -) - -var ( - // ErrSnappyCorrupt reports that the input is invalid. - ErrSnappyCorrupt = errors.New("snappy: corrupt input") - // ErrSnappyTooLarge reports that the uncompressed length is too large. 
- ErrSnappyTooLarge = errors.New("snappy: decoded block is too large") - // ErrSnappyUnsupported reports that the input isn't supported. - ErrSnappyUnsupported = errors.New("snappy: unsupported input") - - errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") -) - -// SnappyConverter can read SnappyConverter-compressed streams and convert them to zstd. -// Conversion is done by converting the stream directly from Snappy without intermediate -// full decoding. -// Therefore the compression ratio is much less than what can be done by a full decompression -// and compression, and a faulty Snappy stream may lead to a faulty Zstandard stream without -// any errors being generated. -// No CRC value is being generated and not all CRC values of the Snappy stream are checked. -// However, it provides really fast recompression of Snappy streams. -// The converter can be reused to avoid allocations, even after errors. -type SnappyConverter struct { - r io.Reader - err error - buf []byte - block *blockEnc -} - -// Convert the Snappy stream supplied in 'in' and write the zStandard stream to 'w'. -// If any error is detected on the Snappy stream it is returned. -// The number of bytes written is returned. -func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { - initPredefined() - r.err = nil - r.r = in - if r.block == nil { - r.block = &blockEnc{} - r.block.init() - } - r.block.initNewEncode() - if len(r.buf) != snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize { - r.buf = make([]byte, snappyMaxEncodedLenOfMaxBlockSize+snappyChecksumSize) - } - r.block.litEnc.Reuse = huff0.ReusePolicyNone - var written int64 - var readHeader bool - { - header := frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0]) - - var n int - n, r.err = w.Write(header) - if r.err != nil { - return written, r.err - } - written += int64(n) - } - - for { - if !r.readFull(r.buf[:4], true) { - // Add empty last block - r.block.reset(nil) - r.block.last = true - err := r.block.encodeLits(r.block.literals, false) - if err != nil { - return written, err - } - n, err := w.Write(r.block.output) - if err != nil { - return written, err - } - written += int64(n) - - return written, r.err - } - chunkType := r.buf[0] - if !readHeader { - if chunkType != chunkTypeStreamIdentifier { - println("chunkType != chunkTypeStreamIdentifier", chunkType) - r.err = ErrSnappyCorrupt - return written, r.err - } - readHeader = true - } - chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 - if chunkLen > len(r.buf) { - println("chunkLen > len(r.buf)", chunkType) - r.err = ErrSnappyUnsupported - return written, r.err - } - - // The chunk types are specified at - // https://github.com/google/snappy/blob/master/framing_format.txt - switch chunkType { - case chunkTypeCompressedData: - // Section 4.2. Compressed data (chunk type 0x00). 
- if chunkLen < snappyChecksumSize { - println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) - r.err = ErrSnappyCorrupt - return written, r.err - } - buf := r.buf[:chunkLen] - if !r.readFull(buf, false) { - return written, r.err - } - //checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - buf = buf[snappyChecksumSize:] - - n, hdr, err := snappyDecodedLen(buf) - if err != nil { - r.err = err - return written, r.err - } - buf = buf[hdr:] - if n > snappyMaxBlockSize { - println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) - r.err = ErrSnappyCorrupt - return written, r.err - } - r.block.reset(nil) - r.block.pushOffsets() - if err := decodeSnappy(r.block, buf); err != nil { - r.err = err - return written, r.err - } - if r.block.size+r.block.extraLits != n { - printf("invalid size, want %d, got %d\n", n, r.block.size+r.block.extraLits) - r.err = ErrSnappyCorrupt - return written, r.err - } - err = r.block.encode(nil, false, false) - switch err { - case errIncompressible: - r.block.popOffsets() - r.block.reset(nil) - r.block.literals, err = snappy.Decode(r.block.literals[:n], r.buf[snappyChecksumSize:chunkLen]) - if err != nil { - return written, err - } - err = r.block.encodeLits(r.block.literals, false) - if err != nil { - return written, err - } - case nil: - default: - return written, err - } - - n, r.err = w.Write(r.block.output) - if r.err != nil { - return written, err - } - written += int64(n) - continue - case chunkTypeUncompressedData: - if debugEncoder { - println("Uncompressed, chunklen", chunkLen) - } - // Section 4.3. Uncompressed data (chunk type 0x01). - if chunkLen < snappyChecksumSize { - println("chunkLen < snappyChecksumSize", chunkLen, snappyChecksumSize) - r.err = ErrSnappyCorrupt - return written, r.err - } - r.block.reset(nil) - buf := r.buf[:snappyChecksumSize] - if !r.readFull(buf, false) { - return written, r.err - } - checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 - // Read directly into r.decoded instead of via r.buf. - n := chunkLen - snappyChecksumSize - if n > snappyMaxBlockSize { - println("n > snappyMaxBlockSize", n, snappyMaxBlockSize) - r.err = ErrSnappyCorrupt - return written, r.err - } - r.block.literals = r.block.literals[:n] - if !r.readFull(r.block.literals, false) { - return written, r.err - } - if snappyCRC(r.block.literals) != checksum { - println("literals crc mismatch") - r.err = ErrSnappyCorrupt - return written, r.err - } - err := r.block.encodeLits(r.block.literals, false) - if err != nil { - return written, err - } - n, r.err = w.Write(r.block.output) - if r.err != nil { - return written, err - } - written += int64(n) - continue - - case chunkTypeStreamIdentifier: - if debugEncoder { - println("stream id", chunkLen, len(snappyMagicBody)) - } - // Section 4.1. Stream identifier (chunk type 0xff). - if chunkLen != len(snappyMagicBody) { - println("chunkLen != len(snappyMagicBody)", chunkLen, len(snappyMagicBody)) - r.err = ErrSnappyCorrupt - return written, r.err - } - if !r.readFull(r.buf[:len(snappyMagicBody)], false) { - return written, r.err - } - for i := 0; i < len(snappyMagicBody); i++ { - if r.buf[i] != snappyMagicBody[i] { - println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i) - r.err = ErrSnappyCorrupt - return written, r.err - } - } - continue - } - - if chunkType <= 0x7f { - // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). 
- println("chunkType <= 0x7f") - r.err = ErrSnappyUnsupported - return written, r.err - } - // Section 4.4 Padding (chunk type 0xfe). - // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). - if !r.readFull(r.buf[:chunkLen], false) { - return written, r.err - } - } -} - -// decodeSnappy writes the decoding of src to dst. It assumes that the varint-encoded -// length of the decompressed bytes has already been read. -func decodeSnappy(blk *blockEnc, src []byte) error { - //decodeRef(make([]byte, snappyMaxBlockSize), src) - var s, length int - lits := blk.extraLits - var offset uint32 - for s < len(src) { - switch src[s] & 0x03 { - case snappyTagLiteral: - x := uint32(src[s] >> 2) - switch { - case x < 60: - s++ - case x == 60: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, src) - return ErrSnappyCorrupt - } - x = uint32(src[s-1]) - case x == 61: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, src) - return ErrSnappyCorrupt - } - x = uint32(src[s-2]) | uint32(src[s-1])<<8 - case x == 62: - s += 4 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, src) - return ErrSnappyCorrupt - } - x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 - case x == 63: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, src) - return ErrSnappyCorrupt - } - x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - } - if x > snappyMaxBlockSize { - println("x > snappyMaxBlockSize", x, snappyMaxBlockSize) - return ErrSnappyCorrupt - } - length = int(x) + 1 - if length <= 0 { - println("length <= 0 ", length) - - return errUnsupportedLiteralLength - } - //if length > snappyMaxBlockSize-d || uint32(length) > len(src)-s { - // return ErrSnappyCorrupt - //} - - blk.literals = append(blk.literals, src[s:s+length]...) - //println(length, "litLen") - lits += length - s += length - continue - - case snappyTagCopy1: - s += 2 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, len(src)) - return ErrSnappyCorrupt - } - length = 4 + int(src[s-2])>>2&0x7 - offset = uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]) - - case snappyTagCopy2: - s += 3 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, len(src)) - return ErrSnappyCorrupt - } - length = 1 + int(src[s-3])>>2 - offset = uint32(src[s-2]) | uint32(src[s-1])<<8 - - case snappyTagCopy4: - s += 5 - if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. - println("uint(s) > uint(len(src)", s, len(src)) - return ErrSnappyCorrupt - } - length = 1 + int(src[s-5])>>2 - offset = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 - } - - if offset <= 0 || blk.size+lits < int(offset) /*|| length > len(blk)-d */ { - println("offset <= 0 || blk.size+lits < int(offset)", offset, blk.size+lits, int(offset), blk.size, lits) - - return ErrSnappyCorrupt - } - - // Check if offset is one of the recent offsets. - // Adjusts the output offset accordingly. - // Gives a tiny bit of compression, typically around 1%. 
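-		// (Note, as a hedged aside: in this encoder's sequence representation,
-		// offset values 1-3 denote the repeat offsets, so a literal offset is
-		// stored as offset+3; hence the +3 below when matchOffset is skipped.)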
- if false { - offset = blk.matchOffset(offset, uint32(lits)) - } else { - offset += 3 - } - - blk.sequences = append(blk.sequences, seq{ - litLen: uint32(lits), - offset: offset, - matchLen: uint32(length) - zstdMinMatch, - }) - blk.size += length + lits - lits = 0 - } - blk.extraLits = lits - return nil -} - -func (r *SnappyConverter) readFull(p []byte, allowEOF bool) (ok bool) { - if _, r.err = io.ReadFull(r.r, p); r.err != nil { - if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { - r.err = ErrSnappyCorrupt - } - return false - } - return true -} - -var crcTable = crc32.MakeTable(crc32.Castagnoli) - -// crc implements the checksum specified in section 3 of -// https://github.com/google/snappy/blob/master/framing_format.txt -func snappyCRC(b []byte) uint32 { - c := crc32.Update(0, crcTable, b) - return c>>15 | c<<17 + 0xa282ead8 -} - -// snappyDecodedLen returns the length of the decoded block and the number of bytes -// that the length header occupied. -func snappyDecodedLen(src []byte) (blockLen, headerLen int, err error) { - v, n := binary.Uvarint(src) - if n <= 0 || v > 0xffffffff { - return 0, 0, ErrSnappyCorrupt - } - - const wordSize = 32 << (^uint(0) >> 32 & 1) - if wordSize == 32 && v > 0x7fffffff { - return 0, 0, ErrSnappyTooLarge - } - return int(v), n, nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go deleted file mode 100644 index 29c15c8c4e..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/zip.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. - -package zstd - -import ( - "errors" - "io" - "sync" -) - -// ZipMethodWinZip is the method for Zstandard compressed data inside Zip files for WinZip. -// See https://www.winzip.com/win/en/comp_info.html -const ZipMethodWinZip = 93 - -// ZipMethodPKWare is the original method number used by PKWARE to indicate Zstandard compression. -// Deprecated: This has been deprecated by PKWARE, use ZipMethodWinZip instead for compression. -// See https://pkware.cachefly.net/webdocs/APPNOTE/APPNOTE-6.3.9.TXT -const ZipMethodPKWare = 20 - -// zipReaderPool is the default reader pool. -var zipReaderPool = sync.Pool{New: func() interface{} { - z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1)) - if err != nil { - panic(err) - } - return z -}} - -// newZipReader creates a pooled zip decompressor. -func newZipReader(opts ...DOption) func(r io.Reader) io.ReadCloser { - pool := &zipReaderPool - if len(opts) > 0 { - opts = append([]DOption{WithDecoderLowmem(true), WithDecoderMaxWindow(128 << 20)}, opts...) - // Force concurrency 1 - opts = append(opts, WithDecoderConcurrency(1)) - // Create our own pool - pool = &sync.Pool{} - } - return func(r io.Reader) io.ReadCloser { - dec, ok := pool.Get().(*Decoder) - if ok { - dec.Reset(r) - } else { - d, err := NewReader(r, opts...) 
- if err != nil { - panic(err) - } - dec = d - } - return &pooledZipReader{dec: dec, pool: pool} - } -} - -type pooledZipReader struct { - mu sync.Mutex // guards Close and Read - pool *sync.Pool - dec *Decoder -} - -func (r *pooledZipReader) Read(p []byte) (n int, err error) { - r.mu.Lock() - defer r.mu.Unlock() - if r.dec == nil { - return 0, errors.New("read after close or EOF") - } - dec, err := r.dec.Read(p) - if err == io.EOF { - r.dec.Reset(nil) - r.pool.Put(r.dec) - r.dec = nil - } - return dec, err -} - -func (r *pooledZipReader) Close() error { - r.mu.Lock() - defer r.mu.Unlock() - var err error - if r.dec != nil { - err = r.dec.Reset(nil) - r.pool.Put(r.dec) - r.dec = nil - } - return err -} - -type pooledZipWriter struct { - mu sync.Mutex // guards Close and Read - enc *Encoder - pool *sync.Pool -} - -func (w *pooledZipWriter) Write(p []byte) (n int, err error) { - w.mu.Lock() - defer w.mu.Unlock() - if w.enc == nil { - return 0, errors.New("Write after Close") - } - return w.enc.Write(p) -} - -func (w *pooledZipWriter) Close() error { - w.mu.Lock() - defer w.mu.Unlock() - var err error - if w.enc != nil { - err = w.enc.Close() - w.pool.Put(w.enc) - w.enc = nil - } - return err -} - -// ZipCompressor returns a compressor that can be registered with zip libraries. -// The provided encoder options will be used on all encodes. -func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) { - var pool sync.Pool - return func(w io.Writer) (io.WriteCloser, error) { - enc, ok := pool.Get().(*Encoder) - if ok { - enc.Reset(w) - } else { - var err error - enc, err = NewWriter(w, opts...) - if err != nil { - return nil, err - } - } - return &pooledZipWriter{enc: enc, pool: &pool}, nil - } -} - -// ZipDecompressor returns a decompressor that can be registered with zip libraries. -// See ZipCompressor for example. -// Options can be specified. WithDecoderConcurrency(1) is forced, -// and by default a 128MB maximum decompression window is specified. -// The window size can be overridden if required. -func ZipDecompressor(opts ...DOption) func(r io.Reader) io.ReadCloser { - return newZipReader(opts...) -} diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go deleted file mode 100644 index 4be7cc7367..0000000000 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ /dev/null @@ -1,121 +0,0 @@ -// Package zstd provides decompression of zstandard files. -// -// For advanced usage and examples, go to the README: https://github.com/klauspost/compress/tree/master/zstd#zstd -package zstd - -import ( - "bytes" - "encoding/binary" - "errors" - "log" - "math" -) - -// enable debug printing -const debug = false - -// enable encoding debug printing -const debugEncoder = debug - -// enable decoding debug printing -const debugDecoder = debug - -// Enable extra assertions. -const debugAsserts = debug || false - -// print sequence details -const debugSequences = false - -// print detailed matching information -const debugMatches = false - -// force encoder to use predefined tables. -const forcePreDef = false - -// zstdMinMatch is the minimum zstd match length. -const zstdMinMatch = 3 - -// fcsUnknown is used for unknown frame content size. -const fcsUnknown = math.MaxUint64 - -var ( - // ErrReservedBlockType is returned when a reserved block type is found. - // Typically this indicates wrong or corrupted input. 
- ErrReservedBlockType = errors.New("invalid input: reserved block type encountered") - - // ErrCompressedSizeTooBig is returned when a block is bigger than allowed. - // Typically this indicates wrong or corrupted input. - ErrCompressedSizeTooBig = errors.New("invalid input: compressed size too big") - - // ErrBlockTooSmall is returned when a block is too small to be decoded. - // Typically returned on invalid input. - ErrBlockTooSmall = errors.New("block too small") - - // ErrUnexpectedBlockSize is returned when a block has unexpected size. - // Typically returned on invalid input. - ErrUnexpectedBlockSize = errors.New("unexpected block size") - - // ErrMagicMismatch is returned when a "magic" number isn't what is expected. - // Typically this indicates wrong or corrupted input. - ErrMagicMismatch = errors.New("invalid input: magic number mismatch") - - // ErrWindowSizeExceeded is returned when a reference exceeds the valid window size. - // Typically this indicates wrong or corrupted input. - ErrWindowSizeExceeded = errors.New("window size exceeded") - - // ErrWindowSizeTooSmall is returned when no window size is specified. - // Typically this indicates wrong or corrupted input. - ErrWindowSizeTooSmall = errors.New("invalid input: window size was too small") - - // ErrDecoderSizeExceeded is returned if decompressed size exceeds the configured limit. - ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit") - - // ErrUnknownDictionary is returned if the dictionary ID is unknown. - ErrUnknownDictionary = errors.New("unknown dictionary") - - // ErrFrameSizeExceeded is returned if the stated frame size is exceeded. - // This is only returned if SingleSegment is specified on the frame. - ErrFrameSizeExceeded = errors.New("frame size exceeded") - - // ErrFrameSizeMismatch is returned if the stated frame size does not match the expected size. - // This is only returned if SingleSegment is specified on the frame. - ErrFrameSizeMismatch = errors.New("frame size does not match size on stream") - - // ErrCRCMismatch is returned if CRC mismatches. - ErrCRCMismatch = errors.New("CRC check failed") - - // ErrDecoderClosed will be returned if the Decoder was used after - // Close has been called. - ErrDecoderClosed = errors.New("decoder used after Close") - - // ErrDecoderNilInput is returned when a nil Reader was provided - // and an operation other than Reset/DecodeAll/Close was attempted. - ErrDecoderNilInput = errors.New("nil input provided as reader") -) - -func println(a ...interface{}) { - if debug || debugDecoder || debugEncoder { - log.Println(a...) - } -} - -func printf(format string, a ...interface{}) { - if debug || debugDecoder || debugEncoder { - log.Printf(format, a...) - } -} - -func load3232(b []byte, i int32) uint32 { - return binary.LittleEndian.Uint32(b[:len(b):len(b)][i:]) -} - -func load6432(b []byte, i int32) uint64 { - return binary.LittleEndian.Uint64(b[:len(b):len(b)][i:]) -} - -type byter interface { - Bytes() []byte - Len() int -} - -var _ byter = &bytes.Buffer{} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectorfunc.go b/vendor/github.com/prometheus/client_golang/prometheus/collectorfunc.go new file mode 100644 index 0000000000..9a71a15db1 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/collectorfunc.go @@ -0,0 +1,30 @@ +// Copyright 2025 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// CollectorFunc is a convenient way to implement a Prometheus Collector +// without interface boilerplate. +// This implementation is based on DescribeByCollect method. +// familiarize yourself to it before using. +type CollectorFunc func(chan<- Metric) + +// Collect calls the defined CollectorFunc function with the provided Metrics channel +func (f CollectorFunc) Collect(ch chan<- Metric) { + f(ch) +} + +// Describe sends the descriptor information using DescribeByCollect +func (f CollectorFunc) Describe(ch chan<- *Desc) { + DescribeByCollect(f, ch) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go index 68ffe3c248..ad347113c0 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -189,12 +189,15 @@ func (d *Desc) String() string { fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()), ) } - vlStrings := make([]string, 0, len(d.variableLabels.names)) - for _, vl := range d.variableLabels.names { - if fn, ok := d.variableLabels.labelConstraints[vl]; ok && fn != nil { - vlStrings = append(vlStrings, fmt.Sprintf("c(%s)", vl)) - } else { - vlStrings = append(vlStrings, vl) + vlStrings := []string{} + if d.variableLabels != nil { + vlStrings = make([]string, 0, len(d.variableLabels.names)) + for _, vl := range d.variableLabels.names { + if fn, ok := d.variableLabels.labelConstraints[vl]; ok && fn != nil { + vlStrings = append(vlStrings, fmt.Sprintf("c(%s)", vl)) + } else { + vlStrings = append(vlStrings, vl) + } } } return fmt.Sprintf( diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go index 5117464172..6b8684731c 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go @@ -288,7 +288,7 @@ func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector { } func attachOriginalName(desc, origName string) string { - return fmt.Sprintf("%s Sourced from %s", desc, origName) + return fmt.Sprintf("%s Sourced from %s.", desc, origName) } // Describe returns all descriptions of the collector. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index 519db348a7..c453b754a7 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -14,6 +14,7 @@ package prometheus import ( + "errors" "fmt" "math" "runtime" @@ -28,6 +29,11 @@ import ( "google.golang.org/protobuf/types/known/timestamppb" ) +const ( + nativeHistogramSchemaMaximum = 8 + nativeHistogramSchemaMinimum = -4 +) + // nativeHistogramBounds for the frac of observed values. Only relevant for // schema > 0. The position in the slice is the schema. 
(0 is never used, just // here for convenience of using the schema directly as the index.) @@ -330,11 +336,11 @@ func ExponentialBuckets(start, factor float64, count int) []float64 { // used for the Buckets field of HistogramOpts. // // The function panics if 'count' is 0 or negative, if 'min' is 0 or negative. -func ExponentialBucketsRange(min, max float64, count int) []float64 { +func ExponentialBucketsRange(minBucket, maxBucket float64, count int) []float64 { if count < 1 { panic("ExponentialBucketsRange count needs a positive count") } - if min <= 0 { + if minBucket <= 0 { panic("ExponentialBucketsRange min needs to be greater than 0") } @@ -342,12 +348,12 @@ func ExponentialBucketsRange(min, max float64, count int) []float64 { // max = min*growthFactor^(bucketCount-1) // We know max/min and highest bucket. Solve for growthFactor. - growthFactor := math.Pow(max/min, 1.0/float64(count-1)) + growthFactor := math.Pow(maxBucket/minBucket, 1.0/float64(count-1)) // Now that we know growthFactor, solve for each bucket. buckets := make([]float64, count) for i := 1; i <= count; i++ { - buckets[i-1] = min * math.Pow(growthFactor, float64(i-1)) + buckets[i-1] = minBucket * math.Pow(growthFactor, float64(i-1)) } return buckets } @@ -858,15 +864,35 @@ func (h *histogram) Write(out *dto.Metric) error { // findBucket returns the index of the bucket for the provided value, or // len(h.upperBounds) for the +Inf bucket. func (h *histogram) findBucket(v float64) int { - // TODO(beorn7): For small numbers of buckets (<30), a linear search is - // slightly faster than the binary search. If we really care, we could - // switch from one search strategy to the other depending on the number - // of buckets. - // - // Microbenchmarks (BenchmarkHistogramNoLabels): - // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op - // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op - // 300 buckets: 154 ns/op linear - binary 61.6 ns/op + n := len(h.upperBounds) + if n == 0 { + return 0 + } + + // Early exit: if v is less than or equal to the first upper bound, return 0 + if v <= h.upperBounds[0] { + return 0 + } + + // Early exit: if v is greater than the last upper bound, return len(h.upperBounds) + if v > h.upperBounds[n-1] { + return n + } + + // For small arrays, use simple linear search + // "magic number" 35 is result of tests on couple different (AWS and baremetal) servers + // see more details here: https://github.com/prometheus/client_golang/pull/1662 + if n < 35 { + for i, bound := range h.upperBounds { + if v <= bound { + return i + } + } + // If v is greater than all upper bounds, return len(h.upperBounds) + return n + } + + // For larger arrays, use stdlib's binary search return sort.SearchFloat64s(h.upperBounds, v) } @@ -1440,9 +1466,9 @@ func pickSchema(bucketFactor float64) int32 { floor := math.Floor(math.Log2(math.Log2(bucketFactor))) switch { case floor <= -8: - return 8 + return nativeHistogramSchemaMaximum case floor >= 4: - return -4 + return nativeHistogramSchemaMinimum default: return -int32(floor) } @@ -1835,3 +1861,196 @@ func (n *nativeExemplars) addExemplar(e *dto.Exemplar) { n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, append(n.exemplars[nIdx:rIdx], n.exemplars[rIdx+1:]...)...)...) 
} } + +type constNativeHistogram struct { + desc *Desc + dto.Histogram + labelPairs []*dto.LabelPair +} + +func validateCount(sum float64, count uint64, negativeBuckets, positiveBuckets map[int]int64, zeroBucket uint64) error { + var bucketPopulationSum int64 + for _, v := range positiveBuckets { + bucketPopulationSum += v + } + for _, v := range negativeBuckets { + bucketPopulationSum += v + } + bucketPopulationSum += int64(zeroBucket) + + // If the sum of observations is NaN, the number of observations must be greater or equal to the sum of all bucket counts. + // Otherwise, the number of observations must be equal to the sum of all bucket counts . + + if math.IsNaN(sum) && bucketPopulationSum > int64(count) || + !math.IsNaN(sum) && bucketPopulationSum != int64(count) { + return errors.New("the sum of all bucket populations exceeds the count of observations") + } + return nil +} + +// NewConstNativeHistogram returns a metric representing a Prometheus native histogram with +// fixed values for the count, sum, and positive/negative/zero bucket counts. As those parameters +// cannot be changed, the returned value does not implement the Histogram +// interface (but only the Metric interface). Users of this package will not +// have much use for it in regular operations. However, when implementing custom +// OpenTelemetry Collectors, it is useful as a throw-away metric that is generated on the fly +// to send it to Prometheus in the Collect method. +// +// zeroBucket counts all (positive and negative) +// observations in the zero bucket (with an absolute value less or equal +// the current threshold). +// positiveBuckets and negativeBuckets are separate maps for negative and positive +// observations. The map's value is an int64, counting observations in +// that bucket. The map's key is the +// index of the bucket according to the used +// Schema. Index 0 is for an upper bound of 1 in positive buckets and for a lower bound of -1 in negative buckets. +// NewConstNativeHistogram returns an error if +// - the length of labelValues is not consistent with the variable labels in Desc or if Desc is invalid. +// - the schema passed is not between 8 and -4 +// - the sum of counts in all buckets including the zero bucket does not equal the count if sum is not NaN (or exceeds the count if sum is NaN) +// +// See https://opentelemetry.io/docs/specs/otel/compatibility/prometheus_and_openmetrics/#exponential-histograms for more details about the conversion from OTel to Prometheus. 
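+//
+// As a purely illustrative sketch (desc, labelValue, and the numbers here are
+// hypothetical), a histogram of 10 observations summing to 4.2, with 8
+// observations in positive bucket index 0 and 2 in the zero bucket:
+//
+//	m, err := NewConstNativeHistogram(desc, 10, 4.2,
+//		map[int]int64{0: 8}, nil, 2, 3, 1e-128, time.Now(), "labelValue")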
+func NewConstNativeHistogram( + desc *Desc, + count uint64, + sum float64, + positiveBuckets, negativeBuckets map[int]int64, + zeroBucket uint64, + schema int32, + zeroThreshold float64, + createdTimestamp time.Time, + labelValues ...string, +) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil { + return nil, err + } + if schema > nativeHistogramSchemaMaximum || schema < nativeHistogramSchemaMinimum { + return nil, errors.New("invalid native histogram schema") + } + if err := validateCount(sum, count, negativeBuckets, positiveBuckets, zeroBucket); err != nil { + return nil, err + } + + NegativeSpan, NegativeDelta := makeBucketsFromMap(negativeBuckets) + PositiveSpan, PositiveDelta := makeBucketsFromMap(positiveBuckets) + ret := &constNativeHistogram{ + desc: desc, + Histogram: dto.Histogram{ + CreatedTimestamp: timestamppb.New(createdTimestamp), + Schema: &schema, + ZeroThreshold: &zeroThreshold, + SampleCount: &count, + SampleSum: &sum, + + NegativeSpan: NegativeSpan, + NegativeDelta: NegativeDelta, + + PositiveSpan: PositiveSpan, + PositiveDelta: PositiveDelta, + + ZeroCount: proto.Uint64(zeroBucket), + }, + labelPairs: MakeLabelPairs(desc, labelValues), + } + if *ret.ZeroThreshold == 0 && *ret.ZeroCount == 0 && len(ret.PositiveSpan) == 0 && len(ret.NegativeSpan) == 0 { + ret.PositiveSpan = []*dto.BucketSpan{{ + Offset: proto.Int32(0), + Length: proto.Uint32(0), + }} + } + return ret, nil +} + +// MustNewConstNativeHistogram is a version of NewConstNativeHistogram that panics where +// NewConstNativeHistogram would have returned an error. +func MustNewConstNativeHistogram( + desc *Desc, + count uint64, + sum float64, + positiveBuckets, negativeBuckets map[int]int64, + zeroBucket uint64, + nativeHistogramSchema int32, + nativeHistogramZeroThreshold float64, + createdTimestamp time.Time, + labelValues ...string, +) Metric { + nativehistogram, err := NewConstNativeHistogram(desc, + count, + sum, + positiveBuckets, + negativeBuckets, + zeroBucket, + nativeHistogramSchema, + nativeHistogramZeroThreshold, + createdTimestamp, + labelValues...) + if err != nil { + panic(err) + } + return nativehistogram +} + +func (h *constNativeHistogram) Desc() *Desc { + return h.desc +} + +func (h *constNativeHistogram) Write(out *dto.Metric) error { + out.Histogram = &h.Histogram + out.Label = h.labelPairs + return nil +} + +func makeBucketsFromMap(buckets map[int]int64) ([]*dto.BucketSpan, []int64) { + if len(buckets) == 0 { + return nil, nil + } + var ii []int + for k := range buckets { + ii = append(ii, k) + } + sort.Ints(ii) + + var ( + spans []*dto.BucketSpan + deltas []int64 + prevCount int64 + nextI int + ) + + appendDelta := func(count int64) { + *spans[len(spans)-1].Length++ + deltas = append(deltas, count-prevCount) + prevCount = count + } + + for n, i := range ii { + count := buckets[i] + // Multiple spans with only small gaps in between are probably + // encoded more efficiently as one larger span with a few empty + // buckets. Needs some research to find the sweet spot. For now, + // we assume that gaps of one or two buckets should not create + // a new span. + iDelta := int32(i - nextI) + if n == 0 || iDelta > 2 { + // We have to create a new span, either because we are + // at the very beginning, or because we have found a gap + // of more than two buckets. 
+ spans = append(spans, &dto.BucketSpan{ + Offset: proto.Int32(iDelta), + Length: proto.Uint32(0), + }) + } else { + // We have found a small gap (or no gap at all). + // Insert empty buckets as needed. + for j := int32(0); j < iDelta; j++ { + appendDelta(0) + } + } + appendDelta(count) + nextI = i + 1 + } + return spans, deltas +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go index a595a20362..8b016355ad 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go @@ -22,17 +22,18 @@ import ( "bytes" "fmt" "io" + "strconv" "strings" ) -func min(a, b int) int { +func minInt(a, b int) int { if a < b { return a } return b } -func max(a, b int) int { +func maxInt(a, b int) int { if a > b { return a } @@ -427,12 +428,12 @@ func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { if codes[0].Tag == 'e' { c := codes[0] i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2} + codes[0] = OpCode{c.Tag, maxInt(i1, i2-n), i2, maxInt(j1, j2-n), j2} } if codes[len(codes)-1].Tag == 'e' { c := codes[len(codes)-1] i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)} + codes[len(codes)-1] = OpCode{c.Tag, i1, minInt(i2, i1+n), j1, minInt(j2, j1+n)} } nn := n + n groups := [][]OpCode{} @@ -443,12 +444,12 @@ func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { // there is a large range with no changes. if c.Tag == 'e' && i2-i1 > nn { group = append(group, OpCode{ - c.Tag, i1, min(i2, i1+n), - j1, min(j2, j1+n), + c.Tag, i1, minInt(i2, i1+n), + j1, minInt(j2, j1+n), }) groups = append(groups, group) group = []OpCode{} - i1, j1 = max(i1, i2-n), max(j1, j2-n) + i1, j1 = maxInt(i1, i2-n), maxInt(j1, j2-n) } group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) } @@ -515,7 +516,7 @@ func (m *SequenceMatcher) QuickRatio() float64 { // is faster to compute than either .Ratio() or .QuickRatio(). func (m *SequenceMatcher) RealQuickRatio() float64 { la, lb := len(m.a), len(m.b) - return calculateRatio(min(la, lb), la+lb) + return calculateRatio(minInt(la, lb), la+lb) } // Convert range to the "ed" format @@ -524,7 +525,7 @@ func formatRangeUnified(start, stop int) string { beginning := start + 1 // lines start numbering with one length := stop - start if length == 1 { - return fmt.Sprintf("%d", beginning) + return strconv.Itoa(beginning) } if length == 0 { beginning-- // empty ranges begin at line just before the range diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go index 97d17d6cb6..f7f97ef926 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go @@ -66,7 +66,8 @@ func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) name += "_total" } - valid := model.IsValidMetricName(model.LabelValue(namespace + "_" + subsystem + "_" + name)) + // Our current conversion moves to legacy naming, so use legacy validation. 
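+	// (A legacy-valid name matches [a-zA-Z_:][a-zA-Z0-9_:]*; UTF-8 metric names are broader.)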
+ valid := model.IsValidLegacyMetricName(namespace + "_" + subsystem + "_" + name) switch d.Kind { case metrics.KindUint64: case metrics.KindFloat64: diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go index 9d9b81ab44..592eec3e24 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -108,15 +108,23 @@ func BuildFQName(namespace, subsystem, name string) string { if name == "" { return "" } - switch { - case namespace != "" && subsystem != "": - return strings.Join([]string{namespace, subsystem, name}, "_") - case namespace != "": - return strings.Join([]string{namespace, name}, "_") - case subsystem != "": - return strings.Join([]string{subsystem, name}, "_") + + sb := strings.Builder{} + sb.Grow(len(namespace) + len(subsystem) + len(name) + 2) + + if namespace != "" { + sb.WriteString(namespace) + sb.WriteString("_") } - return name + + if subsystem != "" { + sb.WriteString(subsystem) + sb.WriteString("_") + } + + sb.WriteString(name) + + return sb.String() } type invalidMetric struct { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go index 62a4e7ad9a..e7bce8b58e 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -23,6 +23,7 @@ import ( type processCollector struct { collectFn func(chan<- Metric) + describeFn func(chan<- *Desc) pidFn func() (int, error) reportErrors bool cpuTotal *Desc @@ -122,26 +123,23 @@ func NewProcessCollector(opts ProcessCollectorOpts) Collector { // Set up process metric collection if supported by the runtime. if canCollectProcess() { c.collectFn = c.processCollect + c.describeFn = c.describe } else { - c.collectFn = func(ch chan<- Metric) { - c.reportError(ch, nil, errors.New("process metrics not supported on this platform")) - } + c.collectFn = c.errorCollectFn + c.describeFn = c.errorDescribeFn } return c } -// Describe returns all descriptions of the collector. -func (c *processCollector) Describe(ch chan<- *Desc) { - ch <- c.cpuTotal - ch <- c.openFDs - ch <- c.maxFDs - ch <- c.vsize - ch <- c.maxVsize - ch <- c.rss - ch <- c.startTime - ch <- c.inBytes - ch <- c.outBytes +func (c *processCollector) errorCollectFn(ch chan<- Metric) { + c.reportError(ch, nil, errors.New("process metrics not supported on this platform")) +} + +func (c *processCollector) errorDescribeFn(ch chan<- *Desc) { + if c.reportErrors { + ch <- NewInvalidDesc(errors.New("process metrics not supported on this platform")) + } } // Collect returns the current state of all metrics of the collector. @@ -149,6 +147,11 @@ func (c *processCollector) Collect(ch chan<- Metric) { c.collectFn(ch) } +// Describe returns all descriptions of the collector. 
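+// It delegates to the platform-specific describeFn chosen in NewProcessCollector.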
+func (c *processCollector) Describe(ch chan<- *Desc) { + c.describeFn(ch) +} + func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) { if !c.reportErrors { return diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go new file mode 100644 index 0000000000..0a61b98461 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go @@ -0,0 +1,130 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build darwin && !ios + +package prometheus + +import ( + "errors" + "fmt" + "os" + "syscall" + "time" + + "golang.org/x/sys/unix" +) + +// notImplementedErr is returned by stub functions that replace cgo functions, when cgo +// isn't available. +var notImplementedErr = errors.New("not implemented") + +type memoryInfo struct { + vsize uint64 // Virtual memory size in bytes + rss uint64 // Resident memory size in bytes +} + +func canCollectProcess() bool { + return true +} + +func getSoftLimit(which int) (uint64, error) { + rlimit := syscall.Rlimit{} + + if err := syscall.Getrlimit(which, &rlimit); err != nil { + return 0, err + } + + return rlimit.Cur, nil +} + +func getOpenFileCount() (float64, error) { + // Alternately, the undocumented proc_pidinfo(PROC_PIDLISTFDS) can be used to + // return a list of open fds, but that requires a way to call C APIs. The + // benefits, however, include fewer system calls and not failing when at the + // open file soft limit. + + if dir, err := os.Open("/dev/fd"); err != nil { + return 0.0, err + } else { + defer dir.Close() + + // Avoid ReadDir(), as it calls stat(2) on each descriptor. Not only is + // that info not used, but KQUEUE descriptors fail stat(2), which causes + // the whole method to fail. + if names, err := dir.Readdirnames(0); err != nil { + return 0.0, err + } else { + // Subtract 1 to ignore the open /dev/fd descriptor above. + return float64(len(names) - 1), nil + } + } +} + +func (c *processCollector) processCollect(ch chan<- Metric) { + if procs, err := unix.SysctlKinfoProcSlice("kern.proc.pid", os.Getpid()); err == nil { + if len(procs) == 1 { + startTime := float64(procs[0].Proc.P_starttime.Nano() / 1e9) + ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) + } else { + err = fmt.Errorf("sysctl() returned %d proc structs (expected 1)", len(procs)) + c.reportError(ch, c.startTime, err) + } + } else { + c.reportError(ch, c.startTime, err) + } + + // The proc structure returned by kern.proc.pid above has an Rusage member, + // but it is not filled in, so it needs to be fetched by getrusage(2). For + // that call, the UTime, STime, and Maxrss members are filled out, but not + // Ixrss, Idrss, or Isrss for the memory usage. Memory stats will require + // access to the C API to call task_info(TASK_BASIC_INFO). 
+	rusage := unix.Rusage{}
+
+	if err := unix.Getrusage(syscall.RUSAGE_SELF, &rusage); err == nil {
+		cpuTime := time.Duration(rusage.Stime.Nano() + rusage.Utime.Nano()).Seconds()
+		ch <- MustNewConstMetric(c.cpuTotal, CounterValue, cpuTime)
+	} else {
+		c.reportError(ch, c.cpuTotal, err)
+	}
+
+	if memInfo, err := getMemory(); err == nil {
+		ch <- MustNewConstMetric(c.rss, GaugeValue, float64(memInfo.rss))
+		ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(memInfo.vsize))
+	} else if !errors.Is(err, notImplementedErr) {
+		// Don't report an error when support is not compiled in.
+		c.reportError(ch, c.rss, err)
+		c.reportError(ch, c.vsize, err)
+	}
+
+	if fds, err := getOpenFileCount(); err == nil {
+		ch <- MustNewConstMetric(c.openFDs, GaugeValue, fds)
+	} else {
+		c.reportError(ch, c.openFDs, err)
+	}
+
+	if openFiles, err := getSoftLimit(syscall.RLIMIT_NOFILE); err == nil {
+		ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(openFiles))
+	} else {
+		c.reportError(ch, c.maxFDs, err)
+	}
+
+	if addressSpace, err := getSoftLimit(syscall.RLIMIT_AS); err == nil {
+		ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(addressSpace))
+	} else {
+		c.reportError(ch, c.maxVsize, err)
+	}
+
+	// TODO: socket(PF_SYSTEM) to fetch "com.apple.network.statistics" might
+	// be able to get the per-process network send/receive counts.
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.c b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.c
new file mode 100644
index 0000000000..d00a24315d
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.c
@@ -0,0 +1,84 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build darwin && !ios && cgo
+
+#include <mach/mach_init.h>
+#include <mach/task.h>
+#include <mach/mach_vm.h>
+
+// The compiler warns that mach/shared_memory_server.h is deprecated, and to use
+// mach/shared_region.h instead. But that doesn't define
+// SHARED_DATA_REGION_SIZE or SHARED_TEXT_REGION_SIZE, so redefine them here and
+// avoid a warning message when running tests.
+#define GLOBAL_SHARED_TEXT_SEGMENT 0x90000000U
+#define SHARED_DATA_REGION_SIZE 0x10000000
+#define SHARED_TEXT_REGION_SIZE 0x10000000
+
+
+int get_memory_info(unsigned long long *rss, unsigned long long *vsize)
+{
+	// This is lightly adapted from how ps(1) obtains its memory info.
+ // https://github.com/apple-oss-distributions/adv_cmds/blob/8744084ea0ff41ca4bb96b0f9c22407d0e48e9b7/ps/tasks.c#L109 + + kern_return_t error; + task_t task = MACH_PORT_NULL; + mach_task_basic_info_data_t info; + mach_msg_type_number_t info_count = MACH_TASK_BASIC_INFO_COUNT; + + error = task_info( + mach_task_self(), + MACH_TASK_BASIC_INFO, + (task_info_t) &info, + &info_count ); + + if( error != KERN_SUCCESS ) + { + return error; + } + + *rss = info.resident_size; + *vsize = info.virtual_size; + + { + vm_region_basic_info_data_64_t b_info; + mach_vm_address_t address = GLOBAL_SHARED_TEXT_SEGMENT; + mach_vm_size_t size; + mach_port_t object_name; + + /* + * try to determine if this task has the split libraries + * mapped in... if so, adjust its virtual size down by + * the 2 segments that are used for split libraries + */ + info_count = VM_REGION_BASIC_INFO_COUNT_64; + + error = mach_vm_region( + mach_task_self(), + &address, + &size, + VM_REGION_BASIC_INFO_64, + (vm_region_info_t) &b_info, + &info_count, + &object_name); + + if (error == KERN_SUCCESS) { + if (b_info.reserved && size == (SHARED_TEXT_REGION_SIZE) && + *vsize > (SHARED_TEXT_REGION_SIZE + SHARED_DATA_REGION_SIZE)) { + *vsize -= (SHARED_TEXT_REGION_SIZE + SHARED_DATA_REGION_SIZE); + } + } + } + + return 0; +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.go new file mode 100644 index 0000000000..9ac53f9992 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.go @@ -0,0 +1,51 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build darwin && !ios && cgo + +package prometheus + +/* +int get_memory_info(unsigned long long *rss, unsigned long long *vs); +*/ +import "C" +import "fmt" + +func getMemory() (*memoryInfo, error) { + var rss, vsize C.ulonglong + + if err := C.get_memory_info(&rss, &vsize); err != 0 { + return nil, fmt.Errorf("task_info() failed with 0x%x", int(err)) + } + + return &memoryInfo{vsize: uint64(vsize), rss: uint64(rss)}, nil +} + +// describe returns all descriptions of the collector for Darwin. +// Ensure that this list of descriptors is kept in sync with the metrics collected +// in the processCollect method. Any changes to the metrics in processCollect +// (such as adding or removing metrics) should be reflected in this list of descriptors. 
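+// (With cgo available, getMemory is implemented via get_memory_info above, so
+// rss and vsize are described here.)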
+func (c *processCollector) describe(ch chan<- *Desc) { + ch <- c.cpuTotal + ch <- c.openFDs + ch <- c.maxFDs + ch <- c.maxVsize + ch <- c.startTime + ch <- c.rss + ch <- c.vsize + + /* the process could be collected but not implemented yet + ch <- c.inBytes + ch <- c.outBytes + */ +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go new file mode 100644 index 0000000000..8ddb0995d6 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go @@ -0,0 +1,39 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build darwin && !ios && !cgo + +package prometheus + +func getMemory() (*memoryInfo, error) { + return nil, notImplementedErr +} + +// describe returns all descriptions of the collector for Darwin. +// Ensure that this list of descriptors is kept in sync with the metrics collected +// in the processCollect method. Any changes to the metrics in processCollect +// (such as adding or removing metrics) should be reflected in this list of descriptors. +func (c *processCollector) describe(ch chan<- *Desc) { + ch <- c.cpuTotal + ch <- c.openFDs + ch <- c.maxFDs + ch <- c.maxVsize + ch <- c.startTime + + /* the process could be collected but not implemented yet + ch <- c.rss + ch <- c.vsize + ch <- c.inBytes + ch <- c.outBytes + */ +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_not_supported.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_not_supported.go new file mode 100644 index 0000000000..7732b7f376 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_not_supported.go @@ -0,0 +1,33 @@ +// Copyright 2023 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build wasip1 || js || ios +// +build wasip1 js ios + +package prometheus + +func canCollectProcess() bool { + return false +} + +func (c *processCollector) processCollect(ch chan<- Metric) { + c.errorCollectFn(ch) +} + +// describe returns all descriptions of the collector for wasip1 and js. +// Ensure that this list of descriptors is kept in sync with the metrics collected +// in the processCollect method. Any changes to the metrics in processCollect +// (such as adding or removing metrics) should be reflected in this list of descriptors. 
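+// (Per the build constraints above, this stub also covers ios builds.)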
+func (c *processCollector) describe(ch chan<- *Desc) { + c.errorDescribeFn(ch) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go similarity index 77% rename from vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go rename to vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go index 14d56d2d06..9f4b130bef 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go @@ -11,8 +11,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build !windows && !js && !wasip1 -// +build !windows,!js,!wasip1 +//go:build !windows && !js && !wasip1 && !darwin +// +build !windows,!js,!wasip1,!darwin package prometheus @@ -78,3 +78,19 @@ func (c *processCollector) processCollect(ch chan<- Metric) { c.reportError(ch, nil, err) } } + +// describe returns all descriptions of the collector for others than windows, js, wasip1 and darwin. +// Ensure that this list of descriptors is kept in sync with the metrics collected +// in the processCollect method. Any changes to the metrics in processCollect +// (such as adding or removing metrics) should be reflected in this list of descriptors. +func (c *processCollector) describe(ch chan<- *Desc) { + ch <- c.cpuTotal + ch <- c.openFDs + ch <- c.maxFDs + ch <- c.vsize + ch <- c.maxVsize + ch <- c.rss + ch <- c.startTime + ch <- c.inBytes + ch <- c.outBytes +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go index f973398df2..fa474289ef 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go @@ -79,14 +79,10 @@ func getProcessHandleCount(handle windows.Handle) (uint32, error) { } func (c *processCollector) processCollect(ch chan<- Metric) { - h, err := windows.GetCurrentProcess() - if err != nil { - c.reportError(ch, nil, err) - return - } + h := windows.CurrentProcess() var startTime, exitTime, kernelTime, userTime windows.Filetime - err = windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime) + err := windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime) if err != nil { c.reportError(ch, nil, err) return @@ -111,6 +107,19 @@ func (c *processCollector) processCollect(ch chan<- Metric) { ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process. } +// describe returns all descriptions of the collector for windows. +// Ensure that this list of descriptors is kept in sync with the metrics collected +// in the processCollect method. Any changes to the metrics in processCollect +// (such as adding or removing metrics) should be reflected in this list of descriptors. 
+func (c *processCollector) describe(ch chan<- *Desc) { + ch <- c.cpuTotal + ch <- c.openFDs + ch <- c.maxFDs + ch <- c.vsize + ch <- c.rss + ch <- c.startTime +} + func fileTimeToSeconds(ft windows.Filetime) float64 { return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7 } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go index e598e66e68..763d99e362 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -41,11 +41,11 @@ import ( "sync" "time" - "github.com/klauspost/compress/zstd" "github.com/prometheus/common/expfmt" "github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil" "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp/internal" ) const ( @@ -65,7 +65,13 @@ const ( Zstd Compression = "zstd" ) -var defaultCompressionFormats = []Compression{Identity, Gzip, Zstd} +func defaultCompressionFormats() []Compression { + if internal.NewZstdWriter != nil { + return []Compression{Identity, Gzip, Zstd} + } else { + return []Compression{Identity, Gzip} + } +} var gzipPool = sync.Pool{ New: func() interface{} { @@ -138,7 +144,7 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO // Select compression formats to offer based on default or user choice. var compressions []string if !opts.DisableCompression { - offers := defaultCompressionFormats + offers := defaultCompressionFormats() if len(opts.OfferedCompressions) > 0 { offers = opts.OfferedCompressions } @@ -207,7 +213,13 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO if encodingHeader != string(Identity) { rsp.Header().Set(contentEncodingHeader, encodingHeader) } - enc := expfmt.NewEncoder(w, contentType) + + var enc expfmt.Encoder + if opts.EnableOpenMetricsTextCreatedSamples { + enc = expfmt.NewEncoder(w, contentType, expfmt.WithCreatedLines()) + } else { + enc = expfmt.NewEncoder(w, contentType) + } // handleError handles the error according to opts.ErrorHandling // and returns true if we have to abort after the handling. @@ -408,6 +420,21 @@ type HandlerOpts struct { // (which changes the identity of the resulting series on the Prometheus // server). EnableOpenMetrics bool + // EnableOpenMetricsTextCreatedSamples specifies if this handler should add, extra, synthetic + // Created Timestamps for counters, histograms and summaries, which for the current + // version of OpenMetrics are defined as extra series with the same name and "_created" + // suffix. See also the OpenMetrics specification for more details + // https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#counter-1 + // + // Created timestamps are used to improve the accuracy of reset detection, + // but the way it's designed in OpenMetrics 1.0 it also dramatically increases cardinality + // if the scraper does not handle those metrics correctly (converting to created timestamp + // instead of leaving those series as-is). New OpenMetrics versions might improve + // this situation. + // + // Prometheus introduced the feature flag 'created-timestamp-zero-ingestion' + // in version 2.50.0 to handle this situation. 
+ EnableOpenMetricsTextCreatedSamples bool // ProcessStartTime allows setting process start timevalue that will be exposed // with "Process-Start-Time-Unix" response header along with the metrics // payload. This allow callers to have efficient transformations to cumulative @@ -445,14 +472,12 @@ func negotiateEncodingWriter(r *http.Request, rw io.Writer, compressions []strin switch selected { case "zstd": - // TODO(mrueg): Replace klauspost/compress with stdlib implementation once https://github.com/golang/go/issues/62513 is implemented. - z, err := zstd.NewWriter(rw, zstd.WithEncoderLevel(zstd.SpeedFastest)) - if err != nil { - return nil, "", func() {}, err + if internal.NewZstdWriter == nil { + // The content encoding was not implemented yet. + return nil, "", func() {}, fmt.Errorf("content compression format not recognized: %s. Valid formats are: %s", selected, defaultCompressionFormats()) } - - z.Reset(rw) - return z, selected, func() { _ = z.Close() }, nil + writer, closeWriter, err := internal.NewZstdWriter(rw) + return writer, selected, closeWriter, err case "gzip": gz := gzipPool.Get().(*gzip.Writer) gz.Reset(rw) @@ -462,6 +487,6 @@ func negotiateEncodingWriter(r *http.Request, rw io.Writer, compressions []strin return rw, selected, func() {}, nil default: // The content encoding was not implemented yet. - return nil, "", func() {}, fmt.Errorf("content compression format not recognized: %s. Valid formats are: %s", selected, defaultCompressionFormats) + return nil, "", func() {}, fmt.Errorf("content compression format not recognized: %s. Valid formats are: %s", selected, defaultCompressionFormats()) } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/internal/compression.go similarity index 70% rename from vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go rename to vendor/github.com/prometheus/client_golang/prometheus/promhttp/internal/compression.go index b1e363d6cf..c5039590f7 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_js.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/internal/compression.go @@ -1,4 +1,4 @@ -// Copyright 2019 The Prometheus Authors +// Copyright 2025 The Prometheus Authors // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -11,16 +11,11 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build js -// +build js +package internal -package prometheus +import ( + "io" +) -func canCollectProcess() bool { - return false -} - -func (c *processCollector) processCollect(ch chan<- Metric) { - // noop on this platform - return -} +// NewZstdWriter enables zstd write support if non-nil. 
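The new HandlerOpts field documented above is opt-in; a minimal sketch of wiring it up, assuming a client_golang release that ships this field:

    package main

    import (
        "net/http"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    func main() {
        // Opt in to OpenMetrics negotiation and the extra _created series.
        // Mind the cardinality caveat from the HandlerOpts comment above if
        // the scraper does not convert _created series to created timestamps.
        handler := promhttp.HandlerFor(prometheus.DefaultGatherer, promhttp.HandlerOpts{
            EnableOpenMetrics:                   true,
            EnableOpenMetricsTextCreatedSamples: true,
        })
        http.Handle("/metrics", handler)
        _ = http.ListenAndServe(":8080", nil) // error handling elided for brevity
    }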
+var NewZstdWriter func(rw io.Writer) (_ io.Writer, closeWriter func(), _ error) diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go index 1ab0e47965..ac5203c6fa 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -243,6 +243,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { s := &summary{ desc: desc, + now: opts.now, objectives: opts.Objectives, sortedObjectives: make([]float64, 0, len(opts.Objectives)), @@ -280,6 +281,8 @@ type summary struct { desc *Desc + now func() time.Time + objectives map[float64]float64 sortedObjectives []float64 @@ -307,7 +310,7 @@ func (s *summary) Observe(v float64) { s.bufMtx.Lock() defer s.bufMtx.Unlock() - now := time.Now() + now := s.now() if now.After(s.hotBufExpTime) { s.asyncFlush(now) } @@ -326,7 +329,7 @@ func (s *summary) Write(out *dto.Metric) error { s.bufMtx.Lock() s.mtx.Lock() // Swap bufs even if hotBuf is empty to set new hotBufExpTime. - s.swapBufs(time.Now()) + s.swapBufs(s.now()) s.bufMtx.Unlock() s.flushColdBuf() diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go index fdc1e62394..68645ed0a9 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/promlint/validations/duplicate_validations.go @@ -14,7 +14,7 @@ package validations import ( - "fmt" + "errors" "reflect" dto "github.com/prometheus/client_model/go" @@ -27,7 +27,7 @@ func LintDuplicateMetric(mf *dto.MetricFamily) []error { for i, m := range mf.Metric { for _, k := range mf.Metric[i+1:] { if reflect.DeepEqual(m.Label, k.Label) { - problems = append(problems, fmt.Errorf("metric not unique")) + problems = append(problems, errors.New("metric not unique")) break } } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go index 6f1200180a..1258508e4f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go @@ -39,6 +39,7 @@ package testutil import ( "bytes" + "errors" "fmt" "io" "net/http" @@ -46,6 +47,7 @@ import ( "github.com/kylelemons/godebug/diff" dto "github.com/prometheus/client_model/go" "github.com/prometheus/common/expfmt" + "github.com/prometheus/common/model" "google.golang.org/protobuf/proto" "github.com/prometheus/client_golang/prometheus" @@ -300,20 +302,20 @@ func compareMetricFamilies(got, expected []*dto.MetricFamily, metricNames ...str // result. 
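The NewZstdWriter variable above replaces a hard dependency on a zstd implementation with a nil-able function hook: the handler only advertises zstd when something has wired the hook in. A generic sketch of the pattern under assumed names (this is not the library's public API, which lives in an internal package):

    package featurehook

    import "io"

    // NewZstdWriter stays nil unless an optional package assigns an
    // implementation, typically from an init() in a build-tag-guarded file.
    var NewZstdWriter func(w io.Writer) (io.Writer, func(), error)

    // offered reports the compression formats this build can actually serve,
    // mirroring how defaultCompressionFormats() checks the hook above.
    func offered() []string {
        formats := []string{"identity", "gzip"}
        if NewZstdWriter != nil { // advertise zstd only when the hook is set
            formats = append(formats, "zstd")
        }
        return formats
    }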
func compare(got, want []*dto.MetricFamily) error { var gotBuf, wantBuf bytes.Buffer - enc := expfmt.NewEncoder(&gotBuf, expfmt.NewFormat(expfmt.TypeTextPlain)) + enc := expfmt.NewEncoder(&gotBuf, expfmt.NewFormat(expfmt.TypeTextPlain).WithEscapingScheme(model.NoEscaping)) for _, mf := range got { if err := enc.Encode(mf); err != nil { return fmt.Errorf("encoding gathered metrics failed: %w", err) } } - enc = expfmt.NewEncoder(&wantBuf, expfmt.NewFormat(expfmt.TypeTextPlain)) + enc = expfmt.NewEncoder(&wantBuf, expfmt.NewFormat(expfmt.TypeTextPlain).WithEscapingScheme(model.NoEscaping)) for _, mf := range want { if err := enc.Encode(mf); err != nil { return fmt.Errorf("encoding expected metrics failed: %w", err) } } if diffErr := diff.Diff(gotBuf.String(), wantBuf.String()); diffErr != "" { - return fmt.Errorf(diffErr) + return errors.New(diffErr) } return nil } diff --git a/vendor/github.com/prometheus/common/config/headers.go b/vendor/github.com/prometheus/common/config/headers.go index 7276742ec9..9beaae26c2 100644 --- a/vendor/github.com/prometheus/common/config/headers.go +++ b/vendor/github.com/prometheus/common/config/headers.go @@ -24,9 +24,9 @@ import ( "strings" ) -// reservedHeaders that change the connection, are set by Prometheus, or can +// ReservedHeaders that change the connection, are set by Prometheus, or can // be changed otherwise. -var reservedHeaders = map[string]struct{}{ +var ReservedHeaders = map[string]struct{}{ "Authorization": {}, "Host": {}, "Content-Encoding": {}, @@ -72,7 +72,7 @@ func (h *Headers) SetDirectory(dir string) { // Validate validates the Headers config. func (h *Headers) Validate() error { for n := range h.Headers { - if _, ok := reservedHeaders[http.CanonicalHeaderKey(n)]; ok { + if _, ok := ReservedHeaders[http.CanonicalHeaderKey(n)]; ok { return fmt.Errorf("setting header %q is not allowed", http.CanonicalHeaderKey(n)) } } diff --git a/vendor/github.com/prometheus/common/config/http_config.go b/vendor/github.com/prometheus/common/config/http_config.go index 57ec252adf..63809083ac 100644 --- a/vendor/github.com/prometheus/common/config/http_config.go +++ b/vendor/github.com/prometheus/common/config/http_config.go @@ -52,7 +52,8 @@ var ( http2Enabled: true, // 5 minutes is typically above the maximum sane scrape interval. So we can // use keepalive for all configurations. - idleConnTimeout: 5 * time.Minute, + idleConnTimeout: 5 * time.Minute, + newTLSConfigFunc: NewTLSConfigWithContext, } ) @@ -452,8 +453,12 @@ func (a *BasicAuth) UnmarshalYAML(unmarshal func(interface{}) error) error { // by net.Dialer. type DialContextFunc func(context.Context, string, string) (net.Conn, error) +// NewTLSConfigFunc returns tls.Config. +type NewTLSConfigFunc func(context.Context, *TLSConfig, ...TLSConfigOption) (*tls.Config, error) + type httpClientOptions struct { dialContextFunc DialContextFunc + newTLSConfigFunc NewTLSConfigFunc keepAlivesEnabled bool http2Enabled bool idleConnTimeout time.Duration @@ -473,13 +478,23 @@ func (f httpClientOptionFunc) applyToHTTPClientOptions(options *httpClientOption f(options) } -// WithDialContextFunc allows you to override func gets used for the actual dialing. The default is `net.Dialer.DialContext`. +// WithDialContextFunc allows you to override the func gets used for the dialing. +// The default is `net.Dialer.DialContext`. 
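The compare helper above backs testutil's public assertions such as CollectAndCompare. Typical test usage, with the expected exposition written in the same text format the helper encodes to:

    package main_test

    import (
        "strings"
        "testing"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/testutil"
    )

    func TestJobsDoneCounter(t *testing.T) {
        c := prometheus.NewCounter(prometheus.CounterOpts{
            Name: "jobs_done_total",
            Help: "Jobs completed.",
        })
        c.Inc()

        expected := "# HELP jobs_done_total Jobs completed.\n" +
            "# TYPE jobs_done_total counter\n" +
            "jobs_done_total 1\n"
        if err := testutil.CollectAndCompare(c, strings.NewReader(expected)); err != nil {
            t.Fatal(err)
        }
    }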
func WithDialContextFunc(fn DialContextFunc) HTTPClientOption { return httpClientOptionFunc(func(opts *httpClientOptions) { opts.dialContextFunc = fn }) } +// WithNewTLSConfigFunc allows you to override the func that creates the TLS config +// from the prometheus http config. +// The default is `NewTLSConfigWithContext`. +func WithNewTLSConfigFunc(newTLSConfigFunc NewTLSConfigFunc) HTTPClientOption { + return httpClientOptionFunc(func(opts *httpClientOptions) { + opts.newTLSConfigFunc = newTLSConfigFunc + }) +} + // WithKeepAlivesDisabled allows to disable HTTP keepalive. func WithKeepAlivesDisabled() HTTPClientOption { return httpClientOptionFunc(func(opts *httpClientOptions) { @@ -670,7 +685,7 @@ func NewRoundTripperFromConfigWithContext(ctx context.Context, cfg HTTPClientCon return rt, nil } - tlsConfig, err := NewTLSConfig(&cfg.TLSConfig, WithSecretManager(opts.secretManager)) + tlsConfig, err := opts.newTLSConfigFunc(ctx, &cfg.TLSConfig, WithSecretManager(opts.secretManager)) if err != nil { return nil, err } @@ -679,6 +694,7 @@ func NewRoundTripperFromConfigWithContext(ctx context.Context, cfg HTTPClientCon if err != nil { return nil, err } + if tlsSettings.immutable() { // No need for a RoundTripper that reloads the files automatically. return newRT(tlsConfig) diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index f1c495dd60..a21ed4ec1f 100644 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -38,7 +38,7 @@ type EncoderOption func(*encoderOption) // WithCreatedLines is an EncoderOption that configures the OpenMetrics encoder // to include _created lines (See -// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#counter-1). +// https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#counter-1). // Created timestamps can improve the accuracy of series reset detection, but // come with a bandwidth cost. // @@ -102,7 +102,7 @@ func WithUnit() EncoderOption { // // - According to the OM specs, the `# UNIT` line is optional, but if populated, // the unit has to be present in the metric name as its suffix: -// (see https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#unit). +// (see https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#unit). // However, in order to accommodate any potential scenario where such a change in the // metric name is not desirable, the users are here given the choice of either explicitly // opt in, in case they wish for the unit to be included in the output AND in the metric name diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go index bd3a39e3e1..460f554f29 100644 --- a/vendor/github.com/prometheus/common/model/alert.go +++ b/vendor/github.com/prometheus/common/model/alert.go @@ -65,7 +65,7 @@ func (a *Alert) Resolved() bool { return a.ResolvedAt(time.Now()) } -// ResolvedAt returns true off the activity interval ended before +// ResolvedAt returns true iff the activity interval ended before // the given timestamp. 
func (a *Alert) ResolvedAt(ts time.Time) bool { if a.EndsAt.IsZero() { diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go index 73b7aa3e60..f4a387605f 100644 --- a/vendor/github.com/prometheus/common/model/labels.go +++ b/vendor/github.com/prometheus/common/model/labels.go @@ -22,7 +22,7 @@ import ( ) const ( - // AlertNameLabel is the name of the label containing the an alert's name. + // AlertNameLabel is the name of the label containing the alert's name. AlertNameLabel = "alertname" // ExportedLabelPrefix is the prefix to prepend to the label names present in diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go index 0daca836af..a6b01755bd 100644 --- a/vendor/github.com/prometheus/common/model/metric.go +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -27,14 +27,26 @@ import ( ) var ( - // NameValidationScheme determines the method of name validation to be used by - // all calls to IsValidMetricName() and LabelName IsValid(). Setting UTF-8 mode - // in isolation from other components that don't support UTF-8 may result in - // bugs or other undefined behavior. This value is intended to be set by - // UTF-8-aware binaries as part of their startup. To avoid need for locking, - // this value should be set once, ideally in an init(), before multiple - // goroutines are started. - NameValidationScheme = LegacyValidation + // NameValidationScheme determines the global default method of the name + // validation to be used by all calls to IsValidMetricName() and LabelName + // IsValid(). + // + // Deprecated: This variable should not be used and might be removed in the + // far future. If you wish to stick to the legacy name validation use + // `IsValidLegacyMetricName()` and `LabelName.IsValidLegacy()` methods + // instead. This variable is here as an escape hatch for emergency cases, + // given the recent change from `LegacyValidation` to `UTF8Validation`, e.g., + // to delay UTF-8 migrations in time or aid in debugging unforeseen results of + // the change. In such a case, a temporary assignment to `LegacyValidation` + // value in the `init()` function in your main.go or so, could be considered. + // + // Historically we opted for a global variable for feature gating different + // validation schemes in operations that were not otherwise easily adjustable + // (e.g. Labels yaml unmarshaling). That could have been a mistake, a separate + // Labels structure or package might have been a better choice. Given the + // change was made and many upgraded the common already, we live this as-is + // with this warning and learning for the future. + NameValidationScheme = UTF8Validation // NameEscapingScheme defines the default way that names will be escaped when // presented to systems that do not support UTF-8 names. If the Content-Type @@ -50,7 +62,7 @@ var ( type ValidationScheme int const ( - // LegacyValidation is a setting that requirets that metric and label names + // LegacyValidation is a setting that requires that all metric and label names // conform to the original Prometheus character requirements described by // MetricNameRE and LabelNameRE. 
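The deprecation note above steers callers toward the per-call helpers instead of the global variable. A short sketch, assuming a prometheus/common release in which IsValidLegacyMetricName takes a plain string:

    package main

    import (
        "fmt"

        "github.com/prometheus/common/model"
    )

    func main() {
        // Check legacy validity explicitly rather than flipping the global scheme.
        fmt.Println(model.IsValidLegacyMetricName("http_requests_total")) // true
        fmt.Println(model.IsValidLegacyMetricName("http.requests.total")) // false, dots are UTF-8 only
        fmt.Println(model.LabelName("instance").IsValidLegacy())          // true
    }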
LegacyValidation ValidationScheme = iota diff --git a/vendor/github.com/prometheus/common/promslog/slog.go b/vendor/github.com/prometheus/common/promslog/slog.go new file mode 100644 index 0000000000..f9f8996631 --- /dev/null +++ b/vendor/github.com/prometheus/common/promslog/slog.go @@ -0,0 +1,271 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package promslog defines standardised ways to initialize the Go standard +// library's log/slog logger. +// It should typically only ever be imported by main packages. + +package promslog + +import ( + "fmt" + "io" + "log/slog" + "os" + "path/filepath" + "strconv" + "strings" + "time" +) + +// LogStyle represents the common logging formats in the Prometheus ecosystem. +type LogStyle string + +const ( + SlogStyle LogStyle = "slog" + GoKitStyle LogStyle = "go-kit" + + reservedKeyPrefix = "logged_" +) + +var ( + // LevelFlagOptions represents allowed logging levels. + LevelFlagOptions = []string{"debug", "info", "warn", "error"} + // FormatFlagOptions represents allowed formats. + FormatFlagOptions = []string{"logfmt", "json"} + + defaultWriter = os.Stderr +) + +// Level controls a logging level, with an info default. +// It wraps slog.LevelVar with string-based level control. +// Level is safe to be used concurrently. +type Level struct { + lvl *slog.LevelVar +} + +// NewLevel returns a new Level. +func NewLevel() *Level { + return &Level{ + lvl: &slog.LevelVar{}, + } +} + +func (l *Level) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + type plain string + if err := unmarshal((*plain)(&s)); err != nil { + return err + } + if s == "" { + return nil + } + if err := l.Set(s); err != nil { + return err + } + return nil +} + +// String returns the current level. +func (l *Level) String() string { + switch l.lvl.Level() { + case slog.LevelDebug: + return "debug" + case slog.LevelInfo: + return "info" + case slog.LevelWarn: + return "warn" + case slog.LevelError: + return "error" + default: + return "" + } +} + +// Set updates the logging level with the validation. +func (l *Level) Set(s string) error { + switch strings.ToLower(s) { + case "debug": + l.lvl.Set(slog.LevelDebug) + case "info": + l.lvl.Set(slog.LevelInfo) + case "warn": + l.lvl.Set(slog.LevelWarn) + case "error": + l.lvl.Set(slog.LevelError) + default: + return fmt.Errorf("unrecognized log level %s", s) + } + return nil +} + +// Format controls a logging output format. +// Not concurrency-safe. +type Format struct { + s string +} + +// NewFormat creates a new Format. +func NewFormat() *Format { return &Format{} } + +func (f *Format) String() string { + return f.s +} + +// Set updates the value of the allowed format. 
+func (f *Format) Set(s string) error { + switch s { + case "logfmt", "json": + f.s = s + default: + return fmt.Errorf("unrecognized log format %s", s) + } + return nil +} + +// Config is a struct containing configurable settings for the logger +type Config struct { + Level *Level + Format *Format + Style LogStyle + Writer io.Writer +} + +func newGoKitStyleReplaceAttrFunc(lvl *Level) func(groups []string, a slog.Attr) slog.Attr { + return func(groups []string, a slog.Attr) slog.Attr { + key := a.Key + switch key { + case slog.TimeKey, "ts": + if t, ok := a.Value.Any().(time.Time); ok { + a.Key = "ts" + + // This timestamp format differs from RFC3339Nano by using .000 instead + // of .999999999 which changes the timestamp from 9 variable to 3 fixed + // decimals (.130 instead of .130987456). + a.Value = slog.StringValue(t.UTC().Format("2006-01-02T15:04:05.000Z07:00")) + } else { + // If we can't cast the any from the value to a + // time.Time, it means the caller logged + // another attribute with a key of `ts`. + // Prevent duplicate keys (necessary for proper + // JSON) by renaming the key to `logged_ts`. + a.Key = reservedKeyPrefix + key + } + case slog.SourceKey, "caller": + if src, ok := a.Value.Any().(*slog.Source); ok { + a.Key = "caller" + switch lvl.String() { + case "debug": + a.Value = slog.StringValue(filepath.Base(src.File) + "(" + filepath.Base(src.Function) + "):" + strconv.Itoa(src.Line)) + default: + a.Value = slog.StringValue(filepath.Base(src.File) + ":" + strconv.Itoa(src.Line)) + } + } else { + // If we can't cast the any from the value to + // an *slog.Source, it means the caller logged + // another attribute with a key of `caller`. + // Prevent duplicate keys (necessary for proper + // JSON) by renaming the key to + // `logged_caller`. + a.Key = reservedKeyPrefix + key + } + case slog.LevelKey: + if lvl, ok := a.Value.Any().(slog.Level); ok { + a.Value = slog.StringValue(strings.ToLower(lvl.String())) + } else { + // If we can't cast the any from the value to + // an slog.Level, it means the caller logged + // another attribute with a key of `level`. + // Prevent duplicate keys (necessary for proper + // JSON) by renaming the key to `logged_level`. + a.Key = reservedKeyPrefix + key + } + default: + } + return a + } +} + +func defaultReplaceAttr(_ []string, a slog.Attr) slog.Attr { + key := a.Key + switch key { + case slog.TimeKey: + if t, ok := a.Value.Any().(time.Time); ok { + a.Value = slog.TimeValue(t.UTC()) + } else { + // If we can't cast the any from the value to a + // time.Time, it means the caller logged + // another attribute with a key of `time`. + // Prevent duplicate keys (necessary for proper + // JSON) by renaming the key to `logged_time`. + a.Key = reservedKeyPrefix + key + } + case slog.SourceKey: + if src, ok := a.Value.Any().(*slog.Source); ok { + a.Value = slog.StringValue(filepath.Base(src.File) + ":" + strconv.Itoa(src.Line)) + } else { + // If we can't cast the any from the value to + // an *slog.Source, it means the caller logged + // another attribute with a key of `source`. + // Prevent duplicate keys (necessary for proper + // JSON) by renaming the key to + // `logged_source`. + a.Key = reservedKeyPrefix + key + } + case slog.LevelKey: + if _, ok := a.Value.Any().(slog.Level); !ok { + // If we can't cast the any from the value to + // an slog.Level, it means the caller logged + // another attribute with a key of `level`. + // Prevent duplicate keys (necessary for proper + // JSON) by renaming the key to + // `logged_level`. 
+ a.Key = reservedKeyPrefix + key + } + default: + } + return a +} + +// New returns a new slog.Logger. Each logged line will be annotated +// with a timestamp. The output always goes to stderr. +func New(config *Config) *slog.Logger { + if config.Level == nil { + config.Level = NewLevel() + } + + if config.Writer == nil { + config.Writer = defaultWriter + } + + logHandlerOpts := &slog.HandlerOptions{ + Level: config.Level.lvl, + AddSource: true, + ReplaceAttr: defaultReplaceAttr, + } + + if config.Style == GoKitStyle { + logHandlerOpts.ReplaceAttr = newGoKitStyleReplaceAttrFunc(config.Level) + } + + if config.Format != nil && config.Format.s == "json" { + return slog.New(slog.NewJSONHandler(config.Writer, logHandlerOpts)) + } + return slog.New(slog.NewTextHandler(config.Writer, logHandlerOpts)) +} + +// NewNopLogger is a convenience function to return an slog.Logger that writes +// to io.Discard. +func NewNopLogger() *slog.Logger { + return slog.New(slog.NewTextHandler(io.Discard, nil)) +} diff --git a/vendor/github.com/prometheus/prometheus/config/config.go b/vendor/github.com/prometheus/prometheus/config/config.go index 173689d6af..f140044baa 100644 --- a/vendor/github.com/prometheus/prometheus/config/config.go +++ b/vendor/github.com/prometheus/prometheus/config/config.go @@ -16,6 +16,8 @@ package config import ( "errors" "fmt" + "log/slog" + "mime" "net/url" "os" "path/filepath" @@ -25,18 +27,17 @@ import ( "time" "github.com/alecthomas/units" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/grafana/regexp" "github.com/prometheus/common/config" "github.com/prometheus/common/model" - "github.com/prometheus/common/sigv4" + "github.com/prometheus/sigv4" "gopkg.in/yaml.v2" "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/model/relabel" "github.com/prometheus/prometheus/storage/remote/azuread" + "github.com/prometheus/prometheus/storage/remote/googleiam" ) var ( @@ -66,8 +67,13 @@ var ( } ) +const ( + LegacyValidationConfig = "legacy" + UTF8ValidationConfig = "utf8" +) + // Load parses the YAML input s into a Config. -func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, error) { +func Load(s string, logger *slog.Logger) (*Config, error) { cfg := &Config{} // If the entire config body is empty the UnmarshalYAML method is // never called. We thus have to set the DefaultConfig at the entry @@ -79,10 +85,6 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, erro return nil, err } - if !expandExternalLabels { - return cfg, nil - } - b := labels.NewScratchBuilder(0) cfg.GlobalConfig.ExternalLabels.Range(func(v labels.Label) { newV := os.Expand(v.Value, func(s string) string { @@ -92,26 +94,41 @@ func Load(s string, expandExternalLabels bool, logger log.Logger) (*Config, erro if v := os.Getenv(s); v != "" { return v } - level.Warn(logger).Log("msg", "Empty environment variable", "name", s) + logger.Warn("Empty environment variable", "name", s) return "" }) if newV != v.Value { - level.Debug(logger).Log("msg", "External label replaced", "label", v.Name, "input", v.Value, "output", newV) + logger.Debug("External label replaced", "label", v.Name, "input", v.Value, "output", newV) } // Note newV can be blank. 
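Taken together, the promslog pieces above give a small, validated wrapper around log/slog; a minimal initialization sketch:

    package main

    import "github.com/prometheus/common/promslog"

    func main() {
        level := promslog.NewLevel()
        if err := level.Set("debug"); err != nil { // unknown strings return an error
            panic(err)
        }

        // Level and Writer are optional; New falls back to info and os.Stderr.
        logger := promslog.New(&promslog.Config{Level: level})
        logger.Info("server ready", "port", 9090)
    }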
https://github.com/prometheus/prometheus/issues/11024 b.Add(v.Name, newV) }) - cfg.GlobalConfig.ExternalLabels = b.Labels() + if !b.Labels().IsEmpty() { + cfg.GlobalConfig.ExternalLabels = b.Labels() + } + + switch cfg.OTLPConfig.TranslationStrategy { + case UnderscoreEscapingWithSuffixes: + case "": + case NoTranslation, NoUTF8EscapingWithSuffixes: + if cfg.GlobalConfig.MetricNameValidationScheme == LegacyValidationConfig { + return nil, fmt.Errorf("OTLP translation strategy %q is not allowed when UTF8 is disabled", cfg.OTLPConfig.TranslationStrategy) + } + default: + return nil, fmt.Errorf("unsupported OTLP translation strategy %q", cfg.OTLPConfig.TranslationStrategy) + } + cfg.loaded = true return cfg, nil } -// LoadFile parses the given YAML file into a Config. -func LoadFile(filename string, agentMode, expandExternalLabels bool, logger log.Logger) (*Config, error) { +// LoadFile parses and validates the given YAML file into a read-only Config. +// Callers should never write to or shallow copy the returned Config. +func LoadFile(filename string, agentMode bool, logger *slog.Logger) (*Config, error) { content, err := os.ReadFile(filename) if err != nil { return nil, err } - cfg, err := Load(string(content), expandExternalLabels, logger) + cfg, err := Load(string(content), logger) if err != nil { return nil, fmt.Errorf("parsing YAML file %s: %w", filename, err) } @@ -139,6 +156,7 @@ var ( // DefaultConfig is the default top-level configuration. DefaultConfig = Config{ GlobalConfig: DefaultGlobalConfig, + Runtime: DefaultRuntimeConfig, } // DefaultGlobalConfig is the default global configuration. @@ -149,7 +167,8 @@ var ( RuleQueryOffset: model.Duration(0 * time.Minute), // When native histogram feature flag is enabled, ScrapeProtocols default // changes to DefaultNativeHistogramScrapeProtocols. - ScrapeProtocols: DefaultScrapeProtocols, + ScrapeProtocols: DefaultScrapeProtocols, + ConvertClassicHistogramsToNHCB: false, } DefaultRuntimeConfig = RuntimeConfig{ @@ -160,13 +179,13 @@ var ( // DefaultScrapeConfig is the default scrape configuration. DefaultScrapeConfig = ScrapeConfig{ // ScrapeTimeout, ScrapeInterval and ScrapeProtocols default to the configured globals. - ScrapeClassicHistograms: false, - MetricsPath: "/metrics", - Scheme: "http", - HonorLabels: false, - HonorTimestamps: true, - HTTPClientConfig: config.DefaultHTTPClientConfig, - EnableCompression: true, + AlwaysScrapeClassicHistograms: false, + MetricsPath: "/metrics", + Scheme: "http", + HonorLabels: false, + HonorTimestamps: true, + HTTPClientConfig: config.DefaultHTTPClientConfig, + EnableCompression: true, } // DefaultAlertmanagerConfig is the default alertmanager configuration. @@ -177,13 +196,18 @@ var ( HTTPClientConfig: config.DefaultHTTPClientConfig, } + DefaultRemoteWriteHTTPClientConfig = config.HTTPClientConfig{ + FollowRedirects: true, + EnableHTTP2: false, + } + // DefaultRemoteWriteConfig is the default remote write configuration. DefaultRemoteWriteConfig = RemoteWriteConfig{ RemoteTimeout: model.Duration(30 * time.Second), ProtobufMessage: RemoteWriteProtoMsgV1, QueueConfig: DefaultQueueConfig, MetadataConfig: DefaultMetadataConfig, - HTTPClientConfig: config.DefaultHTTPClientConfig, + HTTPClientConfig: DefaultRemoteWriteHTTPClientConfig, } // DefaultQueueConfig is the default remote queue configuration. @@ -215,6 +239,7 @@ var ( // DefaultRemoteReadConfig is the default remote read configuration. 
DefaultRemoteReadConfig = RemoteReadConfig{ RemoteTimeout: model.Duration(1 * time.Minute), + ChunkedReadLimit: DefaultChunkedReadLimit, HTTPClientConfig: config.DefaultHTTPClientConfig, FilterExternalLabels: true, } @@ -227,6 +252,11 @@ var ( DefaultExemplarsConfig = ExemplarsConfig{ MaxExemplars: 100000, } + + // DefaultOTLPConfig is the default OTLP configuration. + DefaultOTLPConfig = OTLPConfig{ + TranslationStrategy: UnderscoreEscapingWithSuffixes, + } ) // Config is the top-level configuration for Prometheus's config files. @@ -242,9 +272,13 @@ type Config struct { RemoteWriteConfigs []*RemoteWriteConfig `yaml:"remote_write,omitempty"` RemoteReadConfigs []*RemoteReadConfig `yaml:"remote_read,omitempty"` + OTLPConfig OTLPConfig `yaml:"otlp,omitempty"` + + loaded bool // Certain methods require configuration to use Load validation. } // SetDirectory joins any relative file paths with dir. +// This method writes to config, and it's not concurrency safe. func (c *Config) SetDirectory(dir string) { c.GlobalConfig.SetDirectory(dir) c.AlertingConfig.SetDirectory(dir) @@ -274,24 +308,26 @@ func (c Config) String() string { return string(b) } -// GetScrapeConfigs returns the scrape configurations. +// GetScrapeConfigs returns the read-only, validated scrape configurations including +// the ones from the scrape_config_files. +// This method does not write to config, and it's concurrency safe (the pointer receiver is for efficiency). +// This method also assumes the Config was created by Load or LoadFile function, it returns error +// if it was not. We can't re-validate or apply globals here due to races, +// read more https://github.com/prometheus/prometheus/issues/15538. func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) { - scfgs := make([]*ScrapeConfig, len(c.ScrapeConfigs)) + if !c.loaded { + // Programmatic error, we warn before more confusing errors would happen due to lack of the globalization. + return nil, errors.New("scrape config cannot be fetched, main config was not validated and loaded correctly; should not happen") + } + scfgs := make([]*ScrapeConfig, len(c.ScrapeConfigs)) jobNames := map[string]string{} for i, scfg := range c.ScrapeConfigs { - // We do these checks for library users that would not call validate in - // Unmarshal. - if err := scfg.Validate(c.GlobalConfig); err != nil { - return nil, err - } - - if _, ok := jobNames[scfg.JobName]; ok { - return nil, fmt.Errorf("found multiple scrape configs with job name %q", scfg.JobName) - } jobNames[scfg.JobName] = "main config file" scfgs[i] = scfg } + + // Re-read and validate the dynamic scrape config rules. for _, pat := range c.ScrapeConfigFiles { fs, err := filepath.Glob(pat) if err != nil { @@ -327,6 +363,7 @@ func (c *Config) GetScrapeConfigs() ([]*ScrapeConfig, error) { } // UnmarshalYAML implements the yaml.Unmarshaler interface. +// NOTE: This method should not be used outside of this package. Use Load or LoadFile instead. func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { *c = DefaultConfig // We want to set c to the defaults and then overwrite it with the input. @@ -363,18 +400,18 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { } } - // Do global overrides and validate unique names. + // Do global overrides and validation. 
jobNames := map[string]struct{}{} for _, scfg := range c.ScrapeConfigs { if err := scfg.Validate(c.GlobalConfig); err != nil { return err } - if _, ok := jobNames[scfg.JobName]; ok { return fmt.Errorf("found multiple scrape configs with job name %q", scfg.JobName) } jobNames[scfg.JobName] = struct{}{} } + rwNames := map[string]struct{}{} for _, rwcfg := range c.RemoteWriteConfigs { if rwcfg == nil { @@ -418,6 +455,8 @@ type GlobalConfig struct { RuleQueryOffset model.Duration `yaml:"rule_query_offset,omitempty"` // File to which PromQL queries are logged. QueryLogFile string `yaml:"query_log_file,omitempty"` + // File to which scrape failures are logged. + ScrapeFailureLogFile string `yaml:"scrape_failure_log_file,omitempty"` // The labels to add to any timeseries that this Prometheus instance scrapes. ExternalLabels labels.Labels `yaml:"external_labels,omitempty"` // An uncompressed response body larger than this many bytes will cause the @@ -441,6 +480,15 @@ type GlobalConfig struct { // Keep no more than this many dropped targets per job. // 0 means no limit. KeepDroppedTargets uint `yaml:"keep_dropped_targets,omitempty"` + // Allow UTF8 Metric and Label Names. Can be blank in config files but must + // have a value if a ScrepeConfig is created programmatically. + MetricNameValidationScheme string `yaml:"metric_name_validation_scheme,omitempty"` + // Metric name escaping mode to request through content negotiation. Can be + // blank in config files but must have a value if a ScrepeConfig is created + // programmatically. + MetricNameEscapingScheme string `yaml:"metric_name_escaping_scheme,omitempty"` + // Whether to convert all scraped classic histograms into native histograms with custom buckets. + ConvertClassicHistogramsToNHCB bool `yaml:"convert_classic_histograms_to_nhcb,omitempty"` } // ScrapeProtocol represents supported protocol for scraping metrics. @@ -461,15 +509,30 @@ func (s ScrapeProtocol) Validate() error { return nil } +// HeaderMediaType returns the MIME mediaType for a particular ScrapeProtocol. +func (s ScrapeProtocol) HeaderMediaType() string { + if _, ok := ScrapeProtocolsHeaders[s]; !ok { + return "" + } + mediaType, _, err := mime.ParseMediaType(ScrapeProtocolsHeaders[s]) + if err != nil { + return "" + } + return mediaType +} + var ( PrometheusProto ScrapeProtocol = "PrometheusProto" PrometheusText0_0_4 ScrapeProtocol = "PrometheusText0.0.4" + PrometheusText1_0_0 ScrapeProtocol = "PrometheusText1.0.0" OpenMetricsText0_0_1 ScrapeProtocol = "OpenMetricsText0.0.1" OpenMetricsText1_0_0 ScrapeProtocol = "OpenMetricsText1.0.0" + UTF8NamesHeader string = model.EscapingKey + "=" + model.AllowUTF8 ScrapeProtocolsHeaders = map[ScrapeProtocol]string{ PrometheusProto: "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited", PrometheusText0_0_4: "text/plain;version=0.0.4", + PrometheusText1_0_0: "text/plain;version=1.0.0", OpenMetricsText0_0_1: "application/openmetrics-text;version=0.0.1", OpenMetricsText1_0_0: "application/openmetrics-text;version=1.0.0", } @@ -479,6 +542,7 @@ var ( DefaultScrapeProtocols = []ScrapeProtocol{ OpenMetricsText1_0_0, OpenMetricsText0_0_1, + PrometheusText1_0_0, PrometheusText0_0_4, } @@ -490,6 +554,7 @@ var ( PrometheusProto, OpenMetricsText1_0_0, OpenMetricsText0_0_1, + PrometheusText1_0_0, PrometheusText0_0_4, } ) @@ -515,6 +580,7 @@ func validateAcceptScrapeProtocols(sps []ScrapeProtocol) error { // SetDirectory joins any relative file paths with dir. 
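Given the stricter Load contract above (configs are validated, marked as loaded, and treated as read-only), downstream code is expected to go through LoadFile and then GetScrapeConfigs; a sketch assuming a prometheus.yml on disk:

    package main

    import (
        "log"

        "github.com/prometheus/common/promslog"
        "github.com/prometheus/prometheus/config"
    )

    func main() {
        // LoadFile validates and sets the loaded flag; GetScrapeConfigs
        // refuses to operate on a Config constructed any other way.
        cfg, err := config.LoadFile("prometheus.yml", false, promslog.NewNopLogger())
        if err != nil {
            log.Fatal(err)
        }
        scfgs, err := cfg.GetScrapeConfigs()
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("loaded %d scrape configs", len(scfgs))
    }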
func (c *GlobalConfig) SetDirectory(dir string) { c.QueryLogFile = config.JoinDir(dir, c.QueryLogFile) + c.ScrapeFailureLogFile = config.JoinDir(dir, c.ScrapeFailureLogFile) } // UnmarshalYAML implements the yaml.Unmarshaler interface. @@ -577,7 +643,9 @@ func (c *GlobalConfig) isZero() bool { c.EvaluationInterval == 0 && c.RuleQueryOffset == 0 && c.QueryLogFile == "" && - c.ScrapeProtocols == nil + c.ScrapeFailureLogFile == "" && + c.ScrapeProtocols == nil && + !c.ConvertClassicHistogramsToNHCB } // RuntimeConfig configures the values for the process behavior. @@ -614,10 +682,19 @@ type ScrapeConfig struct { // The protocols to negotiate during a scrape. It tells clients what // protocol are accepted by Prometheus and with what preference (most wanted is first). // Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1, - // OpenMetricsText1.0.0, PrometheusText0.0.4. + // OpenMetricsText1.0.0, PrometheusText1.0.0, PrometheusText0.0.4. ScrapeProtocols []ScrapeProtocol `yaml:"scrape_protocols,omitempty"` - // Whether to scrape a classic histogram that is also exposed as a native histogram. - ScrapeClassicHistograms bool `yaml:"scrape_classic_histograms,omitempty"` + // The fallback protocol to use if the Content-Type provided by the target + // is not provided, blank, or not one of the expected values. + // Supported values (case sensitive): PrometheusProto, OpenMetricsText0.0.1, + // OpenMetricsText1.0.0, PrometheusText1.0.0, PrometheusText0.0.4. + ScrapeFallbackProtocol ScrapeProtocol `yaml:"fallback_scrape_protocol,omitempty"` + // Whether to scrape a classic histogram, even if it is also exposed as a native histogram. + AlwaysScrapeClassicHistograms bool `yaml:"always_scrape_classic_histograms,omitempty"` + // Whether to convert all scraped classic histograms into a native histogram with custom buckets. + ConvertClassicHistogramsToNHCB *bool `yaml:"convert_classic_histograms_to_nhcb,omitempty"` + // File to which scrape failures are logged. + ScrapeFailureLogFile string `yaml:"scrape_failure_log_file,omitempty"` // The HTTP resource path on which to fetch metrics from targets. MetricsPath string `yaml:"metrics_path,omitempty"` // The URL scheme with which to fetch metrics from targets. @@ -651,6 +728,13 @@ type ScrapeConfig struct { // Keep no more than this many dropped targets per job. // 0 means no limit. KeepDroppedTargets uint `yaml:"keep_dropped_targets,omitempty"` + // Allow UTF8 Metric and Label Names. Can be blank in config files but must + // have a value if a ScrepeConfig is created programmatically. + MetricNameValidationScheme string `yaml:"metric_name_validation_scheme,omitempty"` + // Metric name escaping mode to request through content negotiation. Can be + // blank in config files but must have a value if a ScrepeConfig is created + // programmatically. + MetricNameEscapingScheme string `yaml:"metric_name_escaping_scheme,omitempty"` // We cannot do proper Go type embedding below as the parser will then parse // values arbitrarily into the overflow maps of further-down types. @@ -668,6 +752,7 @@ type ScrapeConfig struct { func (c *ScrapeConfig) SetDirectory(dir string) { c.ServiceDiscoveryConfigs.SetDirectory(dir) c.HTTPClientConfig.SetDirectory(dir) + c.ScrapeFailureLogFile = config.JoinDir(dir, c.ScrapeFailureLogFile) } // UnmarshalYAML implements the yaml.Unmarshaler interface. 
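The validation and escaping scheme fields above default from the global config during Validate, which matters for programmatic ScrapeConfigs since, per the field comments, those must carry a scheme. A sketch of that defaulting:

    package main

    import (
        "log"

        "github.com/prometheus/prometheus/config"
    )

    func main() {
        sc := &config.ScrapeConfig{
            JobName:                    "example",
            MetricNameValidationScheme: config.UTF8ValidationConfig,
        }
        // Validate fills the remaining fields (interval, protocols, escaping
        // scheme, ...) from the globals, mirroring what Load does for files.
        if err := sc.Validate(config.DefaultGlobalConfig); err != nil {
            log.Fatal(err)
        }
        log.Println(sc.MetricNameEscapingScheme) // defaulted to allow UTF-8 here
    }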
@@ -749,6 +834,9 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error { if c.KeepDroppedTargets == 0 { c.KeepDroppedTargets = globalConfig.KeepDroppedTargets } + if c.ScrapeFailureLogFile == "" { + c.ScrapeFailureLogFile = globalConfig.ScrapeFailureLogFile + } if c.ScrapeProtocols == nil { c.ScrapeProtocols = globalConfig.ScrapeProtocols @@ -757,6 +845,65 @@ func (c *ScrapeConfig) Validate(globalConfig GlobalConfig) error { return fmt.Errorf("%w for scrape config with job name %q", err, c.JobName) } + if c.ScrapeFallbackProtocol != "" { + if err := c.ScrapeFallbackProtocol.Validate(); err != nil { + return fmt.Errorf("invalid fallback_scrape_protocol for scrape config with job name %q: %w", c.JobName, err) + } + } + + //nolint:staticcheck + if model.NameValidationScheme != model.UTF8Validation { + return errors.New("model.NameValidationScheme must be set to UTF8") + } + + switch globalConfig.MetricNameValidationScheme { + case "": + globalConfig.MetricNameValidationScheme = UTF8ValidationConfig + case LegacyValidationConfig, UTF8ValidationConfig: + default: + return fmt.Errorf("unknown global name validation method specified, must be either 'legacy' or 'utf8', got %s", globalConfig.MetricNameValidationScheme) + } + // Scrapeconfig validation scheme matches global if left blank. + switch c.MetricNameValidationScheme { + case "": + c.MetricNameValidationScheme = globalConfig.MetricNameValidationScheme + case LegacyValidationConfig, UTF8ValidationConfig: + default: + return fmt.Errorf("unknown scrape config name validation method specified, must be either 'legacy' or 'utf8', got %s", c.MetricNameValidationScheme) + } + + // Escaping scheme is based on the validation scheme if left blank. + switch globalConfig.MetricNameEscapingScheme { + case "": + if globalConfig.MetricNameValidationScheme == LegacyValidationConfig { + globalConfig.MetricNameEscapingScheme = model.EscapeUnderscores + } else { + globalConfig.MetricNameEscapingScheme = model.AllowUTF8 + } + case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues: + default: + return fmt.Errorf("unknown global name escaping method specified, must be one of '%s', '%s', '%s', or '%s', got %s", model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues, globalConfig.MetricNameValidationScheme) + } + + if c.MetricNameEscapingScheme == "" { + c.MetricNameEscapingScheme = globalConfig.MetricNameEscapingScheme + } + + switch c.MetricNameEscapingScheme { + case model.AllowUTF8: + if c.MetricNameValidationScheme != UTF8ValidationConfig { + return errors.New("utf8 metric names requested but validation scheme is not set to UTF8") + } + case model.EscapeUnderscores, model.EscapeDots, model.EscapeValues: + default: + return fmt.Errorf("unknown scrape config name escaping method specified, must be one of '%s', '%s', '%s', or '%s', got %s", model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues, c.MetricNameValidationScheme) + } + + if c.ConvertClassicHistogramsToNHCB == nil { + global := globalConfig.ConvertClassicHistogramsToNHCB + c.ConvertClassicHistogramsToNHCB = &global + } + return nil } @@ -765,6 +912,25 @@ func (c *ScrapeConfig) MarshalYAML() (interface{}, error) { return discovery.MarshalYAMLWithInlineConfigs(c) } +// ToValidationScheme returns the validation scheme for the given string config value. 
+func ToValidationScheme(s string) (validationScheme model.ValidationScheme, err error) { + switch s { + case UTF8ValidationConfig: + validationScheme = model.UTF8Validation + case LegacyValidationConfig: + validationScheme = model.LegacyValidation + default: + return model.UTF8Validation, fmt.Errorf("invalid metric name validation scheme, %s", s) + } + + return validationScheme, nil +} + +// ConvertClassicHistogramsToNHCBEnabled returns whether to convert classic histograms to NHCB. +func (c *ScrapeConfig) ConvertClassicHistogramsToNHCBEnabled() bool { + return c.ConvertClassicHistogramsToNHCB != nil && *c.ConvertClassicHistogramsToNHCB +} + // StorageConfig configures runtime reloadable configuration options. type StorageConfig struct { TSDBConfig *TSDBConfig `yaml:"tsdb,omitempty"` @@ -919,6 +1085,7 @@ func (a AlertmanagerConfigs) ToMap() map[string]*AlertmanagerConfig { // AlertmanagerAPIVersion represents a version of the // github.com/prometheus/alertmanager/api, e.g. 'v1' or 'v2'. +// 'v1' is no longer supported. type AlertmanagerAPIVersion string // UnmarshalYAML implements the yaml.Unmarshaler interface. @@ -948,7 +1115,7 @@ const ( ) var SupportedAlertmanagerAPIVersions = []AlertmanagerAPIVersion{ - AlertmanagerAPIVersionV1, AlertmanagerAPIVersionV2, + AlertmanagerAPIVersionV2, } // AlertmanagerConfig configures how Alertmanagers can be discovered and communicated with. @@ -1000,7 +1167,7 @@ func (c *AlertmanagerConfig) UnmarshalYAML(unmarshal func(interface{}) error) er c.HTTPClientConfig.Authorization != nil || c.HTTPClientConfig.OAuth2 != nil if httpClientConfigAuthEnabled && c.SigV4Config != nil { - return fmt.Errorf("at most one of basic_auth, authorization, oauth2, & sigv4 must be configured") + return errors.New("at most one of basic_auth, authorization, oauth2, & sigv4 must be configured") } // Check for users putting URLs in target groups. @@ -1109,6 +1276,7 @@ type RemoteWriteConfig struct { Name string `yaml:"name,omitempty"` SendExemplars bool `yaml:"send_exemplars,omitempty"` SendNativeHistograms bool `yaml:"send_native_histograms,omitempty"` + RoundRobinDNS bool `yaml:"round_robin_dns,omitempty"` // ProtobufMessage specifies the protobuf message to use against the remote // receiver as specified in https://prometheus.io/docs/specs/remote_write_spec_2_0/ ProtobufMessage RemoteWriteProtoMsg `yaml:"protobuf_message,omitempty"` @@ -1120,6 +1288,7 @@ type RemoteWriteConfig struct { MetadataConfig MetadataConfig `yaml:"metadata_config,omitempty"` SigV4Config *sigv4.SigV4Config `yaml:"sigv4,omitempty"` AzureADConfig *azuread.AzureADConfig `yaml:"azuread,omitempty"` + GoogleIAMConfig *googleiam.Config `yaml:"google_iam,omitempty"` } // SetDirectory joins any relative file paths with dir. @@ -1157,17 +1326,33 @@ func (c *RemoteWriteConfig) UnmarshalYAML(unmarshal func(interface{}) error) err return err } - httpClientConfigAuthEnabled := c.HTTPClientConfig.BasicAuth != nil || - c.HTTPClientConfig.Authorization != nil || c.HTTPClientConfig.OAuth2 != nil + return validateAuthConfigs(c) +} - if httpClientConfigAuthEnabled && (c.SigV4Config != nil || c.AzureADConfig != nil) { - return fmt.Errorf("at most one of basic_auth, authorization, oauth2, sigv4, & azuread must be configured") +// validateAuthConfigs validates that at most one of basic_auth, authorization, oauth2, sigv4, azuread or google_iam must be configured. 
+func validateAuthConfigs(c *RemoteWriteConfig) error { + var authConfigured []string + if c.HTTPClientConfig.BasicAuth != nil { + authConfigured = append(authConfigured, "basic_auth") } - - if c.SigV4Config != nil && c.AzureADConfig != nil { - return fmt.Errorf("at most one of basic_auth, authorization, oauth2, sigv4, & azuread must be configured") + if c.HTTPClientConfig.Authorization != nil { + authConfigured = append(authConfigured, "authorization") + } + if c.HTTPClientConfig.OAuth2 != nil { + authConfigured = append(authConfigured, "oauth2") + } + if c.SigV4Config != nil { + authConfigured = append(authConfigured, "sigv4") + } + if c.AzureADConfig != nil { + authConfigured = append(authConfigured, "azuread") + } + if c.GoogleIAMConfig != nil { + authConfigured = append(authConfigured, "google_iam") + } + if len(authConfigured) > 1 { + return fmt.Errorf("at most one of basic_auth, authorization, oauth2, sigv4, azuread or google_iam must be configured. Currently configured: %v", authConfigured) } - return nil } @@ -1186,7 +1371,7 @@ func validateHeadersForTracing(headers map[string]string) error { func validateHeaders(headers map[string]string) error { for header := range headers { if strings.ToLower(header) == "authorization" { - return errors.New("authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, or azuread parameter") + return errors.New("authorization header must be changed via the basic_auth, authorization, oauth2, sigv4, azuread or google_iam parameter") } if _, ok := reservedHeaders[strings.ToLower(header)]; ok { return fmt.Errorf("%s is a reserved header. It must not be changed", header) @@ -1234,13 +1419,20 @@ type MetadataConfig struct { MaxSamplesPerSend int `yaml:"max_samples_per_send,omitempty"` } +const ( + // DefaultChunkedReadLimit is the default value for the maximum size of the protobuf frame client allows. + // 50MB is the default. This is equivalent to ~100k full XOR chunks and average labelset. + DefaultChunkedReadLimit = 5e+7 +) + // RemoteReadConfig is the configuration for reading from remote storage. type RemoteReadConfig struct { - URL *config.URL `yaml:"url"` - RemoteTimeout model.Duration `yaml:"remote_timeout,omitempty"` - Headers map[string]string `yaml:"headers,omitempty"` - ReadRecent bool `yaml:"read_recent,omitempty"` - Name string `yaml:"name,omitempty"` + URL *config.URL `yaml:"url"` + RemoteTimeout model.Duration `yaml:"remote_timeout,omitempty"` + ChunkedReadLimit uint64 `yaml:"chunked_read_limit,omitempty"` + Headers map[string]string `yaml:"headers,omitempty"` + ReadRecent bool `yaml:"read_recent,omitempty"` + Name string `yaml:"name,omitempty"` // We cannot do proper Go type embedding below as the parser will then parse // values arbitrarily into the overflow maps of further-down types. @@ -1305,3 +1497,66 @@ func getGoGCEnv() int { } return DefaultRuntimeConfig.GoGC } + +type translationStrategyOption string + +var ( + // NoUTF8EscapingWithSuffixes will accept metric/label names as they are. + // Unit and type suffixes may be added to metric names, according to certain rules. + NoUTF8EscapingWithSuffixes translationStrategyOption = "NoUTF8EscapingWithSuffixes" + // UnderscoreEscapingWithSuffixes is the default option for translating OTLP to Prometheus. + // This option will translate metric name characters that are not alphanumerics/underscores/colons to underscores, + // and label name characters that are not alphanumerics/underscores to underscores. 
+ // Unit and type suffixes may be appended to metric names, according to certain rules. + UnderscoreEscapingWithSuffixes translationStrategyOption = "UnderscoreEscapingWithSuffixes" + // NoTranslation (EXPERIMENTAL): disables all translation of incoming metric + // and label names. This offers a way for the OTLP users to use native metric names, reducing confusion. + // + // WARNING: This setting has significant known risks and limitations (see + // https://prometheus.io/docs/practices/naming/ for details): + // * Impaired UX when using PromQL in plain YAML (e.g. alerts, rules, dashboard, autoscaling configuration). + // * Series collisions which in the best case may result in OOO errors, in the worst case a silently malformed + // time series. For instance, you may end up in situation of ingesting `foo.bar` series with unit + // `seconds` and a separate series `foo.bar` with unit `milliseconds`. + // + // As a result, this setting is experimental and currently, should not be used in + // production systems. + // + // TODO(ArthurSens): Mention `type-and-unit-labels` feature (https://github.com/prometheus/proposals/pull/39) once released, as potential mitigation of the above risks. + NoTranslation translationStrategyOption = "NoTranslation" +) + +// OTLPConfig is the configuration for writing to the OTLP endpoint. +type OTLPConfig struct { + PromoteResourceAttributes []string `yaml:"promote_resource_attributes,omitempty"` + TranslationStrategy translationStrategyOption `yaml:"translation_strategy,omitempty"` + KeepIdentifyingResourceAttributes bool `yaml:"keep_identifying_resource_attributes,omitempty"` + ConvertHistogramsToNHCB bool `yaml:"convert_histograms_to_nhcb,omitempty"` +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (c *OTLPConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + *c = DefaultOTLPConfig + type plain OTLPConfig + if err := unmarshal((*plain)(c)); err != nil { + return err + } + + seen := map[string]struct{}{} + var err error + for i, attr := range c.PromoteResourceAttributes { + attr = strings.TrimSpace(attr) + if attr == "" { + err = errors.Join(err, errors.New("empty promoted OTel resource attribute")) + continue + } + if _, exists := seen[attr]; exists { + err = errors.Join(err, fmt.Errorf("duplicated promoted OTel resource attribute %q", attr)) + continue + } + + seen[attr] = struct{}{} + c.PromoteResourceAttributes[i] = attr + } + return err +} diff --git a/vendor/github.com/prometheus/prometheus/config/reload.go b/vendor/github.com/prometheus/prometheus/config/reload.go new file mode 100644 index 0000000000..cc0cc97158 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/config/reload.go @@ -0,0 +1,93 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
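The translation strategies above surface in the otlp section of the main configuration; a sketch of unmarshalling an OTLPConfig directly with yaml.v2 (the same YAML package this file uses), showing the default strategy being applied:

    package main

    import (
        "fmt"
        "log"

        "gopkg.in/yaml.v2"

        "github.com/prometheus/prometheus/config"
    )

    func main() {
        var otlp config.OTLPConfig
        raw := "promote_resource_attributes: ['service.name', 'service.instance.id']\n"
        if err := yaml.Unmarshal([]byte(raw), &otlp); err != nil {
            log.Fatal(err)
        }
        // UnmarshalYAML starts from DefaultOTLPConfig, so the strategy is set
        // even though the YAML above does not mention it.
        fmt.Println(otlp.TranslationStrategy == config.UnderscoreEscapingWithSuffixes) // true
    }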
+ +package config + +import ( + "crypto/sha256" + "encoding/hex" + "fmt" + "os" + "path/filepath" + + promconfig "github.com/prometheus/common/config" + "gopkg.in/yaml.v2" +) + +type ExternalFilesConfig struct { + RuleFiles []string `yaml:"rule_files"` + ScrapeConfigFiles []string `yaml:"scrape_config_files"` +} + +// GenerateChecksum generates a checksum of the YAML file and the files it references. +func GenerateChecksum(yamlFilePath string) (string, error) { + hash := sha256.New() + + yamlContent, err := os.ReadFile(yamlFilePath) + if err != nil { + return "", fmt.Errorf("error reading YAML file: %w", err) + } + _, err = hash.Write(yamlContent) + if err != nil { + return "", fmt.Errorf("error writing YAML file to hash: %w", err) + } + + var config ExternalFilesConfig + if err := yaml.Unmarshal(yamlContent, &config); err != nil { + return "", fmt.Errorf("error unmarshalling YAML: %w", err) + } + + dir := filepath.Dir(yamlFilePath) + + for i, file := range config.RuleFiles { + config.RuleFiles[i] = promconfig.JoinDir(dir, file) + } + for i, file := range config.ScrapeConfigFiles { + config.ScrapeConfigFiles[i] = promconfig.JoinDir(dir, file) + } + + files := map[string][]string{ + "r": config.RuleFiles, // "r" for rule files + "s": config.ScrapeConfigFiles, // "s" for scrape config files + } + + for _, prefix := range []string{"r", "s"} { + for _, pattern := range files[prefix] { + matchingFiles, err := filepath.Glob(pattern) + if err != nil { + return "", fmt.Errorf("error finding files with pattern %q: %w", pattern, err) + } + + for _, file := range matchingFiles { + // Write prefix to the hash ("r" or "s") followed by \0, then + // the file path. + _, err = hash.Write([]byte(prefix + "\x00" + file + "\x00")) + if err != nil { + return "", fmt.Errorf("error writing %q path to hash: %w", file, err) + } + + // Read and hash the content of the file. + content, err := os.ReadFile(file) + if err != nil { + return "", fmt.Errorf("error reading file %s: %w", file, err) + } + _, err = hash.Write(append(content, []byte("\x00")...)) + if err != nil { + return "", fmt.Errorf("error writing %q content to hash: %w", file, err) + } + } + } + } + + return hex.EncodeToString(hash.Sum(nil)), nil +} diff --git a/vendor/github.com/prometheus/prometheus/discovery/README.md b/vendor/github.com/prometheus/prometheus/discovery/README.md index 4c06608625..d5418e7fb1 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/README.md +++ b/vendor/github.com/prometheus/prometheus/discovery/README.md @@ -233,7 +233,7 @@ type Config interface { } type DiscovererOptions struct { - Logger log.Logger + Logger *slog.Logger // A registerer for the Discoverer's metrics. Registerer prometheus.Registerer diff --git a/vendor/github.com/prometheus/prometheus/discovery/discoverer_metrics_noop.go b/vendor/github.com/prometheus/prometheus/discovery/discoverer_metrics_noop.go index 638317ace1..4321204b6c 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/discoverer_metrics_noop.go +++ b/vendor/github.com/prometheus/prometheus/discovery/discoverer_metrics_noop.go @@ -13,7 +13,7 @@ package discovery -// Create a dummy metrics struct, because this SD doesn't have any metrics. +// NoopDiscovererMetrics creates a dummy metrics struct, because this SD doesn't have any metrics. 
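GenerateChecksum above hashes the main YAML file plus every rule and scrape-config file it references (with the "r"/"s" prefixes keeping the two namespaces apart), so a reloader can detect changes in referenced files, not just the main file. Usage is a single call:

    package main

    import (
        "log"

        "github.com/prometheus/prometheus/config"
    )

    func main() {
        // The checksum changes whenever prometheus.yml or any file matched by
        // its rule_files / scrape_config_files globs changes on disk.
        sum, err := config.GenerateChecksum("prometheus.yml")
        if err != nil {
            log.Fatal(err)
        }
        log.Println("config checksum:", sum)
    }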
type NoopDiscovererMetrics struct{} var _ DiscovererMetrics = (*NoopDiscovererMetrics)(nil) diff --git a/vendor/github.com/prometheus/prometheus/discovery/discovery.go b/vendor/github.com/prometheus/prometheus/discovery/discovery.go index a5826f8176..c400de3632 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/discovery.go +++ b/vendor/github.com/prometheus/prometheus/discovery/discovery.go @@ -15,9 +15,9 @@ package discovery import ( "context" + "log/slog" "reflect" - "github.com/go-kit/log" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" @@ -39,7 +39,7 @@ type Discoverer interface { Run(ctx context.Context, up chan<- []*targetgroup.Group) } -// Internal metrics of service discovery mechanisms. +// DiscovererMetrics are internal metrics of service discovery mechanisms. type DiscovererMetrics interface { Register() error Unregister() @@ -47,7 +47,7 @@ type DiscovererMetrics interface { // DiscovererOptions provides options for a Discoverer. type DiscovererOptions struct { - Logger log.Logger + Logger *slog.Logger Metrics DiscovererMetrics @@ -56,7 +56,7 @@ type DiscovererOptions struct { HTTPClientOptions []config.HTTPClientOption } -// Metrics used by the "refresh" package. +// RefreshMetrics are used by the "refresh" package. // We define them here in the "discovery" package in order to avoid a cyclic dependency between // "discovery" and "refresh". type RefreshMetrics struct { @@ -64,17 +64,18 @@ type RefreshMetrics struct { Duration prometheus.Observer } -// Instantiate the metrics used by the "refresh" package. +// RefreshMetricsInstantiator instantiates the metrics used by the "refresh" package. type RefreshMetricsInstantiator interface { Instantiate(mech string) *RefreshMetrics } -// An interface for registering, unregistering, and instantiating metrics for the "refresh" package. -// Refresh metrics are registered and unregistered outside of the service discovery mechanism. -// This is so that the same metrics can be reused across different service discovery mechanisms. -// To manage refresh metrics inside the SD mechanism, we'd need to use const labels which are -// specific to that SD. However, doing so would also expose too many unused metrics on -// the Prometheus /metrics endpoint. +// RefreshMetricsManager is an interface for registering, unregistering, and +// instantiating metrics for the "refresh" package. Refresh metrics are +// registered and unregistered outside of the service discovery mechanism. This +// is so that the same metrics can be reused across different service discovery +// mechanisms. To manage refresh metrics inside the SD mechanism, we'd need to +// use const labels which are specific to that SD. However, doing so would also +// expose too many unused metrics on the Prometheus /metrics endpoint. type RefreshMetricsManager interface { DiscovererMetrics RefreshMetricsInstantiator @@ -108,7 +109,7 @@ func (c *Configs) SetDirectory(dir string) { // UnmarshalYAML implements yaml.Unmarshaler. func (c *Configs) UnmarshalYAML(unmarshal func(interface{}) error) error { - cfgTyp := getConfigType(configsType) + cfgTyp := reflect.StructOf(configFields) cfgPtr := reflect.New(cfgTyp) cfgVal := cfgPtr.Elem() @@ -123,7 +124,7 @@ func (c *Configs) UnmarshalYAML(unmarshal func(interface{}) error) error { // MarshalYAML implements yaml.Marshaler. 
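// Like UnmarshalYAML, it works through a struct type assembled at runtime
// with reflect.StructOf(configFields), carrying one exported, yaml-tagged
// field per registered SD mechanism. A hand-written sketch of the equivalent
// static shape (the field set is illustrative, assuming static and file SD
// registrations):
//
//	type configs struct {
//		StaticConfigs []StaticConfig   `yaml:"static_configs,omitempty"`
//		FileSDConfigs []*file.SDConfig `yaml:"file_sd_configs,omitempty"`
//	}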
func (c Configs) MarshalYAML() (interface{}, error) { - cfgTyp := getConfigType(configsType) + cfgTyp := reflect.StructOf(configFields) cfgPtr := reflect.New(cfgTyp) cfgVal := cfgPtr.Elem() @@ -145,7 +146,8 @@ func (c StaticConfig) NewDiscoverer(DiscovererOptions) (Discoverer, error) { return staticDiscoverer(c), nil } -// No metrics are needed for this service discovery mechanism. +// NewDiscovererMetrics returns NoopDiscovererMetrics because no metrics are +// needed for this service discovery mechanism. func (c StaticConfig) NewDiscovererMetrics(prometheus.Registerer, RefreshMetricsInstantiator) DiscovererMetrics { return &NoopDiscovererMetrics{} } diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpoints.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpoints.go index c7a60ae6d3..14d3bc7a99 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpoints.go +++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpoints.go @@ -17,13 +17,13 @@ import ( "context" "errors" "fmt" + "log/slog" "net" "strconv" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" apiv1 "k8s.io/api/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" @@ -33,7 +33,7 @@ import ( // Endpoints discovers new endpoint targets. type Endpoints struct { - logger log.Logger + logger *slog.Logger endpointsInf cache.SharedIndexInformer serviceInf cache.SharedInformer @@ -49,9 +49,9 @@ type Endpoints struct { } // NewEndpoints returns a new endpoints discovery. -func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer, eventCount *prometheus.CounterVec) *Endpoints { +func NewEndpoints(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer, eventCount *prometheus.CounterVec) *Endpoints { if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } epAddCount := eventCount.WithLabelValues(RoleEndpoint.String(), MetricLabelRoleAdd) @@ -92,26 +92,23 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca }, }) if err != nil { - level.Error(l).Log("msg", "Error adding endpoints event handler.", "err", err) + l.Error("Error adding endpoints event handler.", "err", err) } serviceUpdate := func(o interface{}) { svc, err := convertToService(o) if err != nil { - level.Error(e.logger).Log("msg", "converting to Service object failed", "err", err) + e.logger.Error("converting to Service object failed", "err", err) return } - ep := &apiv1.Endpoints{} - ep.Namespace = svc.Namespace - ep.Name = svc.Name - obj, exists, err := e.endpointsStore.Get(ep) + obj, exists, err := e.endpointsStore.GetByKey(namespacedName(svc.Namespace, svc.Name)) if exists && err == nil { e.enqueue(obj.(*apiv1.Endpoints)) } if err != nil { - level.Error(e.logger).Log("msg", "retrieving endpoints failed", "err", err) + e.logger.Error("retrieving endpoints failed", "err", err) } } _, err = e.serviceInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ @@ -131,7 +128,7 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca }, }) if err != nil { - level.Error(l).Log("msg", "Error adding services event handler.", "err", err) + l.Error("Error adding services event handler.", "err", err) } _, err = e.podInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ UpdateFunc: func(old, cur interface{}) { @@ -154,7 
+151,7 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca }, }) if err != nil { - level.Error(l).Log("msg", "Error adding pods event handler.", "err", err) + l.Error("Error adding pods event handler.", "err", err) } if e.withNodeMetadata { _, err = e.nodeInf.AddEventHandler(cache.ResourceEventHandlerFuncs{ @@ -167,12 +164,15 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca e.enqueueNode(node.Name) }, DeleteFunc: func(o interface{}) { - node := o.(*apiv1.Node) - e.enqueueNode(node.Name) + nodeName, err := nodeName(o) + if err != nil { + l.Error("Error getting Node name", "err", err) + } + e.enqueueNode(nodeName) }, }) if err != nil { - level.Error(l).Log("msg", "Error adding nodes event handler.", "err", err) + l.Error("Error adding nodes event handler.", "err", err) } } @@ -182,7 +182,7 @@ func NewEndpoints(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node ca func (e *Endpoints) enqueueNode(nodeName string) { endpoints, err := e.endpointsInf.GetIndexer().ByIndex(nodeIndex, nodeName) if err != nil { - level.Error(e.logger).Log("msg", "Error getting endpoints for node", "node", nodeName, "err", err) + e.logger.Error("Error getting endpoints for node", "node", nodeName, "err", err) return } @@ -194,7 +194,7 @@ func (e *Endpoints) enqueueNode(nodeName string) { func (e *Endpoints) enqueuePod(podNamespacedName string) { endpoints, err := e.endpointsInf.GetIndexer().ByIndex(podIndex, podNamespacedName) if err != nil { - level.Error(e.logger).Log("msg", "Error getting endpoints for pod", "pod", podNamespacedName, "err", err) + e.logger.Error("Error getting endpoints for pod", "pod", podNamespacedName, "err", err) return } @@ -223,7 +223,7 @@ func (e *Endpoints) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { if !cache.WaitForCacheSync(ctx.Done(), cacheSyncs...) { if !errors.Is(ctx.Err(), context.Canceled) { - level.Error(e.logger).Log("msg", "endpoints informer unable to sync cache") + e.logger.Error("endpoints informer unable to sync cache") } return } @@ -247,13 +247,13 @@ func (e *Endpoints) process(ctx context.Context, ch chan<- []*targetgroup.Group) namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { - level.Error(e.logger).Log("msg", "splitting key failed", "key", key) + e.logger.Error("splitting key failed", "key", key) return true } o, exists, err := e.endpointsStore.GetByKey(key) if err != nil { - level.Error(e.logger).Log("msg", "getting object from store failed", "key", key) + e.logger.Error("getting object from store failed", "key", key) return true } if !exists { @@ -262,7 +262,7 @@ func (e *Endpoints) process(ctx context.Context, ch chan<- []*targetgroup.Group) } eps, err := convertToEndpoints(o) if err != nil { - level.Error(e.logger).Log("msg", "converting to Endpoints object failed", "err", err) + e.logger.Error("converting to Endpoints object failed", "err", err) return true } send(ctx, ch, e.buildEndpoints(eps)) @@ -361,16 +361,19 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group { target = target.Merge(podLabels(pod)) // Attach potential container port labels matching the endpoint port. - for _, c := range pod.Spec.Containers { + containers := append(pod.Spec.Containers, pod.Spec.InitContainers...) 
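+ // Regular containers come first in the combined slice, so an index
+ // i >= len(pod.Spec.Containers) denotes an init container; that is
+ // exactly what the isInit flag computed below records.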
+ for i, c := range containers { for _, cport := range c.Ports { if port.Port == cport.ContainerPort { ports := strconv.FormatUint(uint64(port.Port), 10) + isInit := i >= len(pod.Spec.Containers) target[podContainerNameLabel] = lv(c.Name) target[podContainerImageLabel] = lv(c.Image) target[podContainerPortNameLabel] = lv(cport.Name) target[podContainerPortNumberLabel] = lv(ports) target[podContainerPortProtocolLabel] = lv(string(port.Protocol)) + target[podContainerIsInit] = lv(strconv.FormatBool(isInit)) break } } @@ -397,10 +400,10 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group { v := eps.Labels[apiv1.EndpointsOverCapacity] if v == "truncated" { - level.Warn(e.logger).Log("msg", "Number of endpoints in one Endpoints object exceeds 1000 and has been truncated, please use \"role: endpointslice\" instead", "endpoint", eps.Name) + e.logger.Warn("Number of endpoints in one Endpoints object exceeds 1000 and has been truncated, please use \"role: endpointslice\" instead", "endpoint", eps.Name) } if v == "warning" { - level.Warn(e.logger).Log("msg", "Number of endpoints in one Endpoints object exceeds 1000, please use \"role: endpointslice\" instead", "endpoint", eps.Name) + e.logger.Warn("Number of endpoints in one Endpoints object exceeds 1000, please use \"role: endpointslice\" instead", "endpoint", eps.Name) } // For all seen pods, check all container ports. If they were not covered @@ -411,7 +414,8 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group { continue } - for _, c := range pe.pod.Spec.Containers { + containers := append(pe.pod.Spec.Containers, pe.pod.Spec.InitContainers...) + for i, c := range containers { for _, cport := range c.Ports { hasSeenPort := func() bool { for _, eport := range pe.servicePorts { @@ -428,6 +432,7 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group { a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10)) ports := strconv.FormatUint(uint64(cport.ContainerPort), 10) + isInit := i >= len(pe.pod.Spec.Containers) target := model.LabelSet{ model.AddressLabel: lv(a), podContainerNameLabel: lv(c.Name), @@ -435,6 +440,7 @@ func (e *Endpoints) buildEndpoints(eps *apiv1.Endpoints) *targetgroup.Group { podContainerPortNameLabel: lv(cport.Name), podContainerPortNumberLabel: lv(ports), podContainerPortProtocolLabel: lv(string(cport.Protocol)), + podContainerIsInit: lv(strconv.FormatBool(isInit)), } tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod))) } @@ -448,13 +454,10 @@ func (e *Endpoints) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod { if ref == nil || ref.Kind != "Pod" { return nil } - p := &apiv1.Pod{} - p.Namespace = ref.Namespace - p.Name = ref.Name - obj, exists, err := e.podStore.Get(p) + obj, exists, err := e.podStore.GetByKey(namespacedName(ref.Namespace, ref.Name)) if err != nil { - level.Error(e.logger).Log("msg", "resolving pod ref failed", "err", err) + e.logger.Error("resolving pod ref failed", "err", err) return nil } if !exists { @@ -464,31 +467,27 @@ func (e *Endpoints) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod { } func (e *Endpoints) addServiceLabels(ns, name string, tg *targetgroup.Group) { - svc := &apiv1.Service{} - svc.Namespace = ns - svc.Name = name - - obj, exists, err := e.serviceStore.Get(svc) + obj, exists, err := e.serviceStore.GetByKey(namespacedName(ns, name)) if err != nil { - level.Error(e.logger).Log("msg", "retrieving service failed", "err", err) + e.logger.Error("retrieving 
service failed", "err", err) return } if !exists { return } - svc = obj.(*apiv1.Service) + svc := obj.(*apiv1.Service) tg.Labels = tg.Labels.Merge(serviceLabels(svc)) } -func addNodeLabels(tg model.LabelSet, nodeInf cache.SharedInformer, logger log.Logger, nodeName *string) model.LabelSet { +func addNodeLabels(tg model.LabelSet, nodeInf cache.SharedInformer, logger *slog.Logger, nodeName *string) model.LabelSet { if nodeName == nil { return tg } obj, exists, err := nodeInf.GetStore().GetByKey(*nodeName) if err != nil { - level.Error(logger).Log("msg", "Error getting node", "node", *nodeName, "err", err) + logger.Error("Error getting node", "node", *nodeName, "err", err) return tg } diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpointslice.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpointslice.go index 7a70255c12..1002025128 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpointslice.go +++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpointslice.go @@ -16,17 +16,15 @@ package kubernetes import ( "context" "errors" - "fmt" + "log/slog" "net" "strconv" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" apiv1 "k8s.io/api/core/v1" v1 "k8s.io/api/discovery/v1" - "k8s.io/api/discovery/v1beta1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" @@ -36,7 +34,7 @@ import ( // EndpointSlice discovers new endpoint targets. type EndpointSlice struct { - logger log.Logger + logger *slog.Logger endpointSliceInf cache.SharedIndexInformer serviceInf cache.SharedInformer @@ -52,9 +50,9 @@ type EndpointSlice struct { } // NewEndpointSlice returns a new endpointslice discovery. -func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer, eventCount *prometheus.CounterVec) *EndpointSlice { +func NewEndpointSlice(l *slog.Logger, eps cache.SharedIndexInformer, svc, pod, node cache.SharedInformer, eventCount *prometheus.CounterVec) *EndpointSlice { if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } epslAddCount := eventCount.WithLabelValues(RoleEndpointSlice.String(), MetricLabelRoleAdd) @@ -93,27 +91,28 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod }, }) if err != nil { - level.Error(l).Log("msg", "Error adding endpoint slices event handler.", "err", err) + l.Error("Error adding endpoint slices event handler.", "err", err) } serviceUpdate := func(o interface{}) { svc, err := convertToService(o) if err != nil { - level.Error(e.logger).Log("msg", "converting to Service object failed", "err", err) + e.logger.Error("converting to Service object failed", "err", err) return } - // TODO(brancz): use cache.Indexer to index endpoints by - // disv1beta1.LabelServiceName so this operation doesn't have to - // iterate over all endpoint objects. + // TODO(brancz): use cache.Indexer to index endpointslices by + // LabelServiceName so this operation doesn't have to iterate over all + // endpoint objects. 
for _, obj := range e.endpointSliceStore.List() { - esa, err := e.getEndpointSliceAdaptor(obj) - if err != nil { - level.Error(e.logger).Log("msg", "converting to EndpointSlice object failed", "err", err) + es, ok := obj.(*v1.EndpointSlice) + if !ok { + e.logger.Error("converting to EndpointSlice object failed", "err", err) continue } - if lv, exists := esa.labels()[esa.labelServiceName()]; exists && lv == svc.Name { - e.enqueue(esa.get()) + // Only consider the underlying EndpointSlices in the same namespace. + if svcName, exists := es.Labels[v1.LabelServiceName]; exists && svcName == svc.Name && es.Namespace == svc.Namespace { + e.enqueue(es) } } } @@ -132,7 +131,7 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod }, }) if err != nil { - level.Error(l).Log("msg", "Error adding services event handler.", "err", err) + l.Error("Error adding services event handler.", "err", err) } if e.withNodeMetadata { @@ -146,12 +145,15 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod e.enqueueNode(node.Name) }, DeleteFunc: func(o interface{}) { - node := o.(*apiv1.Node) - e.enqueueNode(node.Name) + nodeName, err := nodeName(o) + if err != nil { + l.Error("Error getting Node name", "err", err) + } + e.enqueueNode(nodeName) }, }) if err != nil { - level.Error(l).Log("msg", "Error adding nodes event handler.", "err", err) + l.Error("Error adding nodes event handler.", "err", err) } } @@ -161,7 +163,7 @@ func NewEndpointSlice(l log.Logger, eps cache.SharedIndexInformer, svc, pod, nod func (e *EndpointSlice) enqueueNode(nodeName string) { endpoints, err := e.endpointSliceInf.GetIndexer().ByIndex(nodeIndex, nodeName) if err != nil { - level.Error(e.logger).Log("msg", "Error getting endpoints for node", "node", nodeName, "err", err) + e.logger.Error("Error getting endpoints for node", "node", nodeName, "err", err) return } @@ -189,7 +191,7 @@ func (e *EndpointSlice) Run(ctx context.Context, ch chan<- []*targetgroup.Group) } if !cache.WaitForCacheSync(ctx.Done(), cacheSyncs...) 
{ if !errors.Is(ctx.Err(), context.Canceled) { - level.Error(e.logger).Log("msg", "endpointslice informer unable to sync cache") + e.logger.Error("endpointslice informer unable to sync cache") } return } @@ -213,13 +215,13 @@ func (e *EndpointSlice) process(ctx context.Context, ch chan<- []*targetgroup.Gr namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { - level.Error(e.logger).Log("msg", "splitting key failed", "key", key) + e.logger.Error("splitting key failed", "key", key) return true } o, exists, err := e.endpointSliceStore.GetByKey(key) if err != nil { - level.Error(e.logger).Log("msg", "getting object from store failed", "key", key) + e.logger.Error("getting object from store failed", "key", key) return true } if !exists { @@ -227,29 +229,17 @@ func (e *EndpointSlice) process(ctx context.Context, ch chan<- []*targetgroup.Gr return true } - esa, err := e.getEndpointSliceAdaptor(o) - if err != nil { - level.Error(e.logger).Log("msg", "converting to EndpointSlice object failed", "err", err) - return true + if es, ok := o.(*v1.EndpointSlice); ok { + send(ctx, ch, e.buildEndpointSlice(*es)) + } else { + e.logger.Error("received unexpected object", "object", o) + return false } - - send(ctx, ch, e.buildEndpointSlice(esa)) return true } -func (e *EndpointSlice) getEndpointSliceAdaptor(o interface{}) (endpointSliceAdaptor, error) { - switch endpointSlice := o.(type) { - case *v1.EndpointSlice: - return newEndpointSliceAdaptorFromV1(endpointSlice), nil - case *v1beta1.EndpointSlice: - return newEndpointSliceAdaptorFromV1beta1(endpointSlice), nil - default: - return nil, fmt.Errorf("received unexpected object: %v", o) - } -} - -func endpointSliceSource(ep endpointSliceAdaptor) string { - return endpointSliceSourceFromNamespaceAndName(ep.namespace(), ep.name()) +func endpointSliceSource(ep v1.EndpointSlice) string { + return endpointSliceSourceFromNamespaceAndName(ep.Namespace, ep.Name) } func endpointSliceSourceFromNamespaceAndName(namespace, name string) string { @@ -274,95 +264,95 @@ const ( endpointSliceEndpointTopologyLabelPresentPrefix = metaLabelPrefix + "endpointslice_endpoint_topology_present_" ) -func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgroup.Group { +func (e *EndpointSlice) buildEndpointSlice(eps v1.EndpointSlice) *targetgroup.Group { tg := &targetgroup.Group{ Source: endpointSliceSource(eps), } tg.Labels = model.LabelSet{ - namespaceLabel: lv(eps.namespace()), - endpointSliceAddressTypeLabel: lv(eps.addressType()), + namespaceLabel: lv(eps.Namespace), + endpointSliceAddressTypeLabel: lv(string(eps.AddressType)), } - addObjectMetaLabels(tg.Labels, eps.getObjectMeta(), RoleEndpointSlice) + addObjectMetaLabels(tg.Labels, eps.ObjectMeta, RoleEndpointSlice) e.addServiceLabels(eps, tg) type podEntry struct { pod *apiv1.Pod - servicePorts []endpointSlicePortAdaptor + servicePorts []v1.EndpointPort } seenPods := map[string]*podEntry{} - add := func(addr string, ep endpointSliceEndpointAdaptor, port endpointSlicePortAdaptor) { + add := func(addr string, ep v1.Endpoint, port v1.EndpointPort) { a := addr - if port.port() != nil { - a = net.JoinHostPort(addr, strconv.FormatUint(uint64(*port.port()), 10)) + if port.Port != nil { + a = net.JoinHostPort(addr, strconv.FormatUint(uint64(*port.Port), 10)) } target := model.LabelSet{ model.AddressLabel: lv(a), } - if port.name() != nil { - target[endpointSlicePortNameLabel] = lv(*port.name()) + if port.Name != nil { + target[endpointSlicePortNameLabel] = lv(*port.Name) } - if port.protocol() != 
nil { - target[endpointSlicePortProtocolLabel] = lv(*port.protocol()) + if port.Protocol != nil { + target[endpointSlicePortProtocolLabel] = lv(string(*port.Protocol)) } - if port.port() != nil { - target[endpointSlicePortLabel] = lv(strconv.FormatUint(uint64(*port.port()), 10)) + if port.Port != nil { + target[endpointSlicePortLabel] = lv(strconv.FormatUint(uint64(*port.Port), 10)) } - if port.appProtocol() != nil { - target[endpointSlicePortAppProtocol] = lv(*port.appProtocol()) + if port.AppProtocol != nil { + target[endpointSlicePortAppProtocol] = lv(*port.AppProtocol) } - if ep.conditions().ready() != nil { - target[endpointSliceEndpointConditionsReadyLabel] = lv(strconv.FormatBool(*ep.conditions().ready())) + if ep.Conditions.Ready != nil { + target[endpointSliceEndpointConditionsReadyLabel] = lv(strconv.FormatBool(*ep.Conditions.Ready)) } - if ep.conditions().serving() != nil { - target[endpointSliceEndpointConditionsServingLabel] = lv(strconv.FormatBool(*ep.conditions().serving())) + if ep.Conditions.Serving != nil { + target[endpointSliceEndpointConditionsServingLabel] = lv(strconv.FormatBool(*ep.Conditions.Serving)) } - if ep.conditions().terminating() != nil { - target[endpointSliceEndpointConditionsTerminatingLabel] = lv(strconv.FormatBool(*ep.conditions().terminating())) + if ep.Conditions.Terminating != nil { + target[endpointSliceEndpointConditionsTerminatingLabel] = lv(strconv.FormatBool(*ep.Conditions.Terminating)) } - if ep.hostname() != nil { - target[endpointSliceEndpointHostnameLabel] = lv(*ep.hostname()) + if ep.Hostname != nil { + target[endpointSliceEndpointHostnameLabel] = lv(*ep.Hostname) } - if ep.targetRef() != nil { - target[model.LabelName(endpointSliceAddressTargetKindLabel)] = lv(ep.targetRef().Kind) - target[model.LabelName(endpointSliceAddressTargetNameLabel)] = lv(ep.targetRef().Name) + if ep.TargetRef != nil { + target[model.LabelName(endpointSliceAddressTargetKindLabel)] = lv(ep.TargetRef.Kind) + target[model.LabelName(endpointSliceAddressTargetNameLabel)] = lv(ep.TargetRef.Name) } - if ep.nodename() != nil { - target[endpointSliceEndpointNodenameLabel] = lv(*ep.nodename()) + if ep.NodeName != nil { + target[endpointSliceEndpointNodenameLabel] = lv(*ep.NodeName) } - if ep.zone() != nil { - target[model.LabelName(endpointSliceEndpointZoneLabel)] = lv(*ep.zone()) + if ep.Zone != nil { + target[model.LabelName(endpointSliceEndpointZoneLabel)] = lv(*ep.Zone) } - for k, v := range ep.topology() { + for k, v := range ep.DeprecatedTopology { ln := strutil.SanitizeLabelName(k) target[model.LabelName(endpointSliceEndpointTopologyLabelPrefix+ln)] = lv(v) target[model.LabelName(endpointSliceEndpointTopologyLabelPresentPrefix+ln)] = presentValue } if e.withNodeMetadata { - if ep.targetRef() != nil && ep.targetRef().Kind == "Node" { - target = addNodeLabels(target, e.nodeInf, e.logger, &ep.targetRef().Name) + if ep.TargetRef != nil && ep.TargetRef.Kind == "Node" { + target = addNodeLabels(target, e.nodeInf, e.logger, &ep.TargetRef.Name) } else { - target = addNodeLabels(target, e.nodeInf, e.logger, ep.nodename()) + target = addNodeLabels(target, e.nodeInf, e.logger, ep.NodeName) } } - pod := e.resolvePodRef(ep.targetRef()) + pod := e.resolvePodRef(ep.TargetRef) if pod == nil { // This target is not a Pod, so don't continue with Pod specific logic. 
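// Such targets still carry the endpointslice, port, condition and
// (optionally) node labels attached above; only the pod_* labels are
// absent.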
tg.Targets = append(tg.Targets, target) @@ -380,19 +370,23 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou target = target.Merge(podLabels(pod)) // Attach potential container port labels matching the endpoint port. - for _, c := range pod.Spec.Containers { + containers := append(pod.Spec.Containers, pod.Spec.InitContainers...) + for i, c := range containers { for _, cport := range c.Ports { - if port.port() == nil { + if port.Port == nil { continue } - if *port.port() == cport.ContainerPort { - ports := strconv.FormatUint(uint64(*port.port()), 10) + + if *port.Port == cport.ContainerPort { + ports := strconv.FormatUint(uint64(*port.Port), 10) + isInit := i >= len(pod.Spec.Containers) target[podContainerNameLabel] = lv(c.Name) target[podContainerImageLabel] = lv(c.Image) target[podContainerPortNameLabel] = lv(cport.Name) target[podContainerPortNumberLabel] = lv(ports) target[podContainerPortProtocolLabel] = lv(string(cport.Protocol)) + target[podContainerIsInit] = lv(strconv.FormatBool(isInit)) break } } @@ -404,9 +398,9 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou tg.Targets = append(tg.Targets, target) } - for _, ep := range eps.endpoints() { - for _, port := range eps.ports() { - for _, addr := range ep.addresses() { + for _, ep := range eps.Endpoints { + for _, port := range eps.Ports { + for _, addr := range ep.Addresses { add(addr, ep, port) } } @@ -420,14 +414,15 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou continue } - for _, c := range pe.pod.Spec.Containers { + containers := append(pe.pod.Spec.Containers, pe.pod.Spec.InitContainers...) + for i, c := range containers { for _, cport := range c.Ports { hasSeenPort := func() bool { for _, eport := range pe.servicePorts { - if eport.port() == nil { + if eport.Port == nil { continue } - if cport.ContainerPort == *eport.port() { + if cport.ContainerPort == *eport.Port { return true } } @@ -440,6 +435,7 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou a := net.JoinHostPort(pe.pod.Status.PodIP, strconv.FormatUint(uint64(cport.ContainerPort), 10)) ports := strconv.FormatUint(uint64(cport.ContainerPort), 10) + isInit := i >= len(pe.pod.Spec.Containers) target := model.LabelSet{ model.AddressLabel: lv(a), podContainerNameLabel: lv(c.Name), @@ -447,6 +443,7 @@ func (e *EndpointSlice) buildEndpointSlice(eps endpointSliceAdaptor) *targetgrou podContainerPortNameLabel: lv(cport.Name), podContainerPortNumberLabel: lv(ports), podContainerPortProtocolLabel: lv(string(cport.Protocol)), + podContainerIsInit: lv(strconv.FormatBool(isInit)), } tg.Targets = append(tg.Targets, target.Merge(podLabels(pe.pod))) } @@ -460,13 +457,10 @@ func (e *EndpointSlice) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod { if ref == nil || ref.Kind != "Pod" { return nil } - p := &apiv1.Pod{} - p.Namespace = ref.Namespace - p.Name = ref.Name - obj, exists, err := e.podStore.Get(p) + obj, exists, err := e.podStore.GetByKey(namespacedName(ref.Namespace, ref.Name)) if err != nil { - level.Error(e.logger).Log("msg", "resolving pod ref failed", "err", err) + e.logger.Error("resolving pod ref failed", "err", err) return nil } if !exists { @@ -475,29 +469,29 @@ func (e *EndpointSlice) resolvePodRef(ref *apiv1.ObjectReference) *apiv1.Pod { return obj.(*apiv1.Pod) } -func (e *EndpointSlice) addServiceLabels(esa endpointSliceAdaptor, tg *targetgroup.Group) { +func (e *EndpointSlice) addServiceLabels(esa v1.EndpointSlice, tg 
*targetgroup.Group) { var ( - svc = &apiv1.Service{} found bool + name string ) - svc.Namespace = esa.namespace() + ns := esa.Namespace // Every EndpointSlice object has the Service they belong to in the // kubernetes.io/service-name label. - svc.Name, found = esa.labels()[esa.labelServiceName()] + name, found = esa.Labels[v1.LabelServiceName] if !found { return } - obj, exists, err := e.serviceStore.Get(svc) + obj, exists, err := e.serviceStore.GetByKey(namespacedName(ns, name)) if err != nil { - level.Error(e.logger).Log("msg", "retrieving service failed", "err", err) + e.logger.Error("retrieving service failed", "err", err) return } if !exists { return } - svc = obj.(*apiv1.Service) + svc := obj.(*apiv1.Service) tg.Labels = tg.Labels.Merge(serviceLabels(svc)) } diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpointslice_adaptor.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpointslice_adaptor.go deleted file mode 100644 index edd64fcb32..0000000000 --- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/endpointslice_adaptor.go +++ /dev/null @@ -1,325 +0,0 @@ -// Copyright 2020 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package kubernetes - -import ( - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/api/discovery/v1" - "k8s.io/api/discovery/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// endpointSliceAdaptor is an adaptor for the different EndpointSlice versions. -type endpointSliceAdaptor interface { - get() interface{} - getObjectMeta() metav1.ObjectMeta - name() string - namespace() string - addressType() string - endpoints() []endpointSliceEndpointAdaptor - ports() []endpointSlicePortAdaptor - labels() map[string]string - labelServiceName() string -} - -type endpointSlicePortAdaptor interface { - name() *string - port() *int32 - protocol() *string - appProtocol() *string -} - -type endpointSliceEndpointAdaptor interface { - addresses() []string - hostname() *string - nodename() *string - zone() *string - conditions() endpointSliceEndpointConditionsAdaptor - targetRef() *corev1.ObjectReference - topology() map[string]string -} - -type endpointSliceEndpointConditionsAdaptor interface { - ready() *bool - serving() *bool - terminating() *bool -} - -// Adaptor for k8s.io/api/discovery/v1. 
-type endpointSliceAdaptorV1 struct { - endpointSlice *v1.EndpointSlice -} - -func newEndpointSliceAdaptorFromV1(endpointSlice *v1.EndpointSlice) endpointSliceAdaptor { - return &endpointSliceAdaptorV1{endpointSlice: endpointSlice} -} - -func (e *endpointSliceAdaptorV1) get() interface{} { - return e.endpointSlice -} - -func (e *endpointSliceAdaptorV1) getObjectMeta() metav1.ObjectMeta { - return e.endpointSlice.ObjectMeta -} - -func (e *endpointSliceAdaptorV1) name() string { - return e.endpointSlice.ObjectMeta.Name -} - -func (e *endpointSliceAdaptorV1) namespace() string { - return e.endpointSlice.ObjectMeta.Namespace -} - -func (e *endpointSliceAdaptorV1) addressType() string { - return string(e.endpointSlice.AddressType) -} - -func (e *endpointSliceAdaptorV1) endpoints() []endpointSliceEndpointAdaptor { - eps := make([]endpointSliceEndpointAdaptor, 0, len(e.endpointSlice.Endpoints)) - for i := 0; i < len(e.endpointSlice.Endpoints); i++ { - eps = append(eps, newEndpointSliceEndpointAdaptorFromV1(e.endpointSlice.Endpoints[i])) - } - return eps -} - -func (e *endpointSliceAdaptorV1) ports() []endpointSlicePortAdaptor { - ports := make([]endpointSlicePortAdaptor, 0, len(e.endpointSlice.Ports)) - for i := 0; i < len(e.endpointSlice.Ports); i++ { - ports = append(ports, newEndpointSlicePortAdaptorFromV1(e.endpointSlice.Ports[i])) - } - return ports -} - -func (e *endpointSliceAdaptorV1) labels() map[string]string { - return e.endpointSlice.Labels -} - -func (e *endpointSliceAdaptorV1) labelServiceName() string { - return v1.LabelServiceName -} - -// Adaptor for k8s.io/api/discovery/v1beta1. -type endpointSliceAdaptorV1Beta1 struct { - endpointSlice *v1beta1.EndpointSlice -} - -func newEndpointSliceAdaptorFromV1beta1(endpointSlice *v1beta1.EndpointSlice) endpointSliceAdaptor { - return &endpointSliceAdaptorV1Beta1{endpointSlice: endpointSlice} -} - -func (e *endpointSliceAdaptorV1Beta1) get() interface{} { - return e.endpointSlice -} - -func (e *endpointSliceAdaptorV1Beta1) getObjectMeta() metav1.ObjectMeta { - return e.endpointSlice.ObjectMeta -} - -func (e *endpointSliceAdaptorV1Beta1) name() string { - return e.endpointSlice.Name -} - -func (e *endpointSliceAdaptorV1Beta1) namespace() string { - return e.endpointSlice.Namespace -} - -func (e *endpointSliceAdaptorV1Beta1) addressType() string { - return string(e.endpointSlice.AddressType) -} - -func (e *endpointSliceAdaptorV1Beta1) endpoints() []endpointSliceEndpointAdaptor { - eps := make([]endpointSliceEndpointAdaptor, 0, len(e.endpointSlice.Endpoints)) - for i := 0; i < len(e.endpointSlice.Endpoints); i++ { - eps = append(eps, newEndpointSliceEndpointAdaptorFromV1beta1(e.endpointSlice.Endpoints[i])) - } - return eps -} - -func (e *endpointSliceAdaptorV1Beta1) ports() []endpointSlicePortAdaptor { - ports := make([]endpointSlicePortAdaptor, 0, len(e.endpointSlice.Ports)) - for i := 0; i < len(e.endpointSlice.Ports); i++ { - ports = append(ports, newEndpointSlicePortAdaptorFromV1beta1(e.endpointSlice.Ports[i])) - } - return ports -} - -func (e *endpointSliceAdaptorV1Beta1) labels() map[string]string { - return e.endpointSlice.Labels -} - -func (e *endpointSliceAdaptorV1Beta1) labelServiceName() string { - return v1beta1.LabelServiceName -} - -type endpointSliceEndpointAdaptorV1 struct { - endpoint v1.Endpoint -} - -func newEndpointSliceEndpointAdaptorFromV1(endpoint v1.Endpoint) endpointSliceEndpointAdaptor { - return &endpointSliceEndpointAdaptorV1{endpoint: endpoint} -} - -func (e *endpointSliceEndpointAdaptorV1) addresses() []string { - 
return e.endpoint.Addresses -} - -func (e *endpointSliceEndpointAdaptorV1) hostname() *string { - return e.endpoint.Hostname -} - -func (e *endpointSliceEndpointAdaptorV1) nodename() *string { - return e.endpoint.NodeName -} - -func (e *endpointSliceEndpointAdaptorV1) zone() *string { - return e.endpoint.Zone -} - -func (e *endpointSliceEndpointAdaptorV1) conditions() endpointSliceEndpointConditionsAdaptor { - return newEndpointSliceEndpointConditionsAdaptorFromV1(e.endpoint.Conditions) -} - -func (e *endpointSliceEndpointAdaptorV1) targetRef() *corev1.ObjectReference { - return e.endpoint.TargetRef -} - -func (e *endpointSliceEndpointAdaptorV1) topology() map[string]string { - return e.endpoint.DeprecatedTopology -} - -type endpointSliceEndpointConditionsAdaptorV1 struct { - endpointConditions v1.EndpointConditions -} - -func newEndpointSliceEndpointConditionsAdaptorFromV1(endpointConditions v1.EndpointConditions) endpointSliceEndpointConditionsAdaptor { - return &endpointSliceEndpointConditionsAdaptorV1{endpointConditions: endpointConditions} -} - -func (e *endpointSliceEndpointConditionsAdaptorV1) ready() *bool { - return e.endpointConditions.Ready -} - -func (e *endpointSliceEndpointConditionsAdaptorV1) serving() *bool { - return e.endpointConditions.Serving -} - -func (e *endpointSliceEndpointConditionsAdaptorV1) terminating() *bool { - return e.endpointConditions.Terminating -} - -type endpointSliceEndpointAdaptorV1beta1 struct { - endpoint v1beta1.Endpoint -} - -func newEndpointSliceEndpointAdaptorFromV1beta1(endpoint v1beta1.Endpoint) endpointSliceEndpointAdaptor { - return &endpointSliceEndpointAdaptorV1beta1{endpoint: endpoint} -} - -func (e *endpointSliceEndpointAdaptorV1beta1) addresses() []string { - return e.endpoint.Addresses -} - -func (e *endpointSliceEndpointAdaptorV1beta1) hostname() *string { - return e.endpoint.Hostname -} - -func (e *endpointSliceEndpointAdaptorV1beta1) nodename() *string { - return e.endpoint.NodeName -} - -func (e *endpointSliceEndpointAdaptorV1beta1) zone() *string { - return nil -} - -func (e *endpointSliceEndpointAdaptorV1beta1) conditions() endpointSliceEndpointConditionsAdaptor { - return newEndpointSliceEndpointConditionsAdaptorFromV1beta1(e.endpoint.Conditions) -} - -func (e *endpointSliceEndpointAdaptorV1beta1) targetRef() *corev1.ObjectReference { - return e.endpoint.TargetRef -} - -func (e *endpointSliceEndpointAdaptorV1beta1) topology() map[string]string { - return e.endpoint.Topology -} - -type endpointSliceEndpointConditionsAdaptorV1beta1 struct { - endpointConditions v1beta1.EndpointConditions -} - -func newEndpointSliceEndpointConditionsAdaptorFromV1beta1(endpointConditions v1beta1.EndpointConditions) endpointSliceEndpointConditionsAdaptor { - return &endpointSliceEndpointConditionsAdaptorV1beta1{endpointConditions: endpointConditions} -} - -func (e *endpointSliceEndpointConditionsAdaptorV1beta1) ready() *bool { - return e.endpointConditions.Ready -} - -func (e *endpointSliceEndpointConditionsAdaptorV1beta1) serving() *bool { - return e.endpointConditions.Serving -} - -func (e *endpointSliceEndpointConditionsAdaptorV1beta1) terminating() *bool { - return e.endpointConditions.Terminating -} - -type endpointSlicePortAdaptorV1 struct { - endpointPort v1.EndpointPort -} - -func newEndpointSlicePortAdaptorFromV1(port v1.EndpointPort) endpointSlicePortAdaptor { - return &endpointSlicePortAdaptorV1{endpointPort: port} -} - -func (e *endpointSlicePortAdaptorV1) name() *string { - return e.endpointPort.Name -} - -func (e 
*endpointSlicePortAdaptorV1) port() *int32 { - return e.endpointPort.Port -} - -func (e *endpointSlicePortAdaptorV1) protocol() *string { - val := string(*e.endpointPort.Protocol) - return &val -} - -func (e *endpointSlicePortAdaptorV1) appProtocol() *string { - return e.endpointPort.AppProtocol -} - -type endpointSlicePortAdaptorV1beta1 struct { - endpointPort v1beta1.EndpointPort -} - -func newEndpointSlicePortAdaptorFromV1beta1(port v1beta1.EndpointPort) endpointSlicePortAdaptor { - return &endpointSlicePortAdaptorV1beta1{endpointPort: port} -} - -func (e *endpointSlicePortAdaptorV1beta1) name() *string { - return e.endpointPort.Name -} - -func (e *endpointSlicePortAdaptorV1beta1) port() *int32 { - return e.endpointPort.Port -} - -func (e *endpointSlicePortAdaptorV1beta1) protocol() *string { - val := string(*e.endpointPort.Protocol) - return &val -} - -func (e *endpointSlicePortAdaptorV1beta1) appProtocol() *string { - return e.endpointPort.AppProtocol -} diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/ingress.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/ingress.go index 7b6366b257..0de574471f 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/ingress.go +++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/ingress.go @@ -17,14 +17,12 @@ import ( "context" "errors" "fmt" + "log/slog" "strings" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" v1 "k8s.io/api/networking/v1" - "k8s.io/api/networking/v1beta1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" @@ -33,14 +31,14 @@ import ( // Ingress implements discovery of Kubernetes ingress. type Ingress struct { - logger log.Logger + logger *slog.Logger informer cache.SharedInformer store cache.Store queue *workqueue.Type } // NewIngress returns a new ingress discovery. 
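// A minimal wiring sketch, mirroring the call site in kubernetes.go (the
// logger, informer, and event counter are assumed to be supplied by the
// caller):
//
//	ing := NewIngress(logger.With("role", "ingress"), informer, eventCount)
//	go ing.informer.Run(ctx.Done())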
-func NewIngress(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Ingress { +func NewIngress(l *slog.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Ingress { ingressAddCount := eventCount.WithLabelValues(RoleIngress.String(), MetricLabelRoleAdd) ingressUpdateCount := eventCount.WithLabelValues(RoleIngress.String(), MetricLabelRoleUpdate) ingressDeleteCount := eventCount.WithLabelValues(RoleIngress.String(), MetricLabelRoleDelete) @@ -67,7 +65,7 @@ func NewIngress(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.C }, }) if err != nil { - level.Error(l).Log("msg", "Error adding ingresses event handler.", "err", err) + l.Error("Error adding ingresses event handler.", "err", err) } return s } @@ -87,7 +85,7 @@ func (i *Ingress) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { if !cache.WaitForCacheSync(ctx.Done(), i.informer.HasSynced) { if !errors.Is(ctx.Err(), context.Canceled) { - level.Error(i.logger).Log("msg", "ingress informer unable to sync cache") + i.logger.Error("ingress informer unable to sync cache") } return } @@ -123,23 +121,18 @@ func (i *Ingress) process(ctx context.Context, ch chan<- []*targetgroup.Group) b return true } - var ia ingressAdaptor - switch ingress := o.(type) { - case *v1.Ingress: - ia = newIngressAdaptorFromV1(ingress) - case *v1beta1.Ingress: - ia = newIngressAdaptorFromV1beta1(ingress) - default: - level.Error(i.logger).Log("msg", "converting to Ingress object failed", "err", + if ingress, ok := o.(*v1.Ingress); ok { + send(ctx, ch, i.buildIngress(*ingress)) + } else { + i.logger.Error("converting to Ingress object failed", "err", fmt.Errorf("received unexpected object: %v", o)) return true } - send(ctx, ch, i.buildIngress(ia)) return true } -func ingressSource(s ingressAdaptor) string { - return ingressSourceFromNamespaceAndName(s.namespace(), s.name()) +func ingressSource(s v1.Ingress) string { + return ingressSourceFromNamespaceAndName(s.Namespace, s.Name) } func ingressSourceFromNamespaceAndName(namespace, name string) string { @@ -153,15 +146,15 @@ const ( ingressClassNameLabel = metaLabelPrefix + "ingress_class_name" ) -func ingressLabels(ingress ingressAdaptor) model.LabelSet { +func ingressLabels(ingress v1.Ingress) model.LabelSet { // Each label and annotation will create two key-value pairs in the map. ls := make(model.LabelSet) - ls[namespaceLabel] = lv(ingress.namespace()) - if cls := ingress.ingressClassName(); cls != nil { + ls[namespaceLabel] = lv(ingress.Namespace) + if cls := ingress.Spec.IngressClassName; cls != nil { ls[ingressClassNameLabel] = lv(*cls) } - addObjectMetaLabels(ls, ingress.getObjectMeta(), RoleIngress) + addObjectMetaLabels(ls, ingress.ObjectMeta, RoleIngress) return ls } @@ -181,19 +174,39 @@ func pathsFromIngressPaths(ingressPaths []string) []string { return paths } -func (i *Ingress) buildIngress(ingress ingressAdaptor) *targetgroup.Group { +func rulePaths(rule v1.IngressRule) []string { + rv := rule.IngressRuleValue + if rv.HTTP == nil { + return nil + } + paths := make([]string, len(rv.HTTP.Paths)) + for n, p := range rv.HTTP.Paths { + paths[n] = p.Path + } + return paths +} + +func tlsHosts(ingressTLS []v1.IngressTLS) []string { + var hosts []string + for _, tls := range ingressTLS { + hosts = append(hosts, tls.Hosts...) 
+ } + return hosts +} + +func (i *Ingress) buildIngress(ingress v1.Ingress) *targetgroup.Group { tg := &targetgroup.Group{ Source: ingressSource(ingress), } tg.Labels = ingressLabels(ingress) - for _, rule := range ingress.rules() { + for _, rule := range ingress.Spec.Rules { scheme := "http" - paths := pathsFromIngressPaths(rule.paths()) + paths := pathsFromIngressPaths(rulePaths(rule)) out: - for _, pattern := range ingress.tlsHosts() { - if matchesHostnamePattern(pattern, rule.host()) { + for _, pattern := range tlsHosts(ingress.Spec.TLS) { + if matchesHostnamePattern(pattern, rule.Host) { scheme = "https" break out } @@ -201,9 +214,9 @@ func (i *Ingress) buildIngress(ingress ingressAdaptor) *targetgroup.Group { for _, path := range paths { tg.Targets = append(tg.Targets, model.LabelSet{ - model.AddressLabel: lv(rule.host()), + model.AddressLabel: lv(rule.Host), ingressSchemeLabel: lv(scheme), - ingressHostLabel: lv(rule.host()), + ingressHostLabel: lv(rule.Host), ingressPathLabel: lv(path), }) } diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/ingress_adaptor.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/ingress_adaptor.go deleted file mode 100644 index d1a7b7f2a2..0000000000 --- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/ingress_adaptor.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2016 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package kubernetes - -import ( - v1 "k8s.io/api/networking/v1" - "k8s.io/api/networking/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// ingressAdaptor is an adaptor for the different Ingress versions. -type ingressAdaptor interface { - getObjectMeta() metav1.ObjectMeta - name() string - namespace() string - labels() map[string]string - annotations() map[string]string - tlsHosts() []string - ingressClassName() *string - rules() []ingressRuleAdaptor -} - -type ingressRuleAdaptor interface { - paths() []string - host() string -} - -// Adaptor for networking.k8s.io/v1. -type ingressAdaptorV1 struct { - ingress *v1.Ingress -} - -func newIngressAdaptorFromV1(ingress *v1.Ingress) ingressAdaptor { - return &ingressAdaptorV1{ingress: ingress} -} - -func (i *ingressAdaptorV1) getObjectMeta() metav1.ObjectMeta { return i.ingress.ObjectMeta } -func (i *ingressAdaptorV1) name() string { return i.ingress.Name } -func (i *ingressAdaptorV1) namespace() string { return i.ingress.Namespace } -func (i *ingressAdaptorV1) labels() map[string]string { return i.ingress.Labels } -func (i *ingressAdaptorV1) annotations() map[string]string { return i.ingress.Annotations } -func (i *ingressAdaptorV1) ingressClassName() *string { return i.ingress.Spec.IngressClassName } - -func (i *ingressAdaptorV1) tlsHosts() []string { - var hosts []string - for _, tls := range i.ingress.Spec.TLS { - hosts = append(hosts, tls.Hosts...) 
- } - return hosts -} - -func (i *ingressAdaptorV1) rules() []ingressRuleAdaptor { - var rules []ingressRuleAdaptor - for _, rule := range i.ingress.Spec.Rules { - rules = append(rules, newIngressRuleAdaptorFromV1(rule)) - } - return rules -} - -type ingressRuleAdaptorV1 struct { - rule v1.IngressRule -} - -func newIngressRuleAdaptorFromV1(rule v1.IngressRule) ingressRuleAdaptor { - return &ingressRuleAdaptorV1{rule: rule} -} - -func (i *ingressRuleAdaptorV1) paths() []string { - rv := i.rule.IngressRuleValue - if rv.HTTP == nil { - return nil - } - paths := make([]string, len(rv.HTTP.Paths)) - for n, p := range rv.HTTP.Paths { - paths[n] = p.Path - } - return paths -} - -func (i *ingressRuleAdaptorV1) host() string { return i.rule.Host } - -// Adaptor for networking.k8s.io/v1beta1. -type ingressAdaptorV1Beta1 struct { - ingress *v1beta1.Ingress -} - -func newIngressAdaptorFromV1beta1(ingress *v1beta1.Ingress) ingressAdaptor { - return &ingressAdaptorV1Beta1{ingress: ingress} -} -func (i *ingressAdaptorV1Beta1) getObjectMeta() metav1.ObjectMeta { return i.ingress.ObjectMeta } -func (i *ingressAdaptorV1Beta1) name() string { return i.ingress.Name } -func (i *ingressAdaptorV1Beta1) namespace() string { return i.ingress.Namespace } -func (i *ingressAdaptorV1Beta1) labels() map[string]string { return i.ingress.Labels } -func (i *ingressAdaptorV1Beta1) annotations() map[string]string { return i.ingress.Annotations } -func (i *ingressAdaptorV1Beta1) ingressClassName() *string { return i.ingress.Spec.IngressClassName } - -func (i *ingressAdaptorV1Beta1) tlsHosts() []string { - var hosts []string - for _, tls := range i.ingress.Spec.TLS { - hosts = append(hosts, tls.Hosts...) - } - return hosts -} - -func (i *ingressAdaptorV1Beta1) rules() []ingressRuleAdaptor { - var rules []ingressRuleAdaptor - for _, rule := range i.ingress.Spec.Rules { - rules = append(rules, newIngressRuleAdaptorFromV1Beta1(rule)) - } - return rules -} - -type ingressRuleAdaptorV1Beta1 struct { - rule v1beta1.IngressRule -} - -func newIngressRuleAdaptorFromV1Beta1(rule v1beta1.IngressRule) ingressRuleAdaptor { - return &ingressRuleAdaptorV1Beta1{rule: rule} -} - -func (i *ingressRuleAdaptorV1Beta1) paths() []string { - rv := i.rule.IngressRuleValue - if rv.HTTP == nil { - return nil - } - paths := make([]string, len(rv.HTTP.Paths)) - for n, p := range rv.HTTP.Paths { - paths[n] = p.Path - } - return paths -} - -func (i *ingressRuleAdaptorV1Beta1) host() string { return i.rule.Host } diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/kubernetes.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/kubernetes.go index a8b6f85899..03d9f2f449 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/kubernetes.go +++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/kubernetes.go @@ -17,42 +17,36 @@ import ( "context" "errors" "fmt" + "log/slog" "os" "reflect" "strings" "sync" "time" - "github.com/prometheus/prometheus/util/strutil" - - disv1beta1 "k8s.io/api/discovery/v1beta1" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" "github.com/prometheus/common/version" apiv1 "k8s.io/api/core/v1" disv1 "k8s.io/api/discovery/v1" networkv1 "k8s.io/api/networking/v1" - "k8s.io/api/networking/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" 
"k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" - utilversion "k8s.io/apimachinery/pkg/util/version" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/kubernetes" + // Required to get the GCP auth provider working. + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/clientcmd" - // Required to get the GCP auth provider working. - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" - "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/targetgroup" + "github.com/prometheus/prometheus/util/strutil" ) const ( @@ -63,14 +57,10 @@ const ( presentValue = model.LabelValue("true") ) -var ( - // Http header. - userAgent = fmt.Sprintf("Prometheus/%s", version.Version) - // DefaultSDConfig is the default Kubernetes SD configuration. - DefaultSDConfig = SDConfig{ - HTTPClientConfig: config.DefaultHTTPClientConfig, - } -) +// DefaultSDConfig is the default Kubernetes SD configuration. +var DefaultSDConfig = SDConfig{ + HTTPClientConfig: config.DefaultHTTPClientConfig, +} func init() { discovery.RegisterConfig(&SDConfig{}) @@ -177,7 +167,7 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { return err } if c.Role == "" { - return fmt.Errorf("role missing (one of: pod, service, endpoints, endpointslice, node, ingress)") + return errors.New("role missing (one of: pod, service, endpoints, endpointslice, node, ingress)") } err = c.HTTPClientConfig.Validate() if err != nil { @@ -185,20 +175,20 @@ func (c *SDConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { } if c.APIServer.URL != nil && c.KubeConfig != "" { // Api-server and kubeconfig_file are mutually exclusive - return fmt.Errorf("cannot use 'kubeconfig_file' and 'api_server' simultaneously") + return errors.New("cannot use 'kubeconfig_file' and 'api_server' simultaneously") } if c.KubeConfig != "" && !reflect.DeepEqual(c.HTTPClientConfig, config.DefaultHTTPClientConfig) { // Kubeconfig_file and custom http config are mutually exclusive - return fmt.Errorf("cannot use a custom HTTP client configuration together with 'kubeconfig_file'") + return errors.New("cannot use a custom HTTP client configuration together with 'kubeconfig_file'") } if c.APIServer.URL == nil && !reflect.DeepEqual(c.HTTPClientConfig, config.DefaultHTTPClientConfig) { - return fmt.Errorf("to use custom HTTP client configuration please provide the 'api_server' URL explicitly") + return errors.New("to use custom HTTP client configuration please provide the 'api_server' URL explicitly") } if c.APIServer.URL != nil && c.NamespaceDiscovery.IncludeOwnNamespace { - return fmt.Errorf("cannot use 'api_server' and 'namespaces.own_namespace' simultaneously") + return errors.New("cannot use 'api_server' and 'namespaces.own_namespace' simultaneously") } if c.KubeConfig != "" && c.NamespaceDiscovery.IncludeOwnNamespace { - return fmt.Errorf("cannot use 'kubeconfig_file' and 'namespaces.own_namespace' simultaneously") + return errors.New("cannot use 'kubeconfig_file' and 'namespaces.own_namespace' simultaneously") } foundSelectorRoles := make(map[Role]struct{}) @@ -264,7 +254,7 @@ type Discovery struct { sync.RWMutex client kubernetes.Interface role Role - logger log.Logger + logger *slog.Logger namespaceDiscovery *NamespaceDiscovery discoverers []discovery.Discoverer selectors roleSelector @@ -289,14 +279,14 @@ func (d *Discovery) getNamespaces() []string { } // New creates a new Kubernetes discovery for the given role. 
-func New(l log.Logger, metrics discovery.DiscovererMetrics, conf *SDConfig) (*Discovery, error) { +func New(l *slog.Logger, metrics discovery.DiscovererMetrics, conf *SDConfig) (*Discovery, error) { m, ok := metrics.(*kubernetesMetrics) if !ok { - return nil, fmt.Errorf("invalid discovery metrics type") + return nil, errors.New("invalid discovery metrics type") } if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } var ( kcfg *rest.Config @@ -328,7 +318,7 @@ func New(l log.Logger, metrics discovery.DiscovererMetrics, conf *SDConfig) (*Di ownNamespace = string(ownNamespaceContents) } - level.Info(l).Log("msg", "Using pod service account via in-cluster config") + l.Info("Using pod service account via in-cluster config") default: rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "kubernetes_sd") if err != nil { @@ -340,7 +330,7 @@ func New(l log.Logger, metrics discovery.DiscovererMetrics, conf *SDConfig) (*Di } } - kcfg.UserAgent = userAgent + kcfg.UserAgent = version.PrometheusUserAgent() kcfg.ContentType = "application/vnd.kubernetes.protobuf" c, err := kubernetes.NewForConfig(kcfg) @@ -401,55 +391,22 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { switch d.role { case RoleEndpointSlice: - // Check "networking.k8s.io/v1" availability with retries. - // If "v1" is not available, use "networking.k8s.io/v1beta1" for backward compatibility - var v1Supported bool - if retryOnError(ctx, 10*time.Second, - func() (err error) { - v1Supported, err = checkDiscoveryV1Supported(d.client) - if err != nil { - level.Error(d.logger).Log("msg", "Failed to check networking.k8s.io/v1 availability", "err", err) - } - return err - }, - ) { - d.Unlock() - return - } - for _, namespace := range namespaces { var informer cache.SharedIndexInformer - if v1Supported { - e := d.client.DiscoveryV1().EndpointSlices(namespace) - elw := &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - options.FieldSelector = d.selectors.endpointslice.field - options.LabelSelector = d.selectors.endpointslice.label - return e.List(ctx, options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - options.FieldSelector = d.selectors.endpointslice.field - options.LabelSelector = d.selectors.endpointslice.label - return e.Watch(ctx, options) - }, - } - informer = d.newEndpointSlicesByNodeInformer(elw, &disv1.EndpointSlice{}) - } else { - e := d.client.DiscoveryV1beta1().EndpointSlices(namespace) - elw := &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - options.FieldSelector = d.selectors.endpointslice.field - options.LabelSelector = d.selectors.endpointslice.label - return e.List(ctx, options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - options.FieldSelector = d.selectors.endpointslice.field - options.LabelSelector = d.selectors.endpointslice.label - return e.Watch(ctx, options) - }, - } - informer = d.newEndpointSlicesByNodeInformer(elw, &disv1beta1.EndpointSlice{}) + e := d.client.DiscoveryV1().EndpointSlices(namespace) + elw := &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + options.FieldSelector = d.selectors.endpointslice.field + options.LabelSelector = d.selectors.endpointslice.label + return e.List(ctx, options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + options.FieldSelector = d.selectors.endpointslice.field + options.LabelSelector = 
d.selectors.endpointslice.label + return e.Watch(ctx, options) + }, } + informer = d.newEndpointSlicesByNodeInformer(elw, &disv1.EndpointSlice{}) s := d.client.CoreV1().Services(namespace) slw := &cache.ListWatch{ @@ -483,7 +440,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { go nodeInf.Run(ctx.Done()) } eps := NewEndpointSlice( - log.With(d.logger, "role", "endpointslice"), + d.logger.With("role", "endpointslice"), informer, d.mustNewSharedInformer(slw, &apiv1.Service{}, resyncDisabled), d.mustNewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled), @@ -543,7 +500,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } eps := NewEndpoints( - log.With(d.logger, "role", "endpoint"), + d.logger.With("role", "endpoint"), d.newEndpointsByNodeInformer(elw), d.mustNewSharedInformer(slw, &apiv1.Service{}, resyncDisabled), d.mustNewSharedInformer(plw, &apiv1.Pod{}, resyncDisabled), @@ -577,7 +534,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { }, } pod := NewPod( - log.With(d.logger, "role", "pod"), + d.logger.With("role", "pod"), d.newPodsByNodeInformer(plw), nodeInformer, d.metrics.eventCount, @@ -601,7 +558,7 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { }, } svc := NewService( - log.With(d.logger, "role", "service"), + d.logger.With("role", "service"), d.mustNewSharedInformer(slw, &apiv1.Service{}, resyncDisabled), d.metrics.eventCount, ) @@ -609,57 +566,24 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { go svc.informer.Run(ctx.Done()) } case RoleIngress: - // Check "networking.k8s.io/v1" availability with retries. - // If "v1" is not available, use "networking.k8s.io/v1beta1" for backward compatibility - var v1Supported bool - if retryOnError(ctx, 10*time.Second, - func() (err error) { - v1Supported, err = checkNetworkingV1Supported(d.client) - if err != nil { - level.Error(d.logger).Log("msg", "Failed to check networking.k8s.io/v1 availability", "err", err) - } - return err - }, - ) { - d.Unlock() - return - } - for _, namespace := range namespaces { var informer cache.SharedInformer - if v1Supported { - i := d.client.NetworkingV1().Ingresses(namespace) - ilw := &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - options.FieldSelector = d.selectors.ingress.field - options.LabelSelector = d.selectors.ingress.label - return i.List(ctx, options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - options.FieldSelector = d.selectors.ingress.field - options.LabelSelector = d.selectors.ingress.label - return i.Watch(ctx, options) - }, - } - informer = d.mustNewSharedInformer(ilw, &networkv1.Ingress{}, resyncDisabled) - } else { - i := d.client.NetworkingV1beta1().Ingresses(namespace) - ilw := &cache.ListWatch{ - ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { - options.FieldSelector = d.selectors.ingress.field - options.LabelSelector = d.selectors.ingress.label - return i.List(ctx, options) - }, - WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { - options.FieldSelector = d.selectors.ingress.field - options.LabelSelector = d.selectors.ingress.label - return i.Watch(ctx, options) - }, - } - informer = d.mustNewSharedInformer(ilw, &v1beta1.Ingress{}, resyncDisabled) + i := d.client.NetworkingV1().Ingresses(namespace) + ilw := &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + 
options.FieldSelector = d.selectors.ingress.field + options.LabelSelector = d.selectors.ingress.label + return i.List(ctx, options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + options.FieldSelector = d.selectors.ingress.field + options.LabelSelector = d.selectors.ingress.label + return i.Watch(ctx, options) + }, } + informer = d.mustNewSharedInformer(ilw, &networkv1.Ingress{}, resyncDisabled) ingress := NewIngress( - log.With(d.logger, "role", "ingress"), + d.logger.With("role", "ingress"), informer, d.metrics.eventCount, ) @@ -668,11 +592,11 @@ func (d *Discovery) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { } case RoleNode: nodeInformer := d.newNodeInformer(ctx) - node := NewNode(log.With(d.logger, "role", "node"), nodeInformer, d.metrics.eventCount) + node := NewNode(d.logger.With("role", "node"), nodeInformer, d.metrics.eventCount) d.discoverers = append(d.discoverers, node) go node.informer.Run(ctx.Done()) default: - level.Error(d.logger).Log("msg", "unknown Kubernetes discovery kind", "role", d.role) + d.logger.Error("unknown Kubernetes discovery kind", "role", d.role) } var wg sync.WaitGroup @@ -720,20 +644,6 @@ func retryOnError(ctx context.Context, interval time.Duration, f func() error) ( } } -func checkNetworkingV1Supported(client kubernetes.Interface) (bool, error) { - k8sVer, err := client.Discovery().ServerVersion() - if err != nil { - return false, err - } - semVer, err := utilversion.ParseSemantic(k8sVer.String()) - if err != nil { - return false, err - } - // networking.k8s.io/v1 is available since Kubernetes v1.19 - // https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.19.md - return semVer.Major() >= 1 && semVer.Minor() >= 19, nil -} - func (d *Discovery) newNodeInformer(ctx context.Context) cache.SharedInformer { nlw := &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { @@ -756,7 +666,7 @@ func (d *Discovery) newPodsByNodeInformer(plw *cache.ListWatch) cache.SharedInde indexers[nodeIndex] = func(obj interface{}) ([]string, error) { pod, ok := obj.(*apiv1.Pod) if !ok { - return nil, fmt.Errorf("object is not a pod") + return nil, errors.New("object is not a pod") } return []string{pod.Spec.NodeName}, nil } @@ -770,7 +680,7 @@ func (d *Discovery) newEndpointsByNodeInformer(plw *cache.ListWatch) cache.Share indexers[podIndex] = func(obj interface{}) ([]string, error) { e, ok := obj.(*apiv1.Endpoints) if !ok { - return nil, fmt.Errorf("object is not endpoints") + return nil, errors.New("object is not endpoints") } var pods []string for _, target := range e.Subsets { @@ -789,7 +699,7 @@ func (d *Discovery) newEndpointsByNodeInformer(plw *cache.ListWatch) cache.Share indexers[nodeIndex] = func(obj interface{}) ([]string, error) { e, ok := obj.(*apiv1.Endpoints) if !ok { - return nil, fmt.Errorf("object is not endpoints") + return nil, errors.New("object is not endpoints") } var nodes []string for _, target := range e.Subsets { @@ -834,21 +744,8 @@ func (d *Discovery) newEndpointSlicesByNodeInformer(plw *cache.ListWatch, object } } } - case *disv1beta1.EndpointSlice: - for _, target := range e.Endpoints { - if target.TargetRef != nil { - switch target.TargetRef.Kind { - case "Pod": - if target.NodeName != nil { - nodes = append(nodes, *target.NodeName) - } - case "Node": - nodes = append(nodes, target.TargetRef.Name) - } - } - } default: - return nil, fmt.Errorf("object is not an endpointslice") + return nil, errors.New("object is not an endpointslice") } return nodes, 
nil @@ -882,21 +779,6 @@ func (d *Discovery) mustNewSharedIndexInformer(lw cache.ListerWatcher, exampleOb return informer } -func checkDiscoveryV1Supported(client kubernetes.Interface) (bool, error) { - k8sVer, err := client.Discovery().ServerVersion() - if err != nil { - return false, err - } - semVer, err := utilversion.ParseSemantic(k8sVer.String()) - if err != nil { - return false, err - } - // The discovery.k8s.io/v1beta1 API version of EndpointSlice will no longer be served in v1.25. - // discovery.k8s.io/v1 is available since Kubernetes v1.21 - // https://kubernetes.io/docs/reference/using-api/deprecation-guide/#v1-25 - return semVer.Major() >= 1 && semVer.Minor() >= 21, nil -} - func addObjectMetaLabels(labelSet model.LabelSet, objectMeta metav1.ObjectMeta, role Role) { labelSet[model.LabelName(metaLabelPrefix+string(role)+"_name")] = lv(objectMeta.Name) @@ -916,3 +798,13 @@ func addObjectMetaLabels(labelSet model.LabelSet, objectMeta metav1.ObjectMeta, func namespacedName(namespace, name string) string { return namespace + "/" + name } + +// nodeName knows how to handle the cache.DeletedFinalStateUnknown tombstone. +// It assumes the MetaNamespaceKeyFunc keyFunc is used, which uses the node name as the tombstone key. +func nodeName(o interface{}) (string, error) { + key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(o) + if err != nil { + return "", err + } + return key, nil +} diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/metrics.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/metrics.go index fe419bc782..ba3cb1d32a 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/metrics.go +++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/metrics.go @@ -28,7 +28,7 @@ type kubernetesMetrics struct { metricRegisterer discovery.MetricRegisterer } -func newDiscovererMetrics(reg prometheus.Registerer, rmi discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { +func newDiscovererMetrics(reg prometheus.Registerer, _ discovery.RefreshMetricsInstantiator) discovery.DiscovererMetrics { m := &kubernetesMetrics{ eventCount: prometheus.NewCounterVec( prometheus.CounterOpts{ diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/node.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/node.go index 74d87e22c4..0e0c5745f2 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/node.go +++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/node.go @@ -17,13 +17,13 @@ import ( "context" "errors" "fmt" + "log/slog" "net" "strconv" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" apiv1 "k8s.io/api/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" @@ -38,16 +38,16 @@ const ( // Node discovers Kubernetes nodes. type Node struct { - logger log.Logger + logger *slog.Logger informer cache.SharedInformer store cache.Store queue *workqueue.Type } // NewNode returns a new node discovery. 
-func NewNode(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Node { +func NewNode(l *slog.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Node { if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } nodeAddCount := eventCount.WithLabelValues(RoleNode.String(), MetricLabelRoleAdd) @@ -76,13 +76,13 @@ func NewNode(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.Coun }, }) if err != nil { - level.Error(l).Log("msg", "Error adding nodes event handler.", "err", err) + l.Error("Error adding nodes event handler.", "err", err) } return n } func (n *Node) enqueue(obj interface{}) { - key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) + key, err := nodeName(obj) if err != nil { return } @@ -96,7 +96,7 @@ func (n *Node) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { if !cache.WaitForCacheSync(ctx.Done(), n.informer.HasSynced) { if !errors.Is(ctx.Err(), context.Canceled) { - level.Error(n.logger).Log("msg", "node informer unable to sync cache") + n.logger.Error("node informer unable to sync cache") } return } @@ -133,7 +133,7 @@ func (n *Node) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool } node, err := convertToNode(o) if err != nil { - level.Error(n.logger).Log("msg", "converting to Node object failed", "err", err) + n.logger.Error("converting to Node object failed", "err", err) return true } send(ctx, ch, n.buildNode(node)) @@ -181,7 +181,7 @@ func (n *Node) buildNode(node *apiv1.Node) *targetgroup.Group { addr, addrMap, err := nodeAddress(node) if err != nil { - level.Warn(n.logger).Log("msg", "No node address found", "err", err) + n.logger.Warn("No node address found", "err", err) return nil } addr = net.JoinHostPort(addr, strconv.FormatInt(int64(node.Status.DaemonEndpoints.KubeletEndpoint.Port), 10)) diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/pod.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/pod.go index 02990e415f..8704a66239 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/pod.go +++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/pod.go @@ -17,14 +17,14 @@ import ( "context" "errors" "fmt" + "log/slog" "net" "strconv" "strings" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/cache" @@ -44,14 +44,14 @@ type Pod struct { nodeInf cache.SharedInformer withNodeMetadata bool store cache.Store - logger log.Logger + logger *slog.Logger queue *workqueue.Type } // NewPod creates a new pod discovery. 
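The enqueue path above now goes through the new nodeName helper because a delete callback may deliver a cache.DeletedFinalStateUnknown tombstone instead of a live object; cache.DeletionHandlingMetaNamespaceKeyFunc unwraps the tombstone and returns the key recorded at deletion time. A standalone sketch of that behavior, using an invented node name:

package main

import (
	"fmt"

	apiv1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	node := &apiv1.Node{ObjectMeta: metav1.ObjectMeta{Name: "worker-0"}}

	// Live object: the key is the bare name, since nodes are cluster-scoped.
	key, _ := cache.DeletionHandlingMetaNamespaceKeyFunc(node)
	fmt.Println(key) // worker-0

	// Missed-delete tombstone: the key func unwraps it and returns the key
	// recorded at deletion time, where a direct *apiv1.Node type assertion
	// on the callback payload would fail.
	tombstone := cache.DeletedFinalStateUnknown{Key: "worker-0", Obj: node}
	key, _ = cache.DeletionHandlingMetaNamespaceKeyFunc(tombstone)
	fmt.Println(key) // worker-0
}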
-func NewPod(l log.Logger, pods cache.SharedIndexInformer, nodes cache.SharedInformer, eventCount *prometheus.CounterVec) *Pod { +func NewPod(l *slog.Logger, pods cache.SharedIndexInformer, nodes cache.SharedInformer, eventCount *prometheus.CounterVec) *Pod { if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } podAddCount := eventCount.WithLabelValues(RolePod.String(), MetricLabelRoleAdd) @@ -81,7 +81,7 @@ func NewPod(l log.Logger, pods cache.SharedIndexInformer, nodes cache.SharedInfo }, }) if err != nil { - level.Error(l).Log("msg", "Error adding pods event handler.", "err", err) + l.Error("Error adding pods event handler.", "err", err) } if p.withNodeMetadata { @@ -95,12 +95,15 @@ func NewPod(l log.Logger, pods cache.SharedIndexInformer, nodes cache.SharedInfo p.enqueuePodsForNode(node.Name) }, DeleteFunc: func(o interface{}) { - node := o.(*apiv1.Node) - p.enqueuePodsForNode(node.Name) + nodeName, err := nodeName(o) + if err != nil { + l.Error("Error getting Node name", "err", err) + } + p.enqueuePodsForNode(nodeName) }, }) if err != nil { - level.Error(l).Log("msg", "Error adding pods event handler.", "err", err) + l.Error("Error adding pods event handler.", "err", err) } } @@ -127,7 +130,7 @@ func (p *Pod) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { if !cache.WaitForCacheSync(ctx.Done(), cacheSyncs...) { if !errors.Is(ctx.Err(), context.Canceled) { - level.Error(p.logger).Log("msg", "pod informer unable to sync cache") + p.logger.Error("pod informer unable to sync cache") } return } @@ -164,7 +167,7 @@ func (p *Pod) process(ctx context.Context, ch chan<- []*targetgroup.Group) bool } pod, err := convertToPod(o) if err != nil { - level.Error(p.logger).Log("msg", "converting to Pod object failed", "err", err) + p.logger.Error("converting to Pod object failed", "err", err) return true } send(ctx, ch, p.buildPod(pod)) @@ -246,7 +249,7 @@ func (p *Pod) findPodContainerStatus(statuses *[]apiv1.ContainerStatus, containe func (p *Pod) findPodContainerID(statuses *[]apiv1.ContainerStatus, containerName string) string { cStatus, err := p.findPodContainerStatus(statuses, containerName) if err != nil { - level.Debug(p.logger).Log("msg", "cannot find container ID", "err", err) + p.logger.Debug("cannot find container ID", "err", err) return "" } return cStatus.ContainerID @@ -315,7 +318,7 @@ func (p *Pod) buildPod(pod *apiv1.Pod) *targetgroup.Group { func (p *Pod) enqueuePodsForNode(nodeName string) { pods, err := p.podInf.GetIndexer().ByIndex(nodeIndex, nodeName) if err != nil { - level.Error(p.logger).Log("msg", "Error getting pods for node", "node", nodeName, "err", err) + p.logger.Error("Error getting pods for node", "node", nodeName, "err", err) return } diff --git a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/service.go b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/service.go index 51204a5a1a..e666497c86 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/kubernetes/service.go +++ b/vendor/github.com/prometheus/prometheus/discovery/kubernetes/service.go @@ -17,13 +17,13 @@ import ( "context" "errors" "fmt" + "log/slog" "net" "strconv" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/common/promslog" apiv1 "k8s.io/api/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" @@ -33,16 +33,16 @@ import ( // Service implements discovery of Kubernetes services. 
type Service struct { - logger log.Logger + logger *slog.Logger informer cache.SharedInformer store cache.Store queue *workqueue.Type } // NewService returns a new service discovery. -func NewService(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Service { +func NewService(l *slog.Logger, inf cache.SharedInformer, eventCount *prometheus.CounterVec) *Service { if l == nil { - l = log.NewNopLogger() + l = promslog.NewNopLogger() } svcAddCount := eventCount.WithLabelValues(RoleService.String(), MetricLabelRoleAdd) @@ -71,7 +71,7 @@ func NewService(l log.Logger, inf cache.SharedInformer, eventCount *prometheus.C }, }) if err != nil { - level.Error(l).Log("msg", "Error adding services event handler.", "err", err) + l.Error("Error adding services event handler.", "err", err) } return s } @@ -91,7 +91,7 @@ func (s *Service) Run(ctx context.Context, ch chan<- []*targetgroup.Group) { if !cache.WaitForCacheSync(ctx.Done(), s.informer.HasSynced) { if !errors.Is(ctx.Err(), context.Canceled) { - level.Error(s.logger).Log("msg", "service informer unable to sync cache") + s.logger.Error("service informer unable to sync cache") } return } @@ -128,7 +128,7 @@ func (s *Service) process(ctx context.Context, ch chan<- []*targetgroup.Group) b } eps, err := convertToService(o) if err != nil { - level.Error(s.logger).Log("msg", "converting to Service object failed", "err", err) + s.logger.Error("converting to Service object failed", "err", err) return true } send(ctx, ch, s.buildService(eps)) diff --git a/vendor/github.com/prometheus/prometheus/discovery/manager.go b/vendor/github.com/prometheus/prometheus/discovery/manager.go index 897d7d151c..3219117d2a 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/manager.go +++ b/vendor/github.com/prometheus/prometheus/discovery/manager.go @@ -16,14 +16,14 @@ package discovery import ( "context" "fmt" + "log/slog" "reflect" "sync" "time" - "github.com/go-kit/log" - "github.com/go-kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/config" + "github.com/prometheus/common/promslog" "github.com/prometheus/prometheus/discovery/targetgroup" ) @@ -64,7 +64,7 @@ func (p *Provider) Config() interface{} { return p.config } -// Registers the metrics needed for SD mechanisms. +// CreateAndRegisterSDMetrics registers the metrics needed for SD mechanisms. // Does not register the metrics for the Discovery Manager. // TODO(ptodev): Add ability to unregister the metrics? func CreateAndRegisterSDMetrics(reg prometheus.Registerer) (map[string]DiscovererMetrics, error) { @@ -81,9 +81,9 @@ func CreateAndRegisterSDMetrics(reg prometheus.Registerer) (map[string]Discovere } // NewManager is the Discovery Manager constructor. -func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Registerer, sdMetrics map[string]DiscovererMetrics, options ...func(*Manager)) *Manager { +func NewManager(ctx context.Context, logger *slog.Logger, registerer prometheus.Registerer, sdMetrics map[string]DiscovererMetrics, options ...func(*Manager)) *Manager { if logger == nil { - logger = log.NewNopLogger() + logger = promslog.NewNopLogger() } mgr := &Manager{ logger: logger, @@ -101,12 +101,12 @@ func NewManager(ctx context.Context, logger log.Logger, registerer prometheus.Re // Register the metrics. // We have to do this after setting all options, so that the name of the Manager is set. 
- if metrics, err := NewManagerMetrics(registerer, mgr.name); err == nil { - mgr.metrics = metrics - } else { - level.Error(logger).Log("msg", "Failed to create discovery manager metrics", "manager", mgr.name, "err", err) + metrics, err := NewManagerMetrics(registerer, mgr.name) + if err != nil { + logger.Error("Failed to create discovery manager metrics", "manager", mgr.name, "err", err) return nil } + mgr.metrics = metrics return mgr } @@ -141,7 +141,7 @@ func HTTPClientOptions(opts ...config.HTTPClientOption) func(*Manager) { // Manager maintains a set of discovery providers and sends each update to a map channel. // Targets are grouped by the target set name. type Manager struct { - logger log.Logger + logger *slog.Logger name string httpOpts []config.HTTPClientOption mtx sync.RWMutex @@ -212,9 +212,7 @@ func (m *Manager) ApplyConfig(cfg map[string]Configs) error { m.metrics.FailedConfigs.Set(float64(failedCount)) var ( - wg sync.WaitGroup - // keep shows if we keep any providers after reload. - keep bool + wg sync.WaitGroup newProviders []*Provider ) for _, prov := range m.providers { @@ -228,13 +226,12 @@ func (m *Manager) ApplyConfig(cfg map[string]Configs) error { continue } newProviders = append(newProviders, prov) - // refTargets keeps reference targets used to populate new subs' targets + // refTargets keeps reference targets used to populate new subs' targets as they should be the same. var refTargets map[string]*targetgroup.Group prov.mu.Lock() m.targetsMtx.Lock() for s := range prov.subs { - keep = true refTargets = m.targets[poolKey{s, prov.name}] // Remove obsolete subs' targets. if _, ok := prov.newSubs[s]; !ok { @@ -267,7 +264,9 @@ func (m *Manager) ApplyConfig(cfg map[string]Configs) error { // While startProvider does pull the trigger, it may take some time to do so, therefore // we pull the trigger as soon as possible so that downstream managers can populate their state. // See https://github.com/prometheus/prometheus/pull/8639 for details. - if keep { + // This also helps making the downstream managers drop stale targets as soon as possible. + // See https://github.com/prometheus/prometheus/pull/13147 for details. + if len(m.providers) > 0 { select { case m.triggerSend <- struct{}{}: default: @@ -288,12 +287,14 @@ func (m *Manager) StartCustomProvider(ctx context.Context, name string, worker D name: {}, }, } + m.mtx.Lock() m.providers = append(m.providers, p) + m.mtx.Unlock() m.startProvider(ctx, p) } func (m *Manager) startProvider(ctx context.Context, p *Provider) { - level.Debug(m.logger).Log("msg", "Starting provider", "provider", p.name, "subs", fmt.Sprintf("%v", p.subs)) + m.logger.Debug("Starting provider", "provider", p.name, "subs", fmt.Sprintf("%v", p.subs)) ctx, cancel := context.WithCancel(ctx) updates := make(chan []*targetgroup.Group) @@ -327,7 +328,7 @@ func (m *Manager) updater(ctx context.Context, p *Provider, updates chan []*targ case tgs, ok := <-updates: m.metrics.ReceivedUpdates.Inc() if !ok { - level.Debug(m.logger).Log("msg", "Discoverer channel closed", "provider", p.name) + m.logger.Debug("Discoverer channel closed", "provider", p.name) // Wait for provider cancellation to ensure targets are cleaned up when expected. 
<-ctx.Done() return @@ -363,7 +364,7 @@ func (m *Manager) sender() { case m.syncCh <- m.allGroups(): default: m.metrics.DelayedUpdates.Inc() - level.Debug(m.logger).Log("msg", "Discovery receiver's channel was full so will retry the next cycle") + m.logger.Debug("Discovery receiver's channel was full so will retry the next cycle") select { case m.triggerSend <- struct{}{}: default: @@ -393,8 +394,16 @@ func (m *Manager) updateGroup(poolKey poolKey, tgs []*targetgroup.Group) { m.targets[poolKey] = make(map[string]*targetgroup.Group) } for _, tg := range tgs { - if tg != nil { // Some Discoverers send nil target group so need to check for it to avoid panics. + // Some Discoverers send nil target group so need to check for it to avoid panics. + if tg == nil { + continue + } + if len(tg.Targets) > 0 { m.targets[poolKey][tg.Source] = tg + } else { + // The target group is empty, drop the corresponding entry to avoid leaks. + // In case the group yielded targets before, allGroups() will take care of making consumers drop them. + delete(m.targets[poolKey], tg.Source) } } } @@ -403,19 +412,33 @@ func (m *Manager) allGroups() map[string][]*targetgroup.Group { tSets := map[string][]*targetgroup.Group{} n := map[string]int{} + m.mtx.RLock() m.targetsMtx.Lock() - defer m.targetsMtx.Unlock() - for pkey, tsets := range m.targets { - for _, tg := range tsets { - // Even if the target group 'tg' is empty we still need to send it to the 'Scrape manager' - // to signal that it needs to stop all scrape loops for this target set. - tSets[pkey.setName] = append(tSets[pkey.setName], tg) - n[pkey.setName] += len(tg.Targets) + for _, p := range m.providers { + p.mu.RLock() + for s := range p.subs { + // Send empty lists for subs without any targets to make sure old stale targets are dropped by consumers. + // See: https://github.com/prometheus/prometheus/issues/12858 for details. + if _, ok := tSets[s]; !ok { + tSets[s] = []*targetgroup.Group{} + n[s] = 0 + } + if tsets, ok := m.targets[poolKey{s, p.name}]; ok { + for _, tg := range tsets { + tSets[s] = append(tSets[s], tg) + n[s] += len(tg.Targets) + } + } } + p.mu.RUnlock() } + m.targetsMtx.Unlock() + m.mtx.RUnlock() + for setName, v := range n { m.metrics.DiscoveredTargets.WithLabelValues(setName).Set(float64(v)) } + return tSets } @@ -435,12 +458,12 @@ func (m *Manager) registerProviders(cfgs Configs, setName string) int { } typ := cfg.Name() d, err := cfg.NewDiscoverer(DiscovererOptions{ - Logger: log.With(m.logger, "discovery", typ, "config", setName), + Logger: m.logger.With("discovery", typ, "config", setName), HTTPClientOptions: m.httpOpts, Metrics: m.sdMetrics[typ], }) if err != nil { - level.Error(m.logger).Log("msg", "Cannot create service discovery", "err", err, "type", typ, "config", setName) + m.logger.Error("Cannot create service discovery", "err", err, "type", typ, "config", setName) failed++ return } diff --git a/vendor/github.com/prometheus/prometheus/discovery/metrics_refresh.go b/vendor/github.com/prometheus/prometheus/discovery/metrics_refresh.go index d621165ced..ef49e591a3 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/metrics_refresh.go +++ b/vendor/github.com/prometheus/prometheus/discovery/metrics_refresh.go @@ -17,7 +17,7 @@ import ( "github.com/prometheus/client_golang/prometheus" ) -// Metric vectors for the "refresh" package. +// RefreshMetricsVecs are metric vectors for the "refresh" package. // We define them here in the "discovery" package in order to avoid a cyclic dependency between // "discovery" and "refresh". 
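The updateGroup and allGroups rework above is bookkeeping, not a protocol change: groups that become empty are deleted from the per-provider pool so the entry does not leak, and allGroups emits an empty list for every subscription so consumers still get the signal to drop stale targets (see prometheus/prometheus#12858 and #13147 referenced in the diff). A stripped-down sketch of the pool logic, with a hypothetical local group type standing in for targetgroup.Group:

package main

import "fmt"

// group is a hypothetical stand-in for targetgroup.Group, keeping only the
// fields this bookkeeping cares about.
type group struct {
	Source  string
	Targets []string
}

// updateGroup mirrors the patched logic: nil groups are skipped, non-empty
// groups are stored, and groups that have become empty are deleted from the
// pool so their entries do not leak.
func updateGroup(pool map[string]group, tgs []*group) {
	for _, tg := range tgs {
		if tg == nil { // some discoverers send nil target groups
			continue
		}
		if len(tg.Targets) > 0 {
			pool[tg.Source] = *tg
			continue
		}
		delete(pool, tg.Source)
	}
}

func main() {
	pool := map[string]group{}
	updateGroup(pool, []*group{{Source: "ns/pod-a", Targets: []string{"10.0.0.1:8080"}}})
	fmt.Println(len(pool)) // 1

	// The same source later reports zero targets: its entry is removed.
	updateGroup(pool, []*group{{Source: "ns/pod-a"}})
	fmt.Println(len(pool)) // 0
}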
type RefreshMetricsVecs struct { diff --git a/vendor/github.com/prometheus/prometheus/discovery/registry.go b/vendor/github.com/prometheus/prometheus/discovery/registry.go index 1f491d4ca9..93b88ccfab 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/registry.go +++ b/vendor/github.com/prometheus/prometheus/discovery/registry.go @@ -22,9 +22,8 @@ import ( "strings" "sync" - "gopkg.in/yaml.v2" - "github.com/prometheus/client_golang/prometheus" + "gopkg.in/yaml.v2" "github.com/prometheus/prometheus/discovery/targetgroup" ) @@ -267,7 +266,7 @@ func replaceYAMLTypeError(err error, oldTyp, newTyp reflect.Type) error { func RegisterSDMetrics(registerer prometheus.Registerer, rmm RefreshMetricsManager) (map[string]DiscovererMetrics, error) { err := rmm.Register() if err != nil { - return nil, fmt.Errorf("failed to create service discovery refresh metrics") + return nil, errors.New("failed to create service discovery refresh metrics") } metrics := make(map[string]DiscovererMetrics) @@ -275,7 +274,7 @@ func RegisterSDMetrics(registerer prometheus.Registerer, rmm RefreshMetricsManag currentSdMetrics := conf.NewDiscovererMetrics(registerer, rmm) err = currentSdMetrics.Register() if err != nil { - return nil, fmt.Errorf("failed to create service discovery metrics") + return nil, errors.New("failed to create service discovery metrics") } metrics[conf.Name()] = currentSdMetrics } diff --git a/vendor/github.com/prometheus/prometheus/discovery/util.go b/vendor/github.com/prometheus/prometheus/discovery/util.go index 83cc640dd9..4e2a088518 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/util.go +++ b/vendor/github.com/prometheus/prometheus/discovery/util.go @@ -19,8 +19,8 @@ import ( "github.com/prometheus/client_golang/prometheus" ) -// A utility to be used by implementations of discovery.Discoverer -// which need to manage the lifetime of their metrics. +// MetricRegisterer is used by implementations of discovery.Discoverer that need +// to manage the lifetime of their metrics. type MetricRegisterer interface { RegisterMetrics() error UnregisterMetrics() @@ -34,7 +34,7 @@ type metricRegistererImpl struct { var _ MetricRegisterer = &metricRegistererImpl{} -// Creates an instance of a MetricRegisterer. +// NewMetricRegisterer creates an instance of a MetricRegisterer. // Typically called inside the implementation of the NewDiscoverer() method. func NewMetricRegisterer(reg prometheus.Registerer, metrics []prometheus.Collector) MetricRegisterer { return &metricRegistererImpl{ diff --git a/vendor/github.com/prometheus/prometheus/model/exemplar/exemplar.go b/vendor/github.com/prometheus/prometheus/model/exemplar/exemplar.go index 08f55374ef..d03940f1b2 100644 --- a/vendor/github.com/prometheus/prometheus/model/exemplar/exemplar.go +++ b/vendor/github.com/prometheus/prometheus/model/exemplar/exemplar.go @@ -15,8 +15,10 @@ package exemplar import "github.com/prometheus/prometheus/model/labels" -// The combined length of the label names and values of an Exemplar's LabelSet MUST NOT exceed 128 UTF-8 characters -// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#exemplars +// ExemplarMaxLabelSetLength is defined by OpenMetrics: "The combined length of +// the label names and values of an Exemplar's LabelSet MUST NOT exceed 128 +// UTF-8 characters." 
+// https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#exemplars const ExemplarMaxLabelSetLength = 128 // Exemplar is additional information associated with a time series. @@ -49,7 +51,7 @@ func (e Exemplar) Equals(e2 Exemplar) bool { return e.Value == e2.Value } -// Sort first by timestamp, then value, then labels. +// Compare first timestamps, then values, then labels. func Compare(a, b Exemplar) int { if a.Ts < b.Ts { return -1 diff --git a/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go b/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go index 2a37ea66d4..e5519a56d6 100644 --- a/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go +++ b/vendor/github.com/prometheus/prometheus/model/histogram/float_histogram.go @@ -14,6 +14,7 @@ package histogram import ( + "errors" "fmt" "math" "strings" @@ -230,6 +231,17 @@ func (h *FloatHistogram) TestExpression() string { res = append(res, fmt.Sprintf("custom_values:%g", m.CustomValues)) } + switch m.CounterResetHint { + case UnknownCounterReset: + // Unknown is the default, don't add anything. + case CounterReset: + res = append(res, "counter_reset_hint:reset") + case NotCounterReset: + res = append(res, "counter_reset_hint:not_reset") + case GaugeType: + res = append(res, "counter_reset_hint:gauge") + } + addBuckets := func(kind, bucketsKey, offsetKey string, buckets []float64, spans []Span) []string { if len(spans) > 1 { panic(fmt.Sprintf("histogram with multiple %s spans not supported", kind)) @@ -293,6 +305,14 @@ func (h *FloatHistogram) Div(scalar float64) *FloatHistogram { h.ZeroCount /= scalar h.Count /= scalar h.Sum /= scalar + // Division by zero removes all buckets. + if scalar == 0 { + h.PositiveBuckets = nil + h.NegativeBuckets = nil + h.PositiveSpans = nil + h.NegativeSpans = nil + return h + } for i := range h.PositiveBuckets { h.PositiveBuckets[i] /= scalar } @@ -342,7 +362,7 @@ func (h *FloatHistogram) Add(other *FloatHistogram) (*FloatHistogram, error) { default: // All other cases shouldn't actually happen. // They are a direct collision of CounterReset and NotCounterReset. - // Conservatively set the CounterResetHint to "unknown" and isse a warning. + // Conservatively set the CounterResetHint to "unknown" and issue a warning. h.CounterResetHint = UnknownCounterReset // TODO(trevorwhitney): Actually issue the warning as soon as the plumbing for it is in place } @@ -658,7 +678,7 @@ func detectReset(currIt, prevIt *floatBucketIterator) bool { if !currIt.Next() { // Reached end of currIt early, therefore // previous histogram has a bucket that the - // current one does not have. Unlass all + // current one does not have. Unless all // remaining buckets in the previous histogram // are unpopulated, this is a reset. 
for { @@ -765,16 +785,16 @@ func (h *FloatHistogram) Validate() error { return fmt.Errorf("custom buckets: %w", err) } if h.ZeroCount != 0 { - return fmt.Errorf("custom buckets: must have zero count of 0") + return errors.New("custom buckets: must have zero count of 0") } if h.ZeroThreshold != 0 { - return fmt.Errorf("custom buckets: must have zero threshold of 0") + return errors.New("custom buckets: must have zero threshold of 0") } if len(h.NegativeSpans) > 0 { - return fmt.Errorf("custom buckets: must not have negative spans") + return errors.New("custom buckets: must not have negative spans") } if len(h.NegativeBuckets) > 0 { - return fmt.Errorf("custom buckets: must not have negative buckets") + return errors.New("custom buckets: must not have negative buckets") } } else { if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil { @@ -788,7 +808,7 @@ func (h *FloatHistogram) Validate() error { return fmt.Errorf("negative side: %w", err) } if h.CustomValues != nil { - return fmt.Errorf("histogram with exponential schema must not have custom bounds") + return errors.New("histogram with exponential schema must not have custom bounds") } } err := checkHistogramBuckets(h.PositiveBuckets, &pCount, false) @@ -891,7 +911,7 @@ func (h *FloatHistogram) trimBucketsInZeroBucket() { // reconcileZeroBuckets finds a zero bucket large enough to include the zero // buckets of both histograms (the receiving histogram and the other histogram) // with a zero threshold that is not within a populated bucket in either -// histogram. This method modifies the receiving histogram accourdingly, but +// histogram. This method modifies the receiving histogram accordingly, but // leaves the other histogram as is. Instead, it returns the zero count the // other histogram would have if it were modified. 
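The Div change earlier in this file gives division by zero a defined result: the bucket slices and spans are dropped outright rather than filled with +Inf entries, while the scalar fields keep ordinary IEEE-754 float semantics. A small sketch against the patched type, with made-up counts:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/histogram"
)

func main() {
	h := &histogram.FloatHistogram{
		Count:           12,
		Sum:             34,
		ZeroCount:       2,
		Schema:          0,
		PositiveSpans:   []histogram.Span{{Offset: 0, Length: 2}},
		PositiveBuckets: []float64{5, 7},
	}

	// With the patched Div, dividing by zero removes all bucket data; the
	// scalar fields still follow float semantics (12 / 0 = +Inf).
	h.Div(0)
	fmt.Println(h.Count, len(h.PositiveBuckets), len(h.PositiveSpans)) // +Inf 0 0
}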
func (h *FloatHistogram) reconcileZeroBuckets(other *FloatHistogram) float64 { @@ -929,10 +949,10 @@ func (h *FloatHistogram) floatBucketIterator( positive bool, absoluteStartValue float64, targetSchema int32, ) floatBucketIterator { if h.UsesCustomBuckets() && targetSchema != h.Schema { - panic(fmt.Errorf("cannot merge from custom buckets schema to exponential schema")) + panic(errors.New("cannot merge from custom buckets schema to exponential schema")) } if !h.UsesCustomBuckets() && IsCustomBucketsSchema(targetSchema) { - panic(fmt.Errorf("cannot merge from exponential buckets schema to custom schema")) + panic(errors.New("cannot merge from exponential buckets schema to custom schema")) } if targetSchema > h.Schema { panic(fmt.Errorf("cannot merge from schema %d to %d", h.Schema, targetSchema)) diff --git a/vendor/github.com/prometheus/prometheus/model/histogram/histogram.go b/vendor/github.com/prometheus/prometheus/model/histogram/histogram.go index e4b99ec420..778aefe282 100644 --- a/vendor/github.com/prometheus/prometheus/model/histogram/histogram.go +++ b/vendor/github.com/prometheus/prometheus/model/histogram/histogram.go @@ -14,6 +14,7 @@ package histogram import ( + "errors" "fmt" "math" "slices" @@ -432,16 +433,16 @@ func (h *Histogram) Validate() error { return fmt.Errorf("custom buckets: %w", err) } if h.ZeroCount != 0 { - return fmt.Errorf("custom buckets: must have zero count of 0") + return errors.New("custom buckets: must have zero count of 0") } if h.ZeroThreshold != 0 { - return fmt.Errorf("custom buckets: must have zero threshold of 0") + return errors.New("custom buckets: must have zero threshold of 0") } if len(h.NegativeSpans) > 0 { - return fmt.Errorf("custom buckets: must not have negative spans") + return errors.New("custom buckets: must not have negative spans") } if len(h.NegativeBuckets) > 0 { - return fmt.Errorf("custom buckets: must not have negative buckets") + return errors.New("custom buckets: must not have negative buckets") } } else { if err := checkHistogramSpans(h.PositiveSpans, len(h.PositiveBuckets)); err != nil { @@ -455,7 +456,7 @@ func (h *Histogram) Validate() error { return fmt.Errorf("negative side: %w", err) } if h.CustomValues != nil { - return fmt.Errorf("histogram with exponential schema must not have custom bounds") + return errors.New("histogram with exponential schema must not have custom bounds") } } err := checkHistogramBuckets(h.PositiveBuckets, &pCount, true) diff --git a/vendor/github.com/prometheus/prometheus/model/histogram/test_utils.go b/vendor/github.com/prometheus/prometheus/model/histogram/test_utils.go index 9e9a711c29..e6b33863bd 100644 --- a/vendor/github.com/prometheus/prometheus/model/histogram/test_utils.go +++ b/vendor/github.com/prometheus/prometheus/model/histogram/test_utils.go @@ -19,12 +19,12 @@ func GenerateBigTestHistograms(numHistograms, numBuckets int) []*Histogram { bucketsPerSide := numBuckets / 2 spanLength := uint32(bucketsPerSide / numSpans) // Given all bucket deltas are 1, sum bucketsPerSide + 1. 
- observationCount := bucketsPerSide * (1 + bucketsPerSide) + observationCount := uint64(bucketsPerSide) * (1 + uint64(bucketsPerSide)) var histograms []*Histogram for i := 0; i < numHistograms; i++ { h := &Histogram{ - Count: uint64(i + observationCount), + Count: uint64(i) + observationCount, ZeroCount: uint64(i), ZeroThreshold: 1e-128, Sum: 18.4 * float64(i+1), diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels.go b/vendor/github.com/prometheus/prometheus/model/labels/labels.go index 01514abf38..ed66d73cbf 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/labels.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/labels.go @@ -19,6 +19,7 @@ import ( "bytes" "slices" "strings" + "unsafe" "github.com/cespare/xxhash/v2" ) @@ -38,10 +39,10 @@ func (ls Labels) Bytes(buf []byte) []byte { b.WriteByte(labelSep) for i, l := range ls { if i > 0 { - b.WriteByte(seps[0]) + b.WriteByte(sep) } b.WriteString(l.Name) - b.WriteByte(seps[0]) + b.WriteByte(sep) b.WriteString(l.Value) } return b.Bytes() @@ -86,9 +87,9 @@ func (ls Labels) Hash() uint64 { } b = append(b, v.Name...) - b = append(b, seps[0]) + b = append(b, sep) b = append(b, v.Value...) - b = append(b, seps[0]) + b = append(b, sep) } return xxhash.Sum64(b) } @@ -106,9 +107,9 @@ func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) { i++ default: b = append(b, ls[i].Name...) - b = append(b, seps[0]) + b = append(b, sep) b = append(b, ls[i].Value...) - b = append(b, seps[0]) + b = append(b, sep) i++ j++ } @@ -130,9 +131,9 @@ func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) { continue } b = append(b, ls[i].Name...) - b = append(b, seps[0]) + b = append(b, sep) b = append(b, ls[i].Value...) - b = append(b, seps[0]) + b = append(b, sep) } return xxhash.Sum64(b), b } @@ -151,10 +152,10 @@ func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte { i++ default: if b.Len() > 1 { - b.WriteByte(seps[0]) + b.WriteByte(sep) } b.WriteString(ls[i].Name) - b.WriteByte(seps[0]) + b.WriteByte(sep) b.WriteString(ls[i].Value) i++ j++ @@ -177,10 +178,10 @@ func (ls Labels) BytesWithoutLabels(buf []byte, names ...string) []byte { continue } if b.Len() > 1 { - b.WriteByte(seps[0]) + b.WriteByte(sep) } b.WriteString(ls[i].Name) - b.WriteByte(seps[0]) + b.WriteByte(sep) b.WriteString(ls[i].Value) } return b.Bytes() @@ -249,15 +250,7 @@ func (ls Labels) WithoutEmpty() Labels { // Equal returns whether the two label sets are equal. func Equal(ls, o Labels) bool { - if len(ls) != len(o) { - return false - } - for i, l := range ls { - if l != o[i] { - return false - } - } - return true + return slices.Equal(ls, o) } // EmptyLabels returns n empty Labels value, for convenience. @@ -315,7 +308,8 @@ func Compare(a, b Labels) int { return len(a) - len(b) } -// Copy labels from b on top of whatever was in ls previously, reusing memory or expanding if needed. +// CopyFrom copies labels from b on top of whatever was in ls previously, +// reusing memory or expanding if needed. func (ls *Labels) CopyFrom(b Labels) { (*ls) = append((*ls)[:0], b...) } @@ -422,7 +416,7 @@ type ScratchBuilder struct { add Labels } -// Symbol-table is no-op, just for api parity with dedupelabels. +// SymbolTable is no-op, just for api parity with dedupelabels. 
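The seps[0] to sep rewrite above is cosmetic (the seps slice survives only for the xxhash append path, which has no WriteByte), while Equal now delegates to slices.Equal over the sorted pairs. A short usage sketch of the unchanged observable behavior:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/model/labels"
)

func main() {
	a := labels.FromStrings("instance", "host:9090", "job", "api")
	b := labels.FromStrings("instance", "host:9090", "job", "api")

	// Equal is now a straight slices.Equal over the name/value pairs.
	fmt.Println(labels.Equal(a, b)) // true

	// Hash joins names and values with the 0xff separator byte (sep), so
	// ("ab","c") and ("a","bc") cannot produce the same byte stream.
	fmt.Println(a.Hash() == b.Hash()) // true
}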
type SymbolTable struct{} func NewSymbolTable() *SymbolTable { return nil } @@ -458,7 +452,7 @@ func (b *ScratchBuilder) Add(name, value string) { b.add = append(b.add, Label{Name: name, Value: value}) } -// Add a name/value pair, using []byte instead of string. +// UnsafeAddBytes adds a name/value pair, using []byte instead of string. // The '-tags stringlabels' version of this function is unsafe, hence the name. // This version is safe - it copies the strings immediately - but we keep the same name so everything compiles. func (b *ScratchBuilder) UnsafeAddBytes(name, value []byte) { @@ -475,15 +469,20 @@ func (b *ScratchBuilder) Assign(ls Labels) { b.add = append(b.add[:0], ls...) // Copy on top of our slice, so we don't retain the input slice. } -// Return the name/value pairs added so far as a Labels object. +// Labels returns the name/value pairs added so far as a Labels object. // Note: if you want them sorted, call Sort() first. func (b *ScratchBuilder) Labels() Labels { // Copy the slice, so the next use of ScratchBuilder doesn't overwrite. return append([]Label{}, b.add...) } -// Write the newly-built Labels out to ls. +// Overwrite the newly-built Labels out to ls. // Callers must ensure that there are no other references to ls, or any strings fetched from it. func (b *ScratchBuilder) Overwrite(ls *Labels) { *ls = append((*ls)[:0], b.add...) } + +// SizeOfLabels returns the approximate space required for n copies of a label. +func SizeOfLabels(name, value string, n uint64) uint64 { + return (uint64(len(name)) + uint64(unsafe.Sizeof(name)) + uint64(len(value)) + uint64(unsafe.Sizeof(value))) * n +} diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels_common.go b/vendor/github.com/prometheus/prometheus/model/labels/labels_common.go index 4bc94f84fe..005eaa509e 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/labels_common.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/labels_common.go @@ -29,10 +29,11 @@ const ( BucketLabel = "le" InstanceName = "instance" - labelSep = '\xfe' + labelSep = '\xfe' // Used at beginning of `Bytes` return. + sep = '\xff' // Used between labels in `Bytes` and `Hash`. ) -var seps = []byte{'\xff'} +var seps = []byte{sep} // Used with Hash, which has no WriteByte method. // Label is a key/value pair of strings. type Label struct { @@ -50,7 +51,11 @@ func (ls Labels) String() string { b.WriteByte(',') b.WriteByte(' ') } - b.WriteString(l.Name) + if !model.LabelName(l.Name).IsValidLegacy() { + b.Write(strconv.AppendQuote(b.AvailableBuffer(), l.Name)) + } else { + b.WriteString(l.Name) + } b.WriteByte('=') b.Write(strconv.AppendQuote(b.AvailableBuffer(), l.Value)) i++ @@ -94,12 +99,23 @@ func (ls *Labels) UnmarshalYAML(unmarshal func(interface{}) error) error { } // IsValid checks if the metric name or label names are valid. -func (ls Labels) IsValid() bool { +func (ls Labels) IsValid(validationScheme model.ValidationScheme) bool { err := ls.Validate(func(l Label) error { - if l.Name == model.MetricNameLabel && !model.IsValidMetricName(model.LabelValue(l.Value)) { - return strconv.ErrSyntax + if l.Name == model.MetricNameLabel { + // If the default validation scheme has been overridden with legacy mode, + // we need to call the special legacy validation checker. 
+ if validationScheme == model.LegacyValidation && !model.IsValidLegacyMetricName(string(model.LabelValue(l.Value))) { + return strconv.ErrSyntax + } + if !model.IsValidMetricName(model.LabelValue(l.Value)) { + return strconv.ErrSyntax + } } - if !model.LabelName(l.Name).IsValid() || !model.LabelValue(l.Value).IsValid() { + if validationScheme == model.LegacyValidation { + if !model.LabelName(l.Name).IsValidLegacy() || !model.LabelValue(l.Value).IsValid() { + return strconv.ErrSyntax + } + } else if !model.LabelName(l.Name).IsValid() || !model.LabelValue(l.Value).IsValid() { return strconv.ErrSyntax } return nil @@ -218,5 +234,5 @@ func contains(s []Label, n string) bool { } func yoloString(b []byte) string { - return *((*string)(unsafe.Pointer(&b))) + return unsafe.String(unsafe.SliceData(b), len(b)) } diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels_dedupelabels.go b/vendor/github.com/prometheus/prometheus/model/labels/labels_dedupelabels.go index 0e5bb048be..a0d83e0044 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/labels_dedupelabels.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/labels_dedupelabels.go @@ -146,13 +146,13 @@ func (ls Labels) Bytes(buf []byte) []byte { b := bytes.NewBuffer(buf[:0]) for i := 0; i < len(ls.data); { if i > 0 { - b.WriteByte(seps[0]) + b.WriteByte(sep) } var name, value string name, i = decodeString(ls.syms, ls.data, i) value, i = decodeString(ls.syms, ls.data, i) b.WriteString(name) - b.WriteByte(seps[0]) + b.WriteByte(sep) b.WriteString(value) } return b.Bytes() @@ -201,9 +201,9 @@ func (ls Labels) Hash() uint64 { } b = append(b, name...) - b = append(b, seps[0]) + b = append(b, sep) b = append(b, value...) - b = append(b, seps[0]) + b = append(b, sep) pos = newPos } return xxhash.Sum64(b) @@ -226,9 +226,9 @@ func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) { } if name == names[j] { b = append(b, name...) - b = append(b, seps[0]) + b = append(b, sep) b = append(b, value...) - b = append(b, seps[0]) + b = append(b, sep) } } @@ -252,9 +252,9 @@ func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) { continue } b = append(b, name...) - b = append(b, seps[0]) + b = append(b, sep) b = append(b, value...) - b = append(b, seps[0]) + b = append(b, sep) } return xxhash.Sum64(b), b } @@ -275,10 +275,10 @@ func (ls Labels) BytesWithLabels(buf []byte, names ...string) []byte { } if lName == names[j] { if b.Len() > 1 { - b.WriteByte(seps[0]) + b.WriteByte(sep) } b.WriteString(lName) - b.WriteByte(seps[0]) + b.WriteByte(sep) b.WriteString(lValue) } pos = newPos @@ -299,10 +299,10 @@ func (ls Labels) BytesWithoutLabels(buf []byte, names ...string) []byte { } if j == len(names) || lName != names[j] { if b.Len() > 1 { - b.WriteByte(seps[0]) + b.WriteByte(sep) } b.WriteString(lName) - b.WriteByte(seps[0]) + b.WriteByte(sep) b.WriteString(lValue) } pos = newPos @@ -815,3 +815,8 @@ func (b *ScratchBuilder) Overwrite(ls *Labels) { ls.syms = b.syms.nameTable ls.data = yoloString(b.overwriteBuffer) } + +// SizeOfLabels returns the approximate space required for n copies of a label. +func SizeOfLabels(name, value string, n uint64) uint64 { + return uint64(len(name)+len(value)) + n*4 // Assuming most symbol-table entries are 2 bytes long. 
+} diff --git a/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go b/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go index bccceb61fe..f49ed96f65 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/labels_stringlabels.go @@ -16,7 +16,6 @@ package labels import ( - "reflect" "slices" "strings" "unsafe" @@ -112,9 +111,9 @@ func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) { } if name == names[j] { b = append(b, name...) - b = append(b, seps[0]) + b = append(b, sep) b = append(b, value...) - b = append(b, seps[0]) + b = append(b, sep) } } @@ -138,9 +137,9 @@ func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) { continue } b = append(b, name...) - b = append(b, seps[0]) + b = append(b, sep) b = append(b, value...) - b = append(b, seps[0]) + b = append(b, sep) } return xxhash.Sum64(b), b } @@ -299,10 +298,8 @@ func Equal(ls, o Labels) bool { func EmptyLabels() Labels { return Labels{} } -func yoloBytes(s string) (b []byte) { - *(*string)(unsafe.Pointer(&b)) = s - (*reflect.SliceHeader)(unsafe.Pointer(&b)).Cap = len(s) - return +func yoloBytes(s string) []byte { + return unsafe.Slice(unsafe.StringData(s), len(s)) } // New returns a sorted Labels from the given labels. @@ -338,8 +335,8 @@ func Compare(a, b Labels) int { } i := 0 // First, go 8 bytes at a time. Data strings are expected to be 8-byte aligned. - sp := unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&shorter)).Data) - lp := unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&longer)).Data) + sp := unsafe.Pointer(unsafe.StringData(shorter)) + lp := unsafe.Pointer(unsafe.StringData(longer)) for ; i < len(shorter)-8; i += 8 { if *(*uint64)(unsafe.Add(sp, i)) != *(*uint64)(unsafe.Add(lp, i)) { break @@ -694,3 +691,8 @@ func NewScratchBuilderWithSymbolTable(_ *SymbolTable, n int) ScratchBuilder { func (b *ScratchBuilder) SetSymbolTable(_ *SymbolTable) { // no-op } + +// SizeOfLabels returns the approximate space required for n copies of a label. +func SizeOfLabels(name, value string, n uint64) uint64 { + return uint64(labelSize(&Label{Name: name, Value: value})) * n +} diff --git a/vendor/github.com/prometheus/prometheus/model/labels/regexp.go b/vendor/github.com/prometheus/prometheus/model/labels/regexp.go index d2151d83dd..cf6c9158e9 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/regexp.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/regexp.go @@ -63,13 +63,13 @@ func NewFastRegexMatcher(v string) (*FastRegexMatcher, error) { // available, even if the string matcher is faster. m.matchString = m.stringMatcher.Matches } else { - parsed, err := syntax.Parse(v, syntax.Perl) + parsed, err := syntax.Parse(v, syntax.Perl|syntax.DotNL) if err != nil { return nil, err } // Simplify the syntax tree to run faster. parsed = parsed.Simplify() - m.re, err = regexp.Compile("^(?:" + parsed.String() + ")$") + m.re, err = regexp.Compile("^(?s:" + parsed.String() + ")$") if err != nil { return nil, err } @@ -802,7 +802,7 @@ type equalMultiStringMapMatcher struct { func (m *equalMultiStringMapMatcher) add(s string) { if !m.caseSensitive { - s = toNormalisedLower(s) + s = toNormalisedLower(s, nil) // Don't pass a stack buffer here - it will always escape to heap. 
} m.values[s] = struct{}{} @@ -840,15 +840,24 @@ func (m *equalMultiStringMapMatcher) setMatches() []string { } func (m *equalMultiStringMapMatcher) Matches(s string) bool { - if !m.caseSensitive { - s = toNormalisedLower(s) + if len(m.values) > 0 { + sNorm := s + var a [32]byte + if !m.caseSensitive { + sNorm = toNormalisedLower(s, a[:]) + } + if _, ok := m.values[sNorm]; ok { + return true + } } - if _, ok := m.values[s]; ok { - return true - } if m.minPrefixLen > 0 && len(s) >= m.minPrefixLen { - for _, matcher := range m.prefixes[s[:m.minPrefixLen]] { + prefix := s[:m.minPrefixLen] + var a [32]byte + if !m.caseSensitive { + prefix = toNormalisedLower(s[:m.minPrefixLen], a[:]) + } + for _, matcher := range m.prefixes[prefix] { if matcher.Matches(s) { return true } @@ -859,22 +868,37 @@ func (m *equalMultiStringMapMatcher) Matches(s string) bool { // toNormalisedLower normalise the input string using "Unicode Normalization Form D" and then convert // it to lower case. -func toNormalisedLower(s string) string { - var buf []byte +func toNormalisedLower(s string, a []byte) string { for i := 0; i < len(s); i++ { c := s[i] if c >= utf8.RuneSelf { return strings.Map(unicode.ToLower, norm.NFKD.String(s)) } if 'A' <= c && c <= 'Z' { - if buf == nil { - buf = []byte(s) - } - buf[i] = c + 'a' - 'A' + return toNormalisedLowerSlow(s, i, a) } } - if buf == nil { - return s + return s +} + +// toNormalisedLowerSlow is split from toNormalisedLower because having a call +// to `copy` slows it down even when it is not called. +func toNormalisedLowerSlow(s string, i int, a []byte) string { + var buf []byte + if cap(a) > len(s) { + buf = a[:len(s)] + copy(buf, s) + } else { + buf = []byte(s) + } + for ; i < len(s); i++ { + c := s[i] + if c >= utf8.RuneSelf { + return strings.Map(unicode.ToLower, norm.NFKD.String(s)) + } + if 'A' <= c && c <= 'Z' { + buf[i] = c + 'a' - 'A' + } } return yoloString(buf) } @@ -967,7 +991,7 @@ func optimizeEqualOrPrefixStringMatchers(input StringMatcher, threshold int) Str return true } - analysePrefixMatcherCallback := func(prefix string, prefixCaseSensitive bool, matcher StringMatcher) bool { + analysePrefixMatcherCallback := func(prefix string, prefixCaseSensitive bool, _ StringMatcher) bool { // Ensure we don't have mixed case sensitivity. if caseSensitiveSet && caseSensitive != prefixCaseSensitive { return false @@ -1002,7 +1026,7 @@ func optimizeEqualOrPrefixStringMatchers(input StringMatcher, threshold int) Str findEqualOrPrefixStringMatchers(input, func(matcher *equalStringMatcher) bool { multiMatcher.add(matcher.s) return true - }, func(prefix string, prefixCaseSensitive bool, matcher StringMatcher) bool { + }, func(prefix string, _ bool, matcher StringMatcher) bool { multiMatcher.addPrefix(prefix, caseSensitive, matcher) return true }) diff --git a/vendor/github.com/prometheus/prometheus/model/labels/sharding.go b/vendor/github.com/prometheus/prometheus/model/labels/sharding.go index 5e3e89fbbb..8b3a369397 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/sharding.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/sharding.go @@ -39,9 +39,9 @@ func StableHash(ls Labels) uint64 { } b = append(b, v.Name...) - b = append(b, seps[0]) + b = append(b, sep) b = append(b, v.Value...) 
- b = append(b, seps[0]) + b = append(b, sep) } return xxhash.Sum64(b) } diff --git a/vendor/github.com/prometheus/prometheus/model/labels/sharding_dedupelabels.go b/vendor/github.com/prometheus/prometheus/model/labels/sharding_dedupelabels.go index 5912724f9b..5bf41b05d6 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/sharding_dedupelabels.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/sharding_dedupelabels.go @@ -43,9 +43,9 @@ func StableHash(ls Labels) uint64 { } b = append(b, name...) - b = append(b, seps[0]) + b = append(b, sep) b = append(b, value...) - b = append(b, seps[0]) + b = append(b, sep) pos = newPos } return xxhash.Sum64(b) diff --git a/vendor/github.com/prometheus/prometheus/model/labels/sharding_stringlabels.go b/vendor/github.com/prometheus/prometheus/model/labels/sharding_stringlabels.go index 3ad2027d8c..798f268eb9 100644 --- a/vendor/github.com/prometheus/prometheus/model/labels/sharding_stringlabels.go +++ b/vendor/github.com/prometheus/prometheus/model/labels/sharding_stringlabels.go @@ -43,9 +43,9 @@ func StableHash(ls Labels) uint64 { } b = append(b, v.Name...) - b = append(b, seps[0]) + b = append(b, sep) b = append(b, v.Value...) - b = append(b, seps[0]) + b = append(b, sep) } if h != nil { return h.Sum64() diff --git a/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go b/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go index 4f33edda43..70daef426f 100644 --- a/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go +++ b/vendor/github.com/prometheus/prometheus/model/relabel/relabel.go @@ -16,6 +16,8 @@ package relabel import ( "crypto/md5" "encoding/binary" + "encoding/json" + "errors" "fmt" "strconv" "strings" @@ -27,7 +29,8 @@ import ( ) var ( - relabelTarget = regexp.MustCompile(`^(?:(?:[a-zA-Z_]|\$(?:\{\w+\}|\w+))+\w*)+$`) + // relabelTargetLegacy allows targeting labels with legacy Prometheus character set, plus ${} variables for dynamic characters from source the metrics. + relabelTargetLegacy = regexp.MustCompile(`^(?:(?:[a-zA-Z_]|\$(?:\{\w+\}|\w+))+\w*)+$`) DefaultRelabelConfig = Config{ Action: Replace, @@ -83,20 +86,20 @@ func (a *Action) UnmarshalYAML(unmarshal func(interface{}) error) error { type Config struct { // A list of labels from which values are taken and concatenated // with the configured separator in order. - SourceLabels model.LabelNames `yaml:"source_labels,flow,omitempty"` + SourceLabels model.LabelNames `yaml:"source_labels,flow,omitempty" json:"sourceLabels,omitempty"` // Separator is the string between concatenated values from the source labels. - Separator string `yaml:"separator,omitempty"` + Separator string `yaml:"separator,omitempty" json:"separator,omitempty"` // Regex against which the concatenation is matched. - Regex Regexp `yaml:"regex,omitempty"` + Regex Regexp `yaml:"regex,omitempty" json:"regex,omitempty"` // Modulus to take of the hash of concatenated values from the source labels. - Modulus uint64 `yaml:"modulus,omitempty"` + Modulus uint64 `yaml:"modulus,omitempty" json:"modulus,omitempty"` // TargetLabel is the label to which the resulting string is written in a replacement. // Regexp interpolation is allowed for the replace action. - TargetLabel string `yaml:"target_label,omitempty"` + TargetLabel string `yaml:"target_label,omitempty" json:"targetLabel,omitempty"` // Replacement is the regex replacement pattern to be used. 
-	Replacement string `yaml:"replacement,omitempty"`
+	Replacement string `yaml:"replacement,omitempty" json:"replacement,omitempty"`
 	// Action is the action to be performed for the relabeling.
-	Action Action `yaml:"action,omitempty"`
+	Action Action `yaml:"action,omitempty" json:"action,omitempty"`
 }
 
 // UnmarshalYAML implements the yaml.Unmarshaler interface.
@@ -114,18 +117,25 @@ func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
 
 func (c *Config) Validate() error {
 	if c.Action == "" {
-		return fmt.Errorf("relabel action cannot be empty")
+		return errors.New("relabel action cannot be empty")
 	}
 	if c.Modulus == 0 && c.Action == HashMod {
-		return fmt.Errorf("relabel configuration for hashmod requires non-zero modulus")
+		return errors.New("relabel configuration for hashmod requires non-zero modulus")
 	}
 	if (c.Action == Replace || c.Action == HashMod || c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && c.TargetLabel == "" {
 		return fmt.Errorf("relabel configuration for %s action requires 'target_label' value", c.Action)
 	}
-	if c.Action == Replace && !strings.Contains(c.TargetLabel, "$") && !model.LabelName(c.TargetLabel).IsValid() {
+	if c.Action == Replace && !varInRegexTemplate(c.TargetLabel) && !model.LabelName(c.TargetLabel).IsValid() {
 		return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action)
 	}
-	if c.Action == Replace && strings.Contains(c.TargetLabel, "$") && !relabelTarget.MatchString(c.TargetLabel) {
+
+	isValidLabelNameWithRegexVarFn := func(value string) bool {
+		// UTF-8 allows ${} characters, so standard validation allows $variables by default.
+		// TODO(bwplotka): Relabelling users cannot put $ and ${<...>} characters in metric names or values.
+		// Design an escaping mechanism to allow that, once a valid use case appears.
+		return model.LabelName(value).IsValid()
+	}
+	if c.Action == Replace && varInRegexTemplate(c.TargetLabel) && !isValidLabelNameWithRegexVarFn(c.TargetLabel) {
 		return fmt.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action)
 	}
 	if (c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && !model.LabelName(c.TargetLabel).IsValid() {
@@ -134,7 +144,7 @@ func (c *Config) Validate() error {
 	if (c.Action == Lowercase || c.Action == Uppercase || c.Action == KeepEqual || c.Action == DropEqual) && c.Replacement != DefaultRelabelConfig.Replacement {
 		return fmt.Errorf("'replacement' can not be set for %s action", c.Action)
 	}
-	if c.Action == LabelMap && !relabelTarget.MatchString(c.Replacement) {
+	if c.Action == LabelMap && !isValidLabelNameWithRegexVarFn(c.Replacement) {
 		return fmt.Errorf("%q is invalid 'replacement' for %s action", c.Replacement, c.Action)
 	}
 	if c.Action == HashMod && !model.LabelName(c.TargetLabel).IsValid() {
@@ -171,7 +181,7 @@ type Regexp struct {
 
 // NewRegexp creates a new anchored Regexp and returns an error if the
 // passed-in regular expression does not compile.
 func NewRegexp(s string) (Regexp, error) {
-	regex, err := regexp.Compile("^(?:" + s + ")$")
+	regex, err := regexp.Compile("^(?s:" + s + ")$")
 	return Regexp{Regexp: regex}, err
 }
@@ -206,6 +216,25 @@ func (re Regexp) MarshalYAML() (interface{}, error) {
 	return nil, nil
 }
 
+// UnmarshalJSON implements the json.Unmarshaler interface.
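+// The JSON form is a plain string holding the pattern; it is compiled via
+// NewRegexp and therefore gets the same `^(?s:...)$` anchoring as YAML input.
+// A hypothetical round trip: unmarshalling `"up|go_.*"` and marshalling the
+// result yields `"up|go_.*"` again, because String() trims the anchors back off.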
+func (re *Regexp) UnmarshalJSON(b []byte) error {
+	var s string
+	if err := json.Unmarshal(b, &s); err != nil {
+		return err
+	}
+	r, err := NewRegexp(s)
+	if err != nil {
+		return err
+	}
+	*re = r
+	return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (re Regexp) MarshalJSON() ([]byte, error) {
+	return json.Marshal(re.String())
+}
+
 // IsZero implements the yaml.IsZeroer interface.
 func (re Regexp) IsZero() bool {
 	return re.Regexp == DefaultRelabelConfig.Regex.Regexp
@@ -213,9 +242,13 @@ func (re Regexp) IsZero() bool {
 
 // String returns the original string used to compile the regular expression.
 func (re Regexp) String() string {
+	if re.Regexp == nil {
+		return ""
+	}
+
 	str := re.Regexp.String()
-	// Trim the anchor `^(?:` prefix and `)$` suffix.
-	return str[4 : len(str)-2]
+	// Trim the anchor `^(?s:` prefix and `)$` suffix.
+	return str[5 : len(str)-2]
 }
 
 // Process returns a relabeled version of the given label set. The relabel configurations
@@ -273,6 +306,13 @@ func relabel(cfg *Config, lb *labels.Builder) (keep bool) {
 			return false
 		}
 	case Replace:
+		// Fast path to add or delete a label pair.
+		if val == "" && cfg.Regex == DefaultRelabelConfig.Regex &&
+			!varInRegexTemplate(cfg.TargetLabel) && !varInRegexTemplate(cfg.Replacement) {
+			lb.Set(cfg.TargetLabel, cfg.Replacement)
+			break
+		}
+
 		indexes := cfg.Regex.FindStringSubmatchIndex(val)
 		// If there is no match no replacement must take place.
 		if indexes == nil {
@@ -322,3 +362,7 @@ func relabel(cfg *Config, lb *labels.Builder) (keep bool) {
 
 	return true
 }
+
+func varInRegexTemplate(template string) bool {
+	return strings.Contains(template, "$")
+}
diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/ast.go b/vendor/github.com/prometheus/prometheus/promql/parser/ast.go
index 830e8a2c5e..9eebaed9ab 100644
--- a/vendor/github.com/prometheus/prometheus/promql/parser/ast.go
+++ b/vendor/github.com/prometheus/prometheus/promql/parser/ast.go
@@ -19,9 +19,8 @@ import (
 	"time"
 
 	"github.com/prometheus/prometheus/model/labels"
-	"github.com/prometheus/prometheus/storage"
-
 	"github.com/prometheus/prometheus/promql/parser/posrange"
+	"github.com/prometheus/prometheus/storage"
 )
 
 // Node is a generic interface for all nodes in an AST.
@@ -111,6 +110,15 @@ type BinaryExpr struct {
 	ReturnBool bool
 }
 
+// DurationExpr represents a unary or binary expression over durations.
+type DurationExpr struct {
+	Op       ItemType // The operation of the expression.
+	LHS, RHS Expr     // The operands on the respective sides of the operator.
+	Wrapped  bool     // Set when the duration is wrapped in parentheses.
+
+	StartPos posrange.Pos // For unary operations, the position of the operator.
+}
+
 // Call represents a function call.
 type Call struct {
 	Func *Function // The function that was called.
@@ -125,24 +133,27 @@ type MatrixSelector struct {
 	// if the parser hasn't returned an error.
 	VectorSelector Expr
 	Range          time.Duration
-
-	EndPos posrange.Pos
+	RangeExpr      *DurationExpr
+	EndPos         posrange.Pos
 }
 
 // SubqueryExpr represents a subquery.
 type SubqueryExpr struct {
-	Expr  Expr
-	Range time.Duration
+	Expr      Expr
+	Range     time.Duration
+	RangeExpr *DurationExpr
 	// OriginalOffset is the actual offset that was set in the query.
-	// This never changes.
 	OriginalOffset time.Duration
+	// OriginalOffsetExpr is the actual offset expression that was set in the query.
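+	// For example, assuming the experimental duration-expression syntax is
+	// enabled, a query like `foo[5m:1m] offset (2h - 90m)` would populate this
+	// field, while a literal `offset 30m` only sets OriginalOffset above.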
+	OriginalOffsetExpr *DurationExpr
 	// Offset is the offset used during the query execution
-	// which is calculated using the original offset, at modifier time,
+	// which is calculated using the original offset, the offset expression, at modifier time,
 	// eval time, and subquery offsets in the AST tree.
 	Offset     time.Duration
 	Timestamp  *int64
 	StartOrEnd ItemType // Set when @ is used with start() or end()
 	Step       time.Duration
+	StepExpr   *DurationExpr
 
 	EndPos posrange.Pos
 }
@@ -151,6 +162,7 @@ type SubqueryExpr struct {
 
 type NumberLiteral struct {
 	Val float64
+	Duration bool // Used to format the number as a duration.
 
 	PosRange posrange.PositionRange
 }
@@ -192,9 +204,10 @@ func (e *StepInvariantExpr) PositionRange() posrange.PositionRange {
 // VectorSelector represents a Vector selection.
 type VectorSelector struct {
 	Name string
-	// OriginalOffset is the actual offset that was set in the query.
-	// This never changes.
+	// OriginalOffset is the actual offset calculated from OriginalOffsetExpr.
 	OriginalOffset time.Duration
+	// OriginalOffsetExpr is the actual offset expression that was set in the query.
+	OriginalOffsetExpr *DurationExpr
 	// Offset is the offset used during the query execution
 	// which is calculated using the original offset, at modifier time,
 	// eval time, and subquery offsets in the AST tree.
@@ -208,6 +221,10 @@ type VectorSelector struct {
 	UnexpandedSeriesSet storage.SeriesSet
 	Series              []storage.Series
 
+	// BypassEmptyMatcherCheck is true when the VectorSelector isn't required to have at least one matcher matching the empty string.
+	// This is the case when VectorSelector is used to represent the info function's second argument.
+	BypassEmptyMatcherCheck bool
+
 	PosRange posrange.PositionRange
 }
 
@@ -241,6 +258,7 @@ func (e *BinaryExpr) Type() ValueType { return ValueTypeVector }
 func (e *StepInvariantExpr) Type() ValueType { return e.Expr.Type() }
+func (e *DurationExpr) Type() ValueType     { return ValueTypeScalar }
 
 func (*AggregateExpr) PromQLExpr()     {}
 func (*BinaryExpr) PromQLExpr()        {}
@@ -253,6 +271,7 @@ func (*StringLiteral) PromQLExpr()     {}
 func (*UnaryExpr) PromQLExpr()         {}
 func (*VectorSelector) PromQLExpr()    {}
 func (*StepInvariantExpr) PromQLExpr() {}
+func (*DurationExpr) PromQLExpr()      {}
 
 // VectorMatchCardinality describes the cardinality relationship
 // of two Vectors in a binary operation.
@@ -352,8 +371,7 @@ func (f inspector) Visit(node Node, path []Node) (Visitor, error) {
 // f(node, path); node must not be nil. If f returns a nil error, Inspect invokes f
 // for all the non-nil children of node, recursively.
 func Inspect(node Node, f inspector) {
-	//nolint: errcheck
-	Walk(f, node, nil)
+	Walk(f, node, nil) //nolint:errcheck
 }
 
 // Children returns a list of all child nodes of a syntax tree node.
@@ -419,7 +437,7 @@ func mergeRanges(first, last Node) posrange.PositionRange {
 	}
 }
 
-// Item implements the Node interface.
+// PositionRange implements the Node interface.
 // This makes it possible to call mergeRanges on them.
 func (i *Item) PositionRange() posrange.PositionRange {
 	return posrange.PositionRange{
@@ -436,6 +454,16 @@ func (e *BinaryExpr) PositionRange() posrange.PositionRange {
 	return mergeRanges(e.LHS, e.RHS)
 }
 
+func (e *DurationExpr) PositionRange() posrange.PositionRange {
+	if e.LHS == nil {
+		return posrange.PositionRange{
+			Start: e.StartPos,
+			End:   e.RHS.PositionRange().End,
+		}
+	}
+	return mergeRanges(e.LHS, e.RHS)
+}
+
 func (e *Call) PositionRange() posrange.PositionRange {
 	return e.PosRange
 }
diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/functions.go b/vendor/github.com/prometheus/prometheus/promql/parser/functions.go
index 99b41321fe..aa65aca275 100644
--- a/vendor/github.com/prometheus/prometheus/promql/parser/functions.go
+++ b/vendor/github.com/prometheus/prometheus/promql/parser/functions.go
@@ -202,10 +202,11 @@ var Functions = map[string]*Function{
 		ArgTypes:   []ValueType{ValueTypeScalar, ValueTypeVector},
 		ReturnType: ValueTypeVector,
 	},
-	"holt_winters": {
-		Name:       "holt_winters",
-		ArgTypes:   []ValueType{ValueTypeMatrix, ValueTypeScalar, ValueTypeScalar},
-		ReturnType: ValueTypeVector,
+	"double_exponential_smoothing": {
+		Name:         "double_exponential_smoothing",
+		ArgTypes:     []ValueType{ValueTypeMatrix, ValueTypeScalar, ValueTypeScalar},
+		ReturnType:   ValueTypeVector,
+		Experimental: true,
 	},
 	"hour": {
 		Name:       "hour",
@@ -223,6 +224,13 @@ var Functions = map[string]*Function{
 		ArgTypes:   []ValueType{ValueTypeMatrix},
 		ReturnType: ValueTypeVector,
 	},
+	"info": {
+		Name:         "info",
+		ArgTypes:     []ValueType{ValueTypeVector, ValueTypeVector},
+		ReturnType:   ValueTypeVector,
+		Experimental: true,
+		Variadic:     1,
+	},
 	"irate": {
 		Name:     "irate",
 		ArgTypes: []ValueType{ValueTypeMatrix},
diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y
index b99e67424f..de9234589c 100644
--- a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y
+++ b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y
@@ -23,6 +23,8 @@ import (
 	"github.com/prometheus/prometheus/model/value"
 	"github.com/prometheus/prometheus/model/histogram"
 	"github.com/prometheus/prometheus/promql/parser/posrange"
+
+	"github.com/prometheus/common/model"
 )
 %}
@@ -84,6 +86,7 @@ NEGATIVE_BUCKETS_DESC
 ZERO_BUCKET_DESC
 ZERO_BUCKET_WIDTH_DESC
 CUSTOM_VALUES_DESC
+COUNTER_RESET_HINT_DESC
 %token histogramDescEnd
 
 // Operators.
@@ -149,6 +152,14 @@ START
 END
 %token preprocessorEnd
 
+// Counter reset hints.
+%token counterResetHintsStart
+%token <item>
+UNKNOWN_COUNTER_RESET
+COUNTER_RESET
+NOT_COUNTER_RESET
+GAUGE_TYPE
+%token counterResetHintsEnd
 
 // Start symbols for the generated parser.
 %token startSymbolsStart
@@ -163,7 +174,7 @@ START_METRIC_SELECTOR
 // Type definitions for grammar rules.
 %type <matchers> label_match_list
 %type <matcher> label_matcher
-%type <item> aggregate_op grouping_label match_op maybe_label metric_identifier unary_op at_modifier_preprocessors string_identifier
+%type <item> aggregate_op grouping_label match_op maybe_label metric_identifier unary_op at_modifier_preprocessors string_identifier counter_reset_hint
 %type <labels> label_set metric
 %type <lblList> label_set_list
 %type
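
Note on the two new Functions entries above: the Experimental flag makes the
vendored parser reject info() and double_exponential_smoothing() unless
experimental functions are switched on first. A minimal sketch against the
vendored promql/parser package (hypothetical metric and label names, not part
of this patch):

	package main

	import (
		"fmt"

		"github.com/prometheus/prometheus/promql/parser"
	)

	func main() {
		// Functions marked Experimental fail to parse until this package-level
		// switch is flipped.
		parser.EnableExperimentalFunctions = true

		// info() has Variadic: 1, so its optional second argument is a selector;
		// that selector is the one place where only empty-string matchers are
		// permitted (see BypassEmptyMatcherCheck on VectorSelector above).
		expr, err := parser.ParseExpr(`info(rate(http_requests_total[2m]), {k8s_cluster_name="eu-1"})`)
		if err != nil {
			fmt.Println("parse error:", err)
			return
		}
		fmt.Println(parser.Tree(expr))
	}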