diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml new file mode 100644 index 0000000..5f556a9 --- /dev/null +++ b/.github/workflows/test.yml @@ -0,0 +1,31 @@ +name: Pact-Workshop(GoLang) + +on: + push: + branches: + - step3 + pull_request: + branches: + - step3 + +jobs: + build: + runs-on: ubuntu-latest + defaults: + run: + shell: bash + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Set up Go + uses: actions/setup-go@v3 + with: + go-version: 1.21 + - name: install + run: make install + - name: install_pact_ffi_lib + run: make install_pact_ffi_lib + - name: consumer unit tests + run: make unit + - name: consumer pact tests + run: make consumer \ No newline at end of file diff --git a/Makefile b/Makefile index 544cd6e..d98357a 100644 --- a/Makefile +++ b/Makefile @@ -1,13 +1,16 @@ -TEST?=./... - include ./make/config.mk install: @if [ ! -d pact/bin ]; then\ - echo "--- 🛠 Installing Pact CLI dependencies";\ + echo "--- Installing Pact CLI dependencies";\ curl -fsSL https://raw.githubusercontent.com/pact-foundation/pact-ruby-standalone/master/install.sh | bash;\ fi +install_pact_ffi_lib: + go install github.com/pact-foundation/pact-go/v2@2.x.x + sudo mkdir -p /usr/local/lib/ + sudo $$HOME/go/bin/pact-go -l DEBUG install + run-consumer: @go run consumer/client/cmd/main.go @@ -16,10 +19,11 @@ run-provider: unit: @echo "--- 🔨Running Unit tests " - go test -tags=unit -count=1 github.com/pact-foundation/pact-workshop-go/consumer/client -run 'TestClientUnit' + go test -tags=unit -count=1 github.com/pact-foundation/pact-workshop-go/consumer/client -run 'TestClientUnit' -v +consumer: export PACT_TEST := true consumer: install @echo "--- 🔨Running Consumer Pact tests " - go test -tags=integration -count=1 github.com/pact-foundation/pact-workshop-go/consumer/client -run 'TestClientPact' + go test -tags=integration -count=1 github.com/pact-foundation/pact-workshop-go/consumer/client -run 'TestClientPact' -v .PHONY: install unit consumer provider run-provider run-consumer diff --git a/README.md b/README.md index a22f3b1..cc5613d 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,7 @@ # Pact Go workshop ## Introduction - -This workshop is aimed at demonstrating core features and benefits of contract testing with Pact. It uses a simple example +This workshop is aimed at demonstrating core features and benefits of contract testing with Pact. Whilst contract testing can be applied retrospectively to systems, we will follow the [consumer driven contracts](https://martinfowler.com/articles/consumerDrivenContracts.html) approach in this workshop - where a new consumer and provider are created in parallel to evolve a service over time, especially where there is some uncertainty with what is to be built. @@ -37,11 +36,12 @@ There are two components in scope for our workshop. For the purposes of this workshop, we won't implement any functionality of the Admin Service, except the bits that require User information.
+**Project Structure** The key packages are shown below: ```sh -├── consumer # Contains the Admin Service Team (client) project +├── consumer # Contains the Admin Service Team (client) project ├── model # Shared domain model ├── pact # The directory of the Pact Standalone CLI ├── provider # The User Service Team (provider) project @@ -55,7 +55,7 @@ We need to first create an HTTP client to make the calls to our provider service _NOTE_: even if the API client had been graciously provided for us by our Provider Team, it doesn't mean that we shouldn't write contract tests - because the version of the client we have may not always be in sync with the deployed API - and also because we will write tests on the output appropriate to our specific needs. -This User Service expects a `user` path parameter, and then returns some simple json back: +This User Service expects a `users` path parameter, and then returns some simple json back: ![Sequence Diagram](diagrams/workshop_step1_class-sequence-diagram.png) @@ -92,7 +92,7 @@ func TestClientUnit_GetUser(t *testing.T) { // Setup mock server server := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { - assert.Equal(t, req.URL.String(), fmt.Sprintf("/user/%d", userID)) + assert.Equal(t, req.URL.String(), fmt.Sprintf("/users/%d", userID)) user, _ := json.Marshal(model.User{ FirstName: "Sally", LastName: "McDougall", @@ -163,40 +163,47 @@ _consumer/client/client_pact_test.go:_ t.Run("the user exists", func(t *testing.T) { id := 10 - pact. + err = mockProvider. AddInteraction(). Given("User sally exists"). UponReceiving("A request to login with user 'sally'"). - WithRequest(request{ - Method: "GET", - Path: term("/users/10", "/user/[0-9]+"), + WithRequestPathMatcher("GET", Regex("/users/"+strconv.Itoa(id), "/users/[0-9]+")). + WillRespondWith(200, func(b *consumer.V2ResponseBuilder) { + b.BodyMatch(model.User{}). + Header("Content-Type", Term("application/json", `application\/json`)). + Header("X-Api-Correlation-Id", Like("100")) }). - WillRespondWith(dsl.Response{ - Status: 200, - Body: dsl.Match(model.User{}), - Headers: commonHeaders, - }) + ExecuteTest(t, func(config consumer.MockServerConfig) error { + // Act: test our API client behaves correctly - err := pact.Verify(func() error { - user, err := client.GetUser(id) + // Get the Pact mock server URL + u, _ = url.Parse("http://" + config.Host + ":" + strconv.Itoa(config.Port)) - // Assert basic fact - if user.ID != id { - return fmt.Errorf("wanted user with ID %d but got %d", id, user.ID) - } + // Initialise the API client and point it at the Pact mock server + client = &Client{ + BaseURL: u, + } - return err - }) + // // Execute the API client + user, err := client.GetUser(id) + + // // Assert basic fact + if user.ID != id { + return fmt.Errorf("wanted user with ID %d but got %d", id, user.ID) + } + + return err + }) + + assert.NoError(t, err) - if err != nil { - t.Fatalf("Error on Verify: %v", err) - } }) ``` ![Test using Pact](diagrams/workshop_step3_pact.png) -This test starts a mock server a random port that acts as our provider service. To get this to work we update the URL in the `Client` that we create, after initialising Pact. + +This test starts a Pact mock server on a random port that acts as our provider service. We can access `config.Host` and `config.Port` from the `consumer.MockServerConfig` in the `ExecuteTest` block and pass these into the `Client` that we create, after initialising Pact.
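+
+For reference, the wiring inside the `ExecuteTest` block boils down to the few lines sketched below. This is an illustration only rather than extra code in the repository, and it assumes the test file's existing imports plus the workshop's `Client` type with its `BaseURL` field:
+
+```go
+// Build the base URL of the mock server Pact has just started for this test
+// (config is the consumer.MockServerConfig passed to the ExecuteTest callback).
+u, _ := url.Parse(fmt.Sprintf("http://%s:%d", config.Host, config.Port))
+
+// Point our API client at the mock server instead of a real provider.
+client := &Client{
+	BaseURL: u,
+}
+```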
Pact will ensure our client makes the request stated in the interaction. Running this test still passes, but it creates a pact file which we can use to validate our assumptions on the provider side, and have conversation around. @@ -204,6 +211,6 @@ Running this test still passes, but it creates a pact file which we can use to v $ make consumer ``` -A pact file should have been generated in _pacts/goadminservice-gouserservice.json_ +A pact file should have been generated in *pacts/GoAdminService-GoUserService.json* _Move on to [step 4](//github.com/pact-foundation/pact-workshop-go/tree/step4)_ diff --git a/consumer/client/client_pact_test.go b/consumer/client/client_pact_test.go index 9056d7e..6d73934 100644 --- a/consumer/client/client_pact_test.go +++ b/consumer/client/client_pact_test.go @@ -1,108 +1,90 @@ -// +build integration +//go:build integration package client import ( "fmt" "os" + "strconv" "testing" "net/url" - "github.com/pact-foundation/pact-go/dsl" + "github.com/pact-foundation/pact-go/v2/consumer" + "github.com/pact-foundation/pact-go/v2/log" + "github.com/pact-foundation/pact-go/v2/matchers" "github.com/pact-foundation/pact-workshop-go/model" + "github.com/stretchr/testify/assert" ) -var commonHeaders = dsl.MapMatcher{ - "Content-Type": term("application/json; charset=utf-8", `application\/json`), - "X-Api-Correlation-Id": dsl.Like("100"), -} +var Like = matchers.Like +var EachLike = matchers.EachLike +var Term = matchers.Term +var Regex = matchers.Regex +var HexValue = matchers.HexValue +var Identifier = matchers.Identifier +var IPAddress = matchers.IPAddress +var IPv6Address = matchers.IPv6Address +var Timestamp = matchers.Timestamp +var Date = matchers.Date +var Time = matchers.Time +var UUID = matchers.UUID +var ArrayMinLike = matchers.ArrayMinLike + +type S = matchers.S +type Map = matchers.MapMatcher var u *url.URL var client *Client -func TestMain(m *testing.M) { - var exitCode int - - // Setup Pact and related test stuff - setup() - - // Run all the tests - exitCode = m.Run() - - // Shutdown the Mock Service and Write pact files to disk - if err := pact.WritePact(); err != nil { - fmt.Println(err) - os.Exit(1) - } +func TestClientPact_GetUser(t *testing.T) { - pact.Teardown() - os.Exit(exitCode) -} + log.SetLogLevel("INFO") + mockProvider, err := consumer.NewV2Pact(consumer.MockHTTPProviderConfig{ + Consumer: os.Getenv("CONSUMER_NAME"), + Provider: os.Getenv("PROVIDER_NAME"), + LogDir: os.Getenv("LOG_DIR"), + PactDir: os.Getenv("PACT_DIR"), + }) + assert.NoError(t, err) -func TestClientPact_GetUser(t *testing.T) { t.Run("the user exists", func(t *testing.T) { id := 10 - pact. + err = mockProvider. AddInteraction(). Given("User sally exists"). UponReceiving("A request to login with user 'sally'"). - WithRequest(request{ - Method: "GET", - Path: term("/users/10", "/users/[0-9]+"), + WithRequestPathMatcher("GET", Regex("/users/"+strconv.Itoa(id), "/users/[0-9]+")). + WillRespondWith(200, func(b *consumer.V2ResponseBuilder) { + b.BodyMatch(model.User{}). + Header("Content-Type", Term("application/json", `application\/json`)). + Header("X-Api-Correlation-Id", Like("100")) }). 
- WillRespondWith(dsl.Response{ - Status: 200, - Body: dsl.Match(model.User{}), - Headers: commonHeaders, - }) + ExecuteTest(t, func(config consumer.MockServerConfig) error { + // Act: test our API client behaves correctly - err := pact.Verify(func() error { - user, err := client.GetUser(id) + // Get the Pact mock server URL + u, _ = url.Parse("http://" + config.Host + ":" + strconv.Itoa(config.Port)) - // Assert basic fact - if user.ID != id { - return fmt.Errorf("wanted user with ID %d but got %d", id, user.ID) - } + // Initialise the API client and point it at the Pact mock server + client = &Client{ + BaseURL: u, + } - return err - }) + // // Execute the API client + user, err := client.GetUser(id) - if err != nil { - t.Fatalf("Error on Verify: %v", err) - } - }) -} - -// Common test data -var pact dsl.Pact - -// Aliases -var term = dsl.Term - -type request = dsl.Request + // // Assert basic fact + if user.ID != id { + return fmt.Errorf("wanted user with ID %d but got %d", id, user.ID) + } -func setup() { - pact = createPact() - - // Proactively start service to get access to the port - pact.Setup(true) - - u, _ = url.Parse(fmt.Sprintf("http://localhost:%d", pact.Server.Port)) + return err + }) - client = &Client{ - BaseURL: u, - } + assert.NoError(t, err) -} + }) -func createPact() dsl.Pact { - return dsl.Pact{ - Consumer: os.Getenv("CONSUMER_NAME"), - Provider: os.Getenv("PROVIDER_NAME"), - LogDir: os.Getenv("LOG_DIR"), - PactDir: os.Getenv("PACT_DIR"), - LogLevel: "INFO", - } } diff --git a/consumer/client/cmd/main.go b/consumer/client/cmd/main.go index 099a98e..ef603c3 100644 --- a/consumer/client/cmd/main.go +++ b/consumer/client/cmd/main.go @@ -3,13 +3,10 @@ package main import ( "log" "net/url" - "time" "github.com/pact-foundation/pact-workshop-go/consumer/client" ) -var token = time.Now().Format("2006-01-02") - func main() { u, _ := url.Parse("http://localhost:8080") client := &client.Client{ diff --git a/go.mod b/go.mod index 8abf8c5..a6aeed2 100644 --- a/go.mod +++ b/go.mod @@ -1,9 +1,59 @@ module github.com/pact-foundation/pact-workshop-go -go 1.14 +go 1.20 require ( - github.com/google/uuid v1.1.2 - github.com/pact-foundation/pact-go v1.5.0 - github.com/stretchr/testify v1.6.1 + github.com/google/uuid v1.3.0 + github.com/pact-foundation/pact-go/v2 v2.0.2 + github.com/stretchr/testify v1.8.4 +) + +require ( + cloud.google.com/go v0.110.4 // indirect + cloud.google.com/go/compute v1.21.0 // indirect + cloud.google.com/go/compute/metadata v0.2.3 // indirect + cloud.google.com/go/iam v1.1.1 // indirect + cloud.google.com/go/storage v1.31.0 // indirect + github.com/aws/aws-sdk-go v1.44.298 // indirect + github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/google/go-cmp v0.5.9 // indirect + github.com/google/s2a-go v0.1.4 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.2.5 // indirect + github.com/googleapis/gax-go/v2 v2.12.0 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-getter v1.7.2 // indirect + github.com/hashicorp/go-safetemp v1.0.0 // indirect + github.com/hashicorp/go-version v1.6.0 // indirect + github.com/hashicorp/logutils v1.0.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/klauspost/compress v1.16.7 // 
indirect + github.com/kr/text v0.2.0 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/go-testing-interface v1.14.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/spf13/afero v1.9.5 // indirect + github.com/spf13/cobra v1.7.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/ulikunitz/xz v0.5.11 // indirect + go.opencensus.io v0.24.0 // indirect + golang.org/x/crypto v0.11.0 // indirect + golang.org/x/net v0.12.0 // indirect + golang.org/x/oauth2 v0.10.0 // indirect + golang.org/x/sync v0.3.0 // indirect + golang.org/x/sys v0.10.0 // indirect + golang.org/x/text v0.11.0 // indirect + golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect + google.golang.org/api v0.130.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect + google.golang.org/grpc v1.58.1 // indirect + google.golang.org/protobuf v1.31.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index b915a86..d64498b 100644 --- a/go.sum +++ b/go.sum @@ -1,24 +1,260 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod 
h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go v0.110.4 h1:1JYyxKMN9hd5dR2MYTPWkGUgcoxVVhg0LKNKEo0qvmk= +cloud.google.com/go v0.110.4/go.mod h1:+EYjdK8e5RME/VY/qLCAtuyALQ9q67dvuum8i+H5xsI= +cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= +cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= +cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= +cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= +cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= +cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= +cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= +cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= +cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= +cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= +cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= +cloud.google.com/go/cloudtasks 
v1.6.0/go.mod h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/compute v1.21.0 h1:JNBsyXVoOoNJtTQcnEY5uYpZIbeCTYIeDe0Xh1bySMk= +cloud.google.com/go/compute v1.21.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= +cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= +cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= +cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= +cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= +cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= +cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= +cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= +cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= +cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= +cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= +cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= +cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/functions 
v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= +cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= +cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= +cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= +cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= +cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= +cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= +cloud.google.com/go/iam v1.1.1 h1:lW7fzj15aVIXYHREOqjRBV9PsH0Z6u8Y46a1YGvQP4Y= +cloud.google.com/go/iam v1.1.1/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= +cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= +cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= +cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= +cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= +cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= +cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= +cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= +cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= +cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= +cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= +cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= +cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= +cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= +cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= +cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= +cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= +cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= +cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= +cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= +cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= +cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= +cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= 
+cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= +cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= +cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= +cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= +cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= +cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= +cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= +cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= +cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= +cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= +cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= +cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= +cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= +cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= +cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= +cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= +cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= +cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= +cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= +cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= +cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= +cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= +cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= +cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= +cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= +cloud.google.com/go/storage v1.31.0 h1:+S3LjjEN2zZ+L5hOwj4+1OkGCsLVe0NzpXKQ1pSdTCI= +cloud.google.com/go/storage v1.31.0/go.mod 
h1:81ams1PrhW16L4kF7qg+4mTq7SRs5HsbDTM0bWvrwJ0= +cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= +cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= +cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= +cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= +cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= +cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= +cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= +cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= +cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= +cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= +cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/aws/aws-sdk-go v1.44.122/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/aws/aws-sdk-go v1.44.298 h1:5qTxdubgV7PptZJmp/2qDwD2JL187ePL7VOxsSh1i3g= +github.com/aws/aws-sdk-go v1.44.298/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod 
h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= -github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= -github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= -github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= -github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= -github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= -github.com/go-playground/validator/v10 v10.3.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache 
v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -26,83 +262,660 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp 
v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof 
v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= +github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/hashicorp/go-version v1.0.0 h1:21MVWPKDphxa7ineQQTrCU5brh7OuVVAzGOCnnCPtE8= -github.com/hashicorp/go-version v1.0.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.2.0 h1:3vNe/fWF5CBgRIguda1meWhsZHy3m8gCJ5wx+dIzX/E= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/logutils v0.0.0-20150609070431-0dc08b1671f3 h1:oD64EFjELI9RY9yoWlfua58r+etdnoIC871z+rr6lkA= -github.com/hashicorp/logutils v0.0.0-20150609070431-0dc08b1671f3/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= +github.com/googleapis/enterprise-certificate-proxy v0.2.5 h1:UR4rDjcgpgEnqpIEvkiqTYKBCKLNmlge2eVjoZfySzM= +github.com/googleapis/enterprise-certificate-proxy v0.2.5/go.mod h1:RxW0N9901Cko1VOCW3SXCpWP+mlIEkk2tP7jnHy9a3w= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= +github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-getter v1.7.2 
h1:uJDtyXwEfalmp1PqdxuhZqrNkUyClZAhVeZYTArbqkg= +github.com/hashicorp/go-getter v1.7.2/go.mod h1:W7TalhMmbPmsSMdNjD0ZskARur/9GJ17cfHTRtXV744= +github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo= +github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I= +github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= +github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/pact-foundation/pact-go v1.5.0 h1:M7+QIs6sy49c+9ISJrCrFSJvOnTQYLePRgdX/NYKOWY= -github.com/pact-foundation/pact-go v1.5.0/go.mod h1:dN3I+UHNkIk5ZMxSt2+lqavCqfTu+xsx56ZME8BNWlU= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= +github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= +github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/kr/fs v0.1.0/go.mod 
h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= +github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= +github.com/pact-foundation/pact-go/v2 v2.0.2 h1:bv1umDHG7b+dggreD9OXenABi5GNIAPZ3hjFvf6aAuc= +github.com/pact-foundation/pact-go/v2 v2.0.2/go.mod h1:opVM068i6ChOwke/1x9guppuRbIMM7vl6Z+TEGCfW6k= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/spf13/cobra v0.0.0-20160604044732-f447048345b6/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/pflag v0.0.0-20160427162146-cb88ea77998c/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= +github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod 
h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= -github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= +github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto 
v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA= +golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod 
h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50= +golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= 
+golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.1.0/go.mod h1:G9FE4dLTsbXUu90h/Pf85g4w1D+SSAgR+q46nJZ8M4A= +golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= +golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4= +golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools 
v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod 
h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod 
h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.90.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= +google.golang.org/api v0.130.0 h1:A50ujooa1h9iizvfzA4rrJr2B7uRmWexwbekQ2+5FPQ= +google.golang.org/api v0.130.0/go.mod h1:J/LCJMYSDFvAVREGCbrESb53n4++NMBDetSHGL5I5RY= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod 
h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod 
h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod 
h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221025140454-527a21cfbd71/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 h1:Z0hjGZePRE0ZBWotvtrwxFNrNE9CUAGtplaDK5NNI/g= +google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98/go.mod h1:S7mY02OqCJTD0E1OiQy1F72PWFB4bZJ87cAtLPYgDR0= +google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 h1:FmF5cCW94Ij59cfpoLiwTgodWmm60eEV0CjlsVg2fuw= +google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod 
h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.58.1 h1:OL+Vz23DTtrrldqHK49FUOPHyY75rvFqJfXC84NYW58= +google.golang.org/grpc v1.58.1/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -111,15 +924,35 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= +google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 
v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/make/config.mk b/make/config.mk index c4314f8..b108af6 100644 --- a/make/config.mk +++ b/make/config.mk @@ -1,4 +1,4 @@ -SHELL = "/bin/bash" +SHELL = /bin/bash export PATH := $(PWD)/pact/bin:$(PATH) export PATH @@ -6,7 +6,9 @@ export PROVIDER_NAME = GoUserService export CONSUMER_NAME = GoAdminService export PACT_DIR = $(PWD)/pacts export LOG_DIR = $(PWD)/log -export PACT_BROKER_USERNAME = dXfltyFMgNOFZAxr8io9wJ37iUpY42M -export PACT_BROKER_PASSWORD = O5AIZWxelWbLvqMd8PkAVycBJh2Psyg1 -export PACT_BROKER_PROTO = https -export PACT_BROKER_URL = test.pact.dius.com.au \ No newline at end of file +export PACT_BROKER_PROTO = http +export PACT_BROKER_URL = localhost:8081 +export PACT_BROKER_USERNAME = pact_workshop +export PACT_BROKER_PASSWORD = pact_workshop +export VERSION_COMMIT?=$(shell git rev-parse HEAD) +export VERSION_BRANCH?=$(shell git rev-parse --abbrev-ref HEAD) \ No newline at end of file diff --git a/provider/user_service.go b/provider/user_service.go index 5519099..1ea6d53 100644 --- a/provider/user_service.go +++ b/provider/user_service.go @@ -35,7 +35,7 @@ func WithCorrelationID(h http.HandlerFunc) http.HandlerFunc { // GetUser fetches a user if authenticated and exists func GetUser(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json; charset=utf-8") + w.Header().Set("Content-Type", "application/json") // Get username from path a := strings.Split(r.URL.Path, 
"/") @@ -53,7 +53,7 @@ func GetUser(w http.ResponseWriter, r *http.Request) { // GetUsers fetches all users func GetUsers(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json; charset=utf-8") + w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) resBody, _ := json.Marshal(userRepository.GetUsers()) w.Write(resBody) diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE deleted file mode 100644 index bc52e96..0000000 --- a/vendor/github.com/davecgh/go-spew/LICENSE +++ /dev/null @@ -1,15 +0,0 @@ -ISC License - -Copyright (c) 2012-2016 Dave Collins - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go deleted file mode 100644 index 7929947..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/bypass.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright (c) 2015-2016 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when the code is not running on Google App Engine, compiled by GopherJS, and -// "-tags safe" is not added to the go build command line. The "disableunsafe" -// tag is deprecated and thus should not be used. -// Go versions prior to 1.4 are disabled because they use a different layout -// for interfaces which make the implementation of unsafeReflectValue more complex. -// +build !js,!appengine,!safe,!disableunsafe,go1.4 - -package spew - -import ( - "reflect" - "unsafe" -) - -const ( - // UnsafeDisabled is a build-time constant which specifies whether or - // not access to the unsafe package is available. - UnsafeDisabled = false - - // ptrSize is the size of a pointer on the current arch. - ptrSize = unsafe.Sizeof((*byte)(nil)) -) - -type flag uintptr - -var ( - // flagRO indicates whether the value field of a reflect.Value - // is read-only. - flagRO flag - - // flagAddr indicates whether the address of the reflect.Value's - // value may be taken. 
- flagAddr flag -) - -// flagKindMask holds the bits that make up the kind -// part of the flags field. In all the supported versions, -// it is in the lower 5 bits. -const flagKindMask = flag(0x1f) - -// Different versions of Go have used different -// bit layouts for the flags type. This table -// records the known combinations. -var okFlags = []struct { - ro, addr flag -}{{ - // From Go 1.4 to 1.5 - ro: 1 << 5, - addr: 1 << 7, -}, { - // Up to Go tip. - ro: 1<<5 | 1<<6, - addr: 1 << 8, -}} - -var flagValOffset = func() uintptr { - field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") - if !ok { - panic("reflect.Value has no flag field") - } - return field.Offset -}() - -// flagField returns a pointer to the flag field of a reflect.Value. -func flagField(v *reflect.Value) *flag { - return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset)) -} - -// unsafeReflectValue converts the passed reflect.Value into a one that bypasses -// the typical safety restrictions preventing access to unaddressable and -// unexported data. It works by digging the raw pointer to the underlying -// value out of the protected value and generating a new unprotected (unsafe) -// reflect.Value to it. -// -// This allows us to check for implementations of the Stringer and error -// interfaces to be used for pretty printing ordinarily unaddressable and -// inaccessible values such as unexported struct fields. -func unsafeReflectValue(v reflect.Value) reflect.Value { - if !v.IsValid() || (v.CanInterface() && v.CanAddr()) { - return v - } - flagFieldPtr := flagField(&v) - *flagFieldPtr &^= flagRO - *flagFieldPtr |= flagAddr - return v -} - -// Sanity checks against future reflect package changes -// to the type or semantics of the Value.flag field. -func init() { - field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") - if !ok { - panic("reflect.Value has no flag field") - } - if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() { - panic("reflect.Value flag field has changed kind") - } - type t0 int - var t struct { - A t0 - // t0 will have flagEmbedRO set. - t0 - // a will have flagStickyRO set - a t0 - } - vA := reflect.ValueOf(t).FieldByName("A") - va := reflect.ValueOf(t).FieldByName("a") - vt0 := reflect.ValueOf(t).FieldByName("t0") - - // Infer flagRO from the difference between the flags - // for the (otherwise identical) fields in t. - flagPublic := *flagField(&vA) - flagWithRO := *flagField(&va) | *flagField(&vt0) - flagRO = flagPublic ^ flagWithRO - - // Infer flagAddr from the difference between a value - // taken from a pointer and not. - vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A") - flagNoPtr := *flagField(&vA) - flagPtr := *flagField(&vPtrA) - flagAddr = flagNoPtr ^ flagPtr - - // Check that the inferred flags tally with one of the known versions. - for _, f := range okFlags { - if flagRO == f.ro && flagAddr == f.addr { - return - } - } - panic("reflect.Value read-only flag has changed semantics") -} diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go deleted file mode 100644 index 205c28d..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) 2015-2016 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. 
-// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when the code is running on Google App Engine, compiled by GopherJS, or -// "-tags safe" is added to the go build command line. The "disableunsafe" -// tag is deprecated and thus should not be used. -// +build js appengine safe disableunsafe !go1.4 - -package spew - -import "reflect" - -const ( - // UnsafeDisabled is a build-time constant which specifies whether or - // not access to the unsafe package is available. - UnsafeDisabled = true -) - -// unsafeReflectValue typically converts the passed reflect.Value into a one -// that bypasses the typical safety restrictions preventing access to -// unaddressable and unexported data. However, doing this relies on access to -// the unsafe package. This is a stub version which simply returns the passed -// reflect.Value when the unsafe package is not available. -func unsafeReflectValue(v reflect.Value) reflect.Value { - return v -} diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go deleted file mode 100644 index 1be8ce9..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/common.go +++ /dev/null @@ -1,341 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "io" - "reflect" - "sort" - "strconv" -) - -// Some constants in the form of bytes to avoid string overhead. This mirrors -// the technique used in the fmt package. 
-var (
-	panicBytes            = []byte("(PANIC=")
-	plusBytes             = []byte("+")
-	iBytes                = []byte("i")
-	trueBytes             = []byte("true")
-	falseBytes            = []byte("false")
-	interfaceBytes        = []byte("(interface {})")
-	commaNewlineBytes     = []byte(",\n")
-	newlineBytes          = []byte("\n")
-	openBraceBytes        = []byte("{")
-	openBraceNewlineBytes = []byte("{\n")
-	closeBraceBytes       = []byte("}")
-	asteriskBytes         = []byte("*")
-	colonBytes            = []byte(":")
-	colonSpaceBytes       = []byte(": ")
-	openParenBytes        = []byte("(")
-	closeParenBytes       = []byte(")")
-	spaceBytes            = []byte(" ")
-	pointerChainBytes     = []byte("->")
-	nilAngleBytes         = []byte("<nil>")
-	maxNewlineBytes       = []byte("<max depth reached>\n")
-	maxShortBytes         = []byte("<max>")
-	circularBytes         = []byte("<already shown>")
-	circularShortBytes    = []byte("<shown>")
-	invalidAngleBytes     = []byte("<invalid reflect.Value>")
-	openBracketBytes      = []byte("[")
-	closeBracketBytes     = []byte("]")
-	percentBytes          = []byte("%")
-	precisionBytes        = []byte(".")
-	openAngleBytes        = []byte("<")
-	closeAngleBytes       = []byte(">")
-	openMapBytes          = []byte("map[")
-	closeMapBytes         = []byte("]")
-	lenEqualsBytes        = []byte("len=")
-	capEqualsBytes        = []byte("cap=")
-)
-
-// hexDigits is used to map a decimal value to a hex digit.
-var hexDigits = "0123456789abcdef"
-
-// catchPanic handles any panics that might occur during the handleMethods
-// calls.
-func catchPanic(w io.Writer, v reflect.Value) {
-	if err := recover(); err != nil {
-		w.Write(panicBytes)
-		fmt.Fprintf(w, "%v", err)
-		w.Write(closeParenBytes)
-	}
-}
-
-// handleMethods attempts to call the Error and String methods on the underlying
-// type the passed reflect.Value represents and outputes the result to Writer w.
-//
-// It handles panics in any called methods by catching and displaying the error
-// as the formatted value.
-func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
-	// We need an interface to check if the type implements the error or
-	// Stringer interface. However, the reflect package won't give us an
-	// interface on certain things like unexported struct fields in order
-	// to enforce visibility rules. We use unsafe, when it's available,
-	// to bypass these restrictions since this package does not mutate the
-	// values.
-	if !v.CanInterface() {
-		if UnsafeDisabled {
-			return false
-		}
-
-		v = unsafeReflectValue(v)
-	}
-
-	// Choose whether or not to do error and Stringer interface lookups against
-	// the base type or a pointer to the base type depending on settings.
-	// Technically calling one of these methods with a pointer receiver can
-	// mutate the value, however, types which choose to satisify an error or
-	// Stringer interface with a pointer receiver should not be mutating their
-	// state inside these interface methods.
-	if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
-		v = unsafeReflectValue(v)
-	}
-	if v.CanAddr() {
-		v = v.Addr()
-	}
-
-	// Is it an error or Stringer?
-	switch iface := v.Interface().(type) {
-	case error:
-		defer catchPanic(w, v)
-		if cs.ContinueOnMethod {
-			w.Write(openParenBytes)
-			w.Write([]byte(iface.Error()))
-			w.Write(closeParenBytes)
-			w.Write(spaceBytes)
-			return false
-		}
-
-		w.Write([]byte(iface.Error()))
-		return true
-
-	case fmt.Stringer:
-		defer catchPanic(w, v)
-		if cs.ContinueOnMethod {
-			w.Write(openParenBytes)
-			w.Write([]byte(iface.String()))
-			w.Write(closeParenBytes)
-			w.Write(spaceBytes)
-			return false
-		}
-		w.Write([]byte(iface.String()))
-		return true
-	}
-	return false
-}
-
-// printBool outputs a boolean value as true or false to Writer w.
-func printBool(w io.Writer, val bool) { - if val { - w.Write(trueBytes) - } else { - w.Write(falseBytes) - } -} - -// printInt outputs a signed integer value to Writer w. -func printInt(w io.Writer, val int64, base int) { - w.Write([]byte(strconv.FormatInt(val, base))) -} - -// printUint outputs an unsigned integer value to Writer w. -func printUint(w io.Writer, val uint64, base int) { - w.Write([]byte(strconv.FormatUint(val, base))) -} - -// printFloat outputs a floating point value using the specified precision, -// which is expected to be 32 or 64bit, to Writer w. -func printFloat(w io.Writer, val float64, precision int) { - w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) -} - -// printComplex outputs a complex value using the specified float precision -// for the real and imaginary parts to Writer w. -func printComplex(w io.Writer, c complex128, floatPrecision int) { - r := real(c) - w.Write(openParenBytes) - w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) - i := imag(c) - if i >= 0 { - w.Write(plusBytes) - } - w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) - w.Write(iBytes) - w.Write(closeParenBytes) -} - -// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x' -// prefix to Writer w. -func printHexPtr(w io.Writer, p uintptr) { - // Null pointer. - num := uint64(p) - if num == 0 { - w.Write(nilAngleBytes) - return - } - - // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix - buf := make([]byte, 18) - - // It's simpler to construct the hex string right to left. - base := uint64(16) - i := len(buf) - 1 - for num >= base { - buf[i] = hexDigits[num%base] - num /= base - i-- - } - buf[i] = hexDigits[num] - - // Add '0x' prefix. - i-- - buf[i] = 'x' - i-- - buf[i] = '0' - - // Strip unused leading bytes. - buf = buf[i:] - w.Write(buf) -} - -// valuesSorter implements sort.Interface to allow a slice of reflect.Value -// elements to be sorted. -type valuesSorter struct { - values []reflect.Value - strings []string // either nil or same len and values - cs *ConfigState -} - -// newValuesSorter initializes a valuesSorter instance, which holds a set of -// surrogate keys on which the data should be sorted. It uses flags in -// ConfigState to decide if and how to populate those surrogate keys. -func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { - vs := &valuesSorter{values: values, cs: cs} - if canSortSimply(vs.values[0].Kind()) { - return vs - } - if !cs.DisableMethods { - vs.strings = make([]string, len(values)) - for i := range vs.values { - b := bytes.Buffer{} - if !handleMethods(cs, &b, vs.values[i]) { - vs.strings = nil - break - } - vs.strings[i] = b.String() - } - } - if vs.strings == nil && cs.SpewKeys { - vs.strings = make([]string, len(values)) - for i := range vs.values { - vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) - } - } - return vs -} - -// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted -// directly, or whether it should be considered for sorting by surrogate keys -// (if the ConfigState allows it). -func canSortSimply(kind reflect.Kind) bool { - // This switch parallels valueSortLess, except for the default case. 
- switch kind { - case reflect.Bool: - return true - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return true - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - return true - case reflect.Float32, reflect.Float64: - return true - case reflect.String: - return true - case reflect.Uintptr: - return true - case reflect.Array: - return true - } - return false -} - -// Len returns the number of values in the slice. It is part of the -// sort.Interface implementation. -func (s *valuesSorter) Len() int { - return len(s.values) -} - -// Swap swaps the values at the passed indices. It is part of the -// sort.Interface implementation. -func (s *valuesSorter) Swap(i, j int) { - s.values[i], s.values[j] = s.values[j], s.values[i] - if s.strings != nil { - s.strings[i], s.strings[j] = s.strings[j], s.strings[i] - } -} - -// valueSortLess returns whether the first value should sort before the second -// value. It is used by valueSorter.Less as part of the sort.Interface -// implementation. -func valueSortLess(a, b reflect.Value) bool { - switch a.Kind() { - case reflect.Bool: - return !a.Bool() && b.Bool() - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return a.Int() < b.Int() - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - return a.Uint() < b.Uint() - case reflect.Float32, reflect.Float64: - return a.Float() < b.Float() - case reflect.String: - return a.String() < b.String() - case reflect.Uintptr: - return a.Uint() < b.Uint() - case reflect.Array: - // Compare the contents of both arrays. - l := a.Len() - for i := 0; i < l; i++ { - av := a.Index(i) - bv := b.Index(i) - if av.Interface() == bv.Interface() { - continue - } - return valueSortLess(av, bv) - } - } - return a.String() < b.String() -} - -// Less returns whether the value at index i should sort before the -// value at index j. It is part of the sort.Interface implementation. -func (s *valuesSorter) Less(i, j int) bool { - if s.strings == nil { - return valueSortLess(s.values[i], s.values[j]) - } - return s.strings[i] < s.strings[j] -} - -// sortValues is a sort function that handles both native types and any type that -// can be converted to error or Stringer. Other inputs are sorted according to -// their Value.String() value to ensure display stability. -func sortValues(values []reflect.Value, cs *ConfigState) { - if len(values) == 0 { - return - } - sort.Sort(newValuesSorter(values, cs)) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go deleted file mode 100644 index 2e3d22f..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/config.go +++ /dev/null @@ -1,306 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "io" - "os" -) - -// ConfigState houses the configuration options used by spew to format and -// display values. There is a global instance, Config, that is used to control -// all top-level Formatter and Dump functionality. Each ConfigState instance -// provides methods equivalent to the top-level functions. -// -// The zero value for ConfigState provides no indentation. You would typically -// want to set it to a space or a tab. -// -// Alternatively, you can use NewDefaultConfig to get a ConfigState instance -// with default settings. See the documentation of NewDefaultConfig for default -// values. -type ConfigState struct { - // Indent specifies the string to use for each indentation level. The - // global config instance that all top-level functions use set this to a - // single space by default. If you would like more indentation, you might - // set this to a tab with "\t" or perhaps two spaces with " ". - Indent string - - // MaxDepth controls the maximum number of levels to descend into nested - // data structures. The default, 0, means there is no limit. - // - // NOTE: Circular data structures are properly detected, so it is not - // necessary to set this value unless you specifically want to limit deeply - // nested data structures. - MaxDepth int - - // DisableMethods specifies whether or not error and Stringer interfaces are - // invoked for types that implement them. - DisableMethods bool - - // DisablePointerMethods specifies whether or not to check for and invoke - // error and Stringer interfaces on types which only accept a pointer - // receiver when the current type is not a pointer. - // - // NOTE: This might be an unsafe action since calling one of these methods - // with a pointer receiver could technically mutate the value, however, - // in practice, types which choose to satisify an error or Stringer - // interface with a pointer receiver should not be mutating their state - // inside these interface methods. As a result, this option relies on - // access to the unsafe package, so it will not have any effect when - // running in environments without access to the unsafe package such as - // Google App Engine or with the "safe" build tag specified. - DisablePointerMethods bool - - // DisablePointerAddresses specifies whether to disable the printing of - // pointer addresses. This is useful when diffing data structures in tests. - DisablePointerAddresses bool - - // DisableCapacities specifies whether to disable the printing of capacities - // for arrays, slices, maps and channels. This is useful when diffing - // data structures in tests. - DisableCapacities bool - - // ContinueOnMethod specifies whether or not recursion should continue once - // a custom error or Stringer interface is invoked. The default, false, - // means it will print the results of invoking the custom error or Stringer - // interface and return immediately instead of continuing to recurse into - // the internals of the data type. - // - // NOTE: This flag does not have any effect if method invocation is disabled - // via the DisableMethods or DisablePointerMethods options. 
- ContinueOnMethod bool - - // SortKeys specifies map keys should be sorted before being printed. Use - // this to have a more deterministic, diffable output. Note that only - // native types (bool, int, uint, floats, uintptr and string) and types - // that support the error or Stringer interfaces (if methods are - // enabled) are supported, with other types sorted according to the - // reflect.Value.String() output which guarantees display stability. - SortKeys bool - - // SpewKeys specifies that, as a last resort attempt, map keys should - // be spewed to strings and sorted by those strings. This is only - // considered if SortKeys is true. - SpewKeys bool -} - -// Config is the active configuration of the top-level functions. -// The configuration can be changed by modifying the contents of spew.Config. -var Config = ConfigState{Indent: " "} - -// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the formatted string as a value that satisfies error. See NewFormatter -// for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) { - return fmt.Errorf(format, c.convertArgs(a)...) -} - -// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprint(w, c.convertArgs(a)...) -} - -// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(w, format, c.convertArgs(a)...) -} - -// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it -// passed with a Formatter interface returned by c.NewFormatter. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprintln(w, c.convertArgs(a)...) -} - -// Print is a wrapper for fmt.Print that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Print(a ...interface{}) (n int, err error) { - return fmt.Print(c.convertArgs(a)...) -} - -// Printf is a wrapper for fmt.Printf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. 
It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Printf(format, c.convertArgs(a)...) -} - -// Println is a wrapper for fmt.Println that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Println(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Println(a ...interface{}) (n int, err error) { - return fmt.Println(c.convertArgs(a)...) -} - -// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprint(a ...interface{}) string { - return fmt.Sprint(c.convertArgs(a)...) -} - -// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprintf(format string, a ...interface{}) string { - return fmt.Sprintf(format, c.convertArgs(a)...) -} - -// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it -// were passed with a Formatter interface returned by c.NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprintln(a ...interface{}) string { - return fmt.Sprintln(c.convertArgs(a)...) -} - -/* -NewFormatter returns a custom formatter that satisfies the fmt.Formatter -interface. As a result, it integrates cleanly with standard fmt package -printing functions. The formatter is useful for inline printing of smaller data -types similar to the standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Typically this function shouldn't be called directly. It is much easier to make -use of the custom formatter by calling one of the convenience functions such as -c.Printf, c.Println, or c.Printf. -*/ -func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter { - return newFormatter(c, v) -} - -// Fdump formats and displays the passed arguments to io.Writer w. It formats -// exactly the same as Dump. -func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) { - fdump(c, w, a...) 
-} - -/* -Dump displays the passed parameters to standard out with newlines, customizable -indentation, and additional debug information such as complete types and all -pointer addresses used to indirect to the final value. It provides the -following features over the built-in printing facilities provided by the fmt -package: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output - -The configuration options are controlled by modifying the public members -of c. See ConfigState for options documentation. - -See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to -get the formatted result as a string. -*/ -func (c *ConfigState) Dump(a ...interface{}) { - fdump(c, os.Stdout, a...) -} - -// Sdump returns a string with the passed arguments formatted exactly the same -// as Dump. -func (c *ConfigState) Sdump(a ...interface{}) string { - var buf bytes.Buffer - fdump(c, &buf, a...) - return buf.String() -} - -// convertArgs accepts a slice of arguments and returns a slice of the same -// length with each argument converted to a spew Formatter interface using -// the ConfigState associated with s. -func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) { - formatters = make([]interface{}, len(args)) - for index, arg := range args { - formatters[index] = newFormatter(c, arg) - } - return formatters -} - -// NewDefaultConfig returns a ConfigState with the following default settings. -// -// Indent: " " -// MaxDepth: 0 -// DisableMethods: false -// DisablePointerMethods: false -// ContinueOnMethod: false -// SortKeys: false -func NewDefaultConfig() *ConfigState { - return &ConfigState{Indent: " "} -} diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go deleted file mode 100644 index aacaac6..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/doc.go +++ /dev/null @@ -1,211 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -/* -Package spew implements a deep pretty printer for Go data structures to aid in -debugging. 
- -A quick overview of the additional features spew provides over the built-in -printing facilities for Go data types are as follows: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output (only when using - Dump style) - -There are two different approaches spew allows for dumping Go data structures: - - * Dump style which prints with newlines, customizable indentation, - and additional debug information such as types and all pointer addresses - used to indirect to the final value - * A custom Formatter interface that integrates cleanly with the standard fmt - package and replaces %v, %+v, %#v, and %#+v to provide inline printing - similar to the default %v while providing the additional functionality - outlined above and passing unsupported format verbs such as %x and %q - along to fmt - -Quick Start - -This section demonstrates how to quickly get started with spew. See the -sections below for further details on formatting and configuration options. - -To dump a variable with full newlines, indentation, type, and pointer -information use Dump, Fdump, or Sdump: - spew.Dump(myVar1, myVar2, ...) - spew.Fdump(someWriter, myVar1, myVar2, ...) - str := spew.Sdump(myVar1, myVar2, ...) - -Alternatively, if you would prefer to use format strings with a compacted inline -printing style, use the convenience wrappers Printf, Fprintf, etc with -%v (most compact), %+v (adds pointer addresses), %#v (adds types), or -%#+v (adds types and pointer addresses): - spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - -Configuration Options - -Configuration of spew is handled by fields in the ConfigState type. For -convenience, all of the top-level functions use a global state available -via the spew.Config global. - -It is also possible to create a ConfigState instance that provides methods -equivalent to the top-level functions. This allows concurrent configuration -options. See the ConfigState documentation for more details. - -The following configuration options are available: - * Indent - String to use for each indentation level for Dump functions. - It is a single space by default. A popular alternative is "\t". - - * MaxDepth - Maximum number of levels to descend into nested data structures. - There is no limit by default. - - * DisableMethods - Disables invocation of error and Stringer interface methods. - Method invocation is enabled by default. - - * DisablePointerMethods - Disables invocation of error and Stringer interface methods on types - which only accept pointer receivers from non-pointer variables. - Pointer method invocation is enabled by default. - - * DisablePointerAddresses - DisablePointerAddresses specifies whether to disable the printing of - pointer addresses. This is useful when diffing data structures in tests. - - * DisableCapacities - DisableCapacities specifies whether to disable the printing of - capacities for arrays, slices, maps and channels. 
This is useful when - diffing data structures in tests. - - * ContinueOnMethod - Enables recursion into types after invoking error and Stringer interface - methods. Recursion after method invocation is disabled by default. - - * SortKeys - Specifies map keys should be sorted before being printed. Use - this to have a more deterministic, diffable output. Note that - only native types (bool, int, uint, floats, uintptr and string) - and types which implement error or Stringer interfaces are - supported with other types sorted according to the - reflect.Value.String() output which guarantees display - stability. Natural map order is used by default. - - * SpewKeys - Specifies that, as a last resort attempt, map keys should be - spewed to strings and sorted by those strings. This is only - considered if SortKeys is true. - -Dump Usage - -Simply call spew.Dump with a list of variables you want to dump: - - spew.Dump(myVar1, myVar2, ...) - -You may also call spew.Fdump if you would prefer to output to an arbitrary -io.Writer. For example, to dump to standard error: - - spew.Fdump(os.Stderr, myVar1, myVar2, ...) - -A third option is to call spew.Sdump to get the formatted output as a string: - - str := spew.Sdump(myVar1, myVar2, ...) - -Sample Dump Output - -See the Dump example for details on the setup of the types and variables being -shown here. - - (main.Foo) { - unexportedField: (*main.Bar)(0xf84002e210)({ - flag: (main.Flag) flagTwo, - data: (uintptr) - }), - ExportedField: (map[interface {}]interface {}) (len=1) { - (string) (len=3) "one": (bool) true - } - } - -Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C -command as shown. - ([]uint8) (len=32 cap=32) { - 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | - 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| - 00000020 31 32 |12| - } - -Custom Formatter - -Spew provides a custom formatter that implements the fmt.Formatter interface -so that it integrates cleanly with standard fmt package printing functions. The -formatter is useful for inline printing of smaller data types similar to the -standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Custom Formatter Usage - -The simplest way to make use of the spew custom formatter is to call one of the -convenience functions such as spew.Printf, spew.Println, or spew.Printf. The -functions have syntax you are most likely already familiar with: - - spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - spew.Println(myVar, myVar2) - spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - -See the Index for the full list convenience functions. 
- -Sample Formatter Output - -Double pointer to a uint8: - %v: <**>5 - %+v: <**>(0xf8400420d0->0xf8400420c8)5 - %#v: (**uint8)5 - %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 - -Pointer to circular struct with a uint8 field and a pointer to itself: - %v: <*>{1 <*>} - %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} - %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} - %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} - -See the Printf example for details on the setup of variables being shown -here. - -Errors - -Since it is possible for custom Stringer/error interfaces to panic, spew -detects them and handles them internally by printing the panic information -inline with the output. Since spew is intended to provide deep pretty printing -capabilities on structures, it intentionally does not return any errors. -*/ -package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go deleted file mode 100644 index f78d89f..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/dump.go +++ /dev/null @@ -1,509 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "encoding/hex" - "fmt" - "io" - "os" - "reflect" - "regexp" - "strconv" - "strings" -) - -var ( - // uint8Type is a reflect.Type representing a uint8. It is used to - // convert cgo types to uint8 slices for hexdumping. - uint8Type = reflect.TypeOf(uint8(0)) - - // cCharRE is a regular expression that matches a cgo char. - // It is used to detect character arrays to hexdump them. - cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`) - - // cUnsignedCharRE is a regular expression that matches a cgo unsigned - // char. It is used to detect unsigned character arrays to hexdump - // them. - cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`) - - // cUint8tCharRE is a regular expression that matches a cgo uint8_t. - // It is used to detect uint8_t arrays to hexdump them. - cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`) -) - -// dumpState contains information about the state of a dump operation. -type dumpState struct { - w io.Writer - depth int - pointers map[uintptr]int - ignoreNextType bool - ignoreNextIndent bool - cs *ConfigState -} - -// indent performs indentation according to the depth level and cs.Indent -// option. -func (d *dumpState) indent() { - if d.ignoreNextIndent { - d.ignoreNextIndent = false - return - } - d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) -} - -// unpackValue returns values inside of non-nil interfaces when possible. -// This is useful for data types like structs, arrays, slices, and maps which -// can contain varying types packed inside an interface. 
-func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Interface && !v.IsNil() { - v = v.Elem() - } - return v -} - -// dumpPtr handles formatting of pointers by indirecting them as necessary. -func (d *dumpState) dumpPtr(v reflect.Value) { - // Remove pointers at or below the current depth from map used to detect - // circular refs. - for k, depth := range d.pointers { - if depth >= d.depth { - delete(d.pointers, k) - } - } - - // Keep list of all dereferenced pointers to show later. - pointerChain := make([]uintptr, 0) - - // Figure out how many levels of indirection there are by dereferencing - // pointers and unpacking interfaces down the chain while detecting circular - // references. - nilFound := false - cycleFound := false - indirects := 0 - ve := v - for ve.Kind() == reflect.Ptr { - if ve.IsNil() { - nilFound = true - break - } - indirects++ - addr := ve.Pointer() - pointerChain = append(pointerChain, addr) - if pd, ok := d.pointers[addr]; ok && pd < d.depth { - cycleFound = true - indirects-- - break - } - d.pointers[addr] = d.depth - - ve = ve.Elem() - if ve.Kind() == reflect.Interface { - if ve.IsNil() { - nilFound = true - break - } - ve = ve.Elem() - } - } - - // Display type information. - d.w.Write(openParenBytes) - d.w.Write(bytes.Repeat(asteriskBytes, indirects)) - d.w.Write([]byte(ve.Type().String())) - d.w.Write(closeParenBytes) - - // Display pointer information. - if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { - d.w.Write(openParenBytes) - for i, addr := range pointerChain { - if i > 0 { - d.w.Write(pointerChainBytes) - } - printHexPtr(d.w, addr) - } - d.w.Write(closeParenBytes) - } - - // Display dereferenced value. - d.w.Write(openParenBytes) - switch { - case nilFound: - d.w.Write(nilAngleBytes) - - case cycleFound: - d.w.Write(circularBytes) - - default: - d.ignoreNextType = true - d.dump(ve) - } - d.w.Write(closeParenBytes) -} - -// dumpSlice handles formatting of arrays and slices. Byte (uint8 under -// reflection) arrays and slices are dumped in hexdump -C fashion. -func (d *dumpState) dumpSlice(v reflect.Value) { - // Determine whether this type should be hex dumped or not. Also, - // for types which should be hexdumped, try to use the underlying data - // first, then fall back to trying to convert them to a uint8 slice. - var buf []uint8 - doConvert := false - doHexDump := false - numEntries := v.Len() - if numEntries > 0 { - vt := v.Index(0).Type() - vts := vt.String() - switch { - // C types that need to be converted. - case cCharRE.MatchString(vts): - fallthrough - case cUnsignedCharRE.MatchString(vts): - fallthrough - case cUint8tCharRE.MatchString(vts): - doConvert = true - - // Try to use existing uint8 slices and fall back to converting - // and copying if that fails. - case vt.Kind() == reflect.Uint8: - // We need an addressable interface to convert the type - // to a byte slice. However, the reflect package won't - // give us an interface on certain things like - // unexported struct fields in order to enforce - // visibility rules. We use unsafe, when available, to - // bypass these restrictions since this package does not - // mutate the values. - vs := v - if !vs.CanInterface() || !vs.CanAddr() { - vs = unsafeReflectValue(vs) - } - if !UnsafeDisabled { - vs = vs.Slice(0, numEntries) - - // Use the existing uint8 slice if it can be - // type asserted. 
- iface := vs.Interface() - if slice, ok := iface.([]uint8); ok { - buf = slice - doHexDump = true - break - } - } - - // The underlying data needs to be converted if it can't - // be type asserted to a uint8 slice. - doConvert = true - } - - // Copy and convert the underlying type if needed. - if doConvert && vt.ConvertibleTo(uint8Type) { - // Convert and copy each element into a uint8 byte - // slice. - buf = make([]uint8, numEntries) - for i := 0; i < numEntries; i++ { - vv := v.Index(i) - buf[i] = uint8(vv.Convert(uint8Type).Uint()) - } - doHexDump = true - } - } - - // Hexdump the entire slice as needed. - if doHexDump { - indent := strings.Repeat(d.cs.Indent, d.depth) - str := indent + hex.Dump(buf) - str = strings.Replace(str, "\n", "\n"+indent, -1) - str = strings.TrimRight(str, d.cs.Indent) - d.w.Write([]byte(str)) - return - } - - // Recursively call dump for each item. - for i := 0; i < numEntries; i++ { - d.dump(d.unpackValue(v.Index(i))) - if i < (numEntries - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } -} - -// dump is the main workhorse for dumping a value. It uses the passed reflect -// value to figure out what kind of object we are dealing with and formats it -// appropriately. It is a recursive function, however circular data structures -// are detected and handled properly. -func (d *dumpState) dump(v reflect.Value) { - // Handle invalid reflect values immediately. - kind := v.Kind() - if kind == reflect.Invalid { - d.w.Write(invalidAngleBytes) - return - } - - // Handle pointers specially. - if kind == reflect.Ptr { - d.indent() - d.dumpPtr(v) - return - } - - // Print type information unless already handled elsewhere. - if !d.ignoreNextType { - d.indent() - d.w.Write(openParenBytes) - d.w.Write([]byte(v.Type().String())) - d.w.Write(closeParenBytes) - d.w.Write(spaceBytes) - } - d.ignoreNextType = false - - // Display length and capacity if the built-in len and cap functions - // work with the value's kind and the len/cap itself is non-zero. - valueLen, valueCap := 0, 0 - switch v.Kind() { - case reflect.Array, reflect.Slice, reflect.Chan: - valueLen, valueCap = v.Len(), v.Cap() - case reflect.Map, reflect.String: - valueLen = v.Len() - } - if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { - d.w.Write(openParenBytes) - if valueLen != 0 { - d.w.Write(lenEqualsBytes) - printInt(d.w, int64(valueLen), 10) - } - if !d.cs.DisableCapacities && valueCap != 0 { - if valueLen != 0 { - d.w.Write(spaceBytes) - } - d.w.Write(capEqualsBytes) - printInt(d.w, int64(valueCap), 10) - } - d.w.Write(closeParenBytes) - d.w.Write(spaceBytes) - } - - // Call Stringer/error interfaces if they exist and the handle methods flag - // is enabled - if !d.cs.DisableMethods { - if (kind != reflect.Invalid) && (kind != reflect.Interface) { - if handled := handleMethods(d.cs, d.w, v); handled { - return - } - } - } - - switch kind { - case reflect.Invalid: - // Do nothing. We should never get here since invalid has already - // been handled above. 
- - case reflect.Bool: - printBool(d.w, v.Bool()) - - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - printInt(d.w, v.Int(), 10) - - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - printUint(d.w, v.Uint(), 10) - - case reflect.Float32: - printFloat(d.w, v.Float(), 32) - - case reflect.Float64: - printFloat(d.w, v.Float(), 64) - - case reflect.Complex64: - printComplex(d.w, v.Complex(), 32) - - case reflect.Complex128: - printComplex(d.w, v.Complex(), 64) - - case reflect.Slice: - if v.IsNil() { - d.w.Write(nilAngleBytes) - break - } - fallthrough - - case reflect.Array: - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - d.dumpSlice(v) - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.String: - d.w.Write([]byte(strconv.Quote(v.String()))) - - case reflect.Interface: - // The only time we should get here is for nil interfaces due to - // unpackValue calls. - if v.IsNil() { - d.w.Write(nilAngleBytes) - } - - case reflect.Ptr: - // Do nothing. We should never get here since pointers have already - // been handled above. - - case reflect.Map: - // nil maps should be indicated as different than empty maps - if v.IsNil() { - d.w.Write(nilAngleBytes) - break - } - - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - numEntries := v.Len() - keys := v.MapKeys() - if d.cs.SortKeys { - sortValues(keys, d.cs) - } - for i, key := range keys { - d.dump(d.unpackValue(key)) - d.w.Write(colonSpaceBytes) - d.ignoreNextIndent = true - d.dump(d.unpackValue(v.MapIndex(key))) - if i < (numEntries - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.Struct: - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - vt := v.Type() - numFields := v.NumField() - for i := 0; i < numFields; i++ { - d.indent() - vtf := vt.Field(i) - d.w.Write([]byte(vtf.Name)) - d.w.Write(colonSpaceBytes) - d.ignoreNextIndent = true - d.dump(d.unpackValue(v.Field(i))) - if i < (numFields - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.Uintptr: - printHexPtr(d.w, uintptr(v.Uint())) - - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - printHexPtr(d.w, v.Pointer()) - - // There were not any other types at the time this code was written, but - // fall back to letting the default fmt package handle it in case any new - // types are added. - default: - if v.CanInterface() { - fmt.Fprintf(d.w, "%v", v.Interface()) - } else { - fmt.Fprintf(d.w, "%v", v.String()) - } - } -} - -// fdump is a helper function to consolidate the logic from the various public -// methods which take varying writers and config states. -func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { - for _, arg := range a { - if arg == nil { - w.Write(interfaceBytes) - w.Write(spaceBytes) - w.Write(nilAngleBytes) - w.Write(newlineBytes) - continue - } - - d := dumpState{w: w, cs: cs} - d.pointers = make(map[uintptr]int) - d.dump(reflect.ValueOf(arg)) - d.w.Write(newlineBytes) - } -} - -// Fdump formats and displays the passed arguments to io.Writer w. 
It formats -// exactly the same as Dump. -func Fdump(w io.Writer, a ...interface{}) { - fdump(&Config, w, a...) -} - -// Sdump returns a string with the passed arguments formatted exactly the same -// as Dump. -func Sdump(a ...interface{}) string { - var buf bytes.Buffer - fdump(&Config, &buf, a...) - return buf.String() -} - -/* -Dump displays the passed parameters to standard out with newlines, customizable -indentation, and additional debug information such as complete types and all -pointer addresses used to indirect to the final value. It provides the -following features over the built-in printing facilities provided by the fmt -package: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output - -The configuration options are controlled by an exported package global, -spew.Config. See ConfigState for options documentation. - -See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to -get the formatted result as a string. -*/ -func Dump(a ...interface{}) { - fdump(&Config, os.Stdout, a...) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go deleted file mode 100644 index b04edb7..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/format.go +++ /dev/null @@ -1,419 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "reflect" - "strconv" - "strings" -) - -// supportedFlags is a list of all the character flags supported by fmt package. -const supportedFlags = "0-+# " - -// formatState implements the fmt.Formatter interface and contains information -// about the state of a formatting operation. The NewFormatter function can -// be used to get a new Formatter which can be used directly as arguments -// in standard fmt package printing calls. -type formatState struct { - value interface{} - fs fmt.State - depth int - pointers map[uintptr]int - ignoreNextType bool - cs *ConfigState -} - -// buildDefaultFormat recreates the original format string without precision -// and width information to pass in to fmt.Sprintf in the case of an -// unrecognized type. Unless new types are added to the language, this -// function won't ever be called. 
-func (f *formatState) buildDefaultFormat() (format string) { - buf := bytes.NewBuffer(percentBytes) - - for _, flag := range supportedFlags { - if f.fs.Flag(int(flag)) { - buf.WriteRune(flag) - } - } - - buf.WriteRune('v') - - format = buf.String() - return format -} - -// constructOrigFormat recreates the original format string including precision -// and width information to pass along to the standard fmt package. This allows -// automatic deferral of all format strings this package doesn't support. -func (f *formatState) constructOrigFormat(verb rune) (format string) { - buf := bytes.NewBuffer(percentBytes) - - for _, flag := range supportedFlags { - if f.fs.Flag(int(flag)) { - buf.WriteRune(flag) - } - } - - if width, ok := f.fs.Width(); ok { - buf.WriteString(strconv.Itoa(width)) - } - - if precision, ok := f.fs.Precision(); ok { - buf.Write(precisionBytes) - buf.WriteString(strconv.Itoa(precision)) - } - - buf.WriteRune(verb) - - format = buf.String() - return format -} - -// unpackValue returns values inside of non-nil interfaces when possible and -// ensures that types for values which have been unpacked from an interface -// are displayed when the show types flag is also set. -// This is useful for data types like structs, arrays, slices, and maps which -// can contain varying types packed inside an interface. -func (f *formatState) unpackValue(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Interface { - f.ignoreNextType = false - if !v.IsNil() { - v = v.Elem() - } - } - return v -} - -// formatPtr handles formatting of pointers by indirecting them as necessary. -func (f *formatState) formatPtr(v reflect.Value) { - // Display nil if top level pointer is nil. - showTypes := f.fs.Flag('#') - if v.IsNil() && (!showTypes || f.ignoreNextType) { - f.fs.Write(nilAngleBytes) - return - } - - // Remove pointers at or below the current depth from map used to detect - // circular refs. - for k, depth := range f.pointers { - if depth >= f.depth { - delete(f.pointers, k) - } - } - - // Keep list of all dereferenced pointers to possibly show later. - pointerChain := make([]uintptr, 0) - - // Figure out how many levels of indirection there are by derferencing - // pointers and unpacking interfaces down the chain while detecting circular - // references. - nilFound := false - cycleFound := false - indirects := 0 - ve := v - for ve.Kind() == reflect.Ptr { - if ve.IsNil() { - nilFound = true - break - } - indirects++ - addr := ve.Pointer() - pointerChain = append(pointerChain, addr) - if pd, ok := f.pointers[addr]; ok && pd < f.depth { - cycleFound = true - indirects-- - break - } - f.pointers[addr] = f.depth - - ve = ve.Elem() - if ve.Kind() == reflect.Interface { - if ve.IsNil() { - nilFound = true - break - } - ve = ve.Elem() - } - } - - // Display type or indirection level depending on flags. - if showTypes && !f.ignoreNextType { - f.fs.Write(openParenBytes) - f.fs.Write(bytes.Repeat(asteriskBytes, indirects)) - f.fs.Write([]byte(ve.Type().String())) - f.fs.Write(closeParenBytes) - } else { - if nilFound || cycleFound { - indirects += strings.Count(ve.Type().String(), "*") - } - f.fs.Write(openAngleBytes) - f.fs.Write([]byte(strings.Repeat("*", indirects))) - f.fs.Write(closeAngleBytes) - } - - // Display pointer information depending on flags. 
- if f.fs.Flag('+') && (len(pointerChain) > 0) { - f.fs.Write(openParenBytes) - for i, addr := range pointerChain { - if i > 0 { - f.fs.Write(pointerChainBytes) - } - printHexPtr(f.fs, addr) - } - f.fs.Write(closeParenBytes) - } - - // Display dereferenced value. - switch { - case nilFound: - f.fs.Write(nilAngleBytes) - - case cycleFound: - f.fs.Write(circularShortBytes) - - default: - f.ignoreNextType = true - f.format(ve) - } -} - -// format is the main workhorse for providing the Formatter interface. It -// uses the passed reflect value to figure out what kind of object we are -// dealing with and formats it appropriately. It is a recursive function, -// however circular data structures are detected and handled properly. -func (f *formatState) format(v reflect.Value) { - // Handle invalid reflect values immediately. - kind := v.Kind() - if kind == reflect.Invalid { - f.fs.Write(invalidAngleBytes) - return - } - - // Handle pointers specially. - if kind == reflect.Ptr { - f.formatPtr(v) - return - } - - // Print type information unless already handled elsewhere. - if !f.ignoreNextType && f.fs.Flag('#') { - f.fs.Write(openParenBytes) - f.fs.Write([]byte(v.Type().String())) - f.fs.Write(closeParenBytes) - } - f.ignoreNextType = false - - // Call Stringer/error interfaces if they exist and the handle methods - // flag is enabled. - if !f.cs.DisableMethods { - if (kind != reflect.Invalid) && (kind != reflect.Interface) { - if handled := handleMethods(f.cs, f.fs, v); handled { - return - } - } - } - - switch kind { - case reflect.Invalid: - // Do nothing. We should never get here since invalid has already - // been handled above. - - case reflect.Bool: - printBool(f.fs, v.Bool()) - - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - printInt(f.fs, v.Int(), 10) - - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - printUint(f.fs, v.Uint(), 10) - - case reflect.Float32: - printFloat(f.fs, v.Float(), 32) - - case reflect.Float64: - printFloat(f.fs, v.Float(), 64) - - case reflect.Complex64: - printComplex(f.fs, v.Complex(), 32) - - case reflect.Complex128: - printComplex(f.fs, v.Complex(), 64) - - case reflect.Slice: - if v.IsNil() { - f.fs.Write(nilAngleBytes) - break - } - fallthrough - - case reflect.Array: - f.fs.Write(openBracketBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - numEntries := v.Len() - for i := 0; i < numEntries; i++ { - if i > 0 { - f.fs.Write(spaceBytes) - } - f.ignoreNextType = true - f.format(f.unpackValue(v.Index(i))) - } - } - f.depth-- - f.fs.Write(closeBracketBytes) - - case reflect.String: - f.fs.Write([]byte(v.String())) - - case reflect.Interface: - // The only time we should get here is for nil interfaces due to - // unpackValue calls. - if v.IsNil() { - f.fs.Write(nilAngleBytes) - } - - case reflect.Ptr: - // Do nothing. We should never get here since pointers have already - // been handled above. 
- - case reflect.Map: - // nil maps should be indicated as different than empty maps - if v.IsNil() { - f.fs.Write(nilAngleBytes) - break - } - - f.fs.Write(openMapBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - keys := v.MapKeys() - if f.cs.SortKeys { - sortValues(keys, f.cs) - } - for i, key := range keys { - if i > 0 { - f.fs.Write(spaceBytes) - } - f.ignoreNextType = true - f.format(f.unpackValue(key)) - f.fs.Write(colonBytes) - f.ignoreNextType = true - f.format(f.unpackValue(v.MapIndex(key))) - } - } - f.depth-- - f.fs.Write(closeMapBytes) - - case reflect.Struct: - numFields := v.NumField() - f.fs.Write(openBraceBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - vt := v.Type() - for i := 0; i < numFields; i++ { - if i > 0 { - f.fs.Write(spaceBytes) - } - vtf := vt.Field(i) - if f.fs.Flag('+') || f.fs.Flag('#') { - f.fs.Write([]byte(vtf.Name)) - f.fs.Write(colonBytes) - } - f.format(f.unpackValue(v.Field(i))) - } - } - f.depth-- - f.fs.Write(closeBraceBytes) - - case reflect.Uintptr: - printHexPtr(f.fs, uintptr(v.Uint())) - - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - printHexPtr(f.fs, v.Pointer()) - - // There were not any other types at the time this code was written, but - // fall back to letting the default fmt package handle it if any get added. - default: - format := f.buildDefaultFormat() - if v.CanInterface() { - fmt.Fprintf(f.fs, format, v.Interface()) - } else { - fmt.Fprintf(f.fs, format, v.String()) - } - } -} - -// Format satisfies the fmt.Formatter interface. See NewFormatter for usage -// details. -func (f *formatState) Format(fs fmt.State, verb rune) { - f.fs = fs - - // Use standard formatting for verbs that are not v. - if verb != 'v' { - format := f.constructOrigFormat(verb) - fmt.Fprintf(fs, format, f.value) - return - } - - if f.value == nil { - if fs.Flag('#') { - fs.Write(interfaceBytes) - } - fs.Write(nilAngleBytes) - return - } - - f.format(reflect.ValueOf(f.value)) -} - -// newFormatter is a helper function to consolidate the logic from the various -// public methods which take varying config states. -func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter { - fs := &formatState{value: v, cs: cs} - fs.pointers = make(map[uintptr]int) - return fs -} - -/* -NewFormatter returns a custom formatter that satisfies the fmt.Formatter -interface. As a result, it integrates cleanly with standard fmt package -printing functions. The formatter is useful for inline printing of smaller data -types similar to the standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Typically this function shouldn't be called directly. It is much easier to make -use of the custom formatter by calling one of the convenience functions such as -Printf, Println, or Fprintf. 
-*/ -func NewFormatter(v interface{}) fmt.Formatter { - return newFormatter(&Config, v) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go deleted file mode 100644 index 32c0e33..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/spew.go +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "fmt" - "io" -) - -// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the formatted string as a value that satisfies error. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Errorf(format string, a ...interface{}) (err error) { - return fmt.Errorf(format, convertArgs(a)...) -} - -// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprint(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprint(w, convertArgs(a)...) -} - -// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(w, format, convertArgs(a)...) -} - -// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it -// passed with a default Formatter interface returned by NewFormatter. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprintln(w, convertArgs(a)...) -} - -// Print is a wrapper for fmt.Print that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. 
-// -// This function is shorthand for the following syntax: -// -// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) -func Print(a ...interface{}) (n int, err error) { - return fmt.Print(convertArgs(a)...) -} - -// Printf is a wrapper for fmt.Printf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Printf(format, convertArgs(a)...) -} - -// Println is a wrapper for fmt.Println that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) -func Println(a ...interface{}) (n int, err error) { - return fmt.Println(convertArgs(a)...) -} - -// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprint(a ...interface{}) string { - return fmt.Sprint(convertArgs(a)...) -} - -// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprintf(format string, a ...interface{}) string { - return fmt.Sprintf(format, convertArgs(a)...) -} - -// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it -// were passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprintln(a ...interface{}) string { - return fmt.Sprintln(convertArgs(a)...) -} - -// convertArgs accepts a slice of arguments and returns a slice of the same -// length with each argument converted to a default spew Formatter interface. -func convertArgs(args []interface{}) (formatters []interface{}) { - formatters = make([]interface{}, len(args)) - for index, arg := range args { - formatters[index] = NewFormatter(arg) - } - return formatters -} diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml deleted file mode 100644 index d8156a6..0000000 --- a/vendor/github.com/google/uuid/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go - -go: - - 1.4.3 - - 1.5.3 - - tip - -script: - - go test -v ./... 
diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md deleted file mode 100644 index 04fdf09..0000000 --- a/vendor/github.com/google/uuid/CONTRIBUTING.md +++ /dev/null @@ -1,10 +0,0 @@ -# How to contribute - -We definitely welcome patches and contribution to this project! - -### Legal requirements - -In order to protect both you and ourselves, you will need to sign the -[Contributor License Agreement](https://cla.developers.google.com/clas). - -You may have already signed it for other Google projects. diff --git a/vendor/github.com/google/uuid/CONTRIBUTORS b/vendor/github.com/google/uuid/CONTRIBUTORS deleted file mode 100644 index b4bb97f..0000000 --- a/vendor/github.com/google/uuid/CONTRIBUTORS +++ /dev/null @@ -1,9 +0,0 @@ -Paul Borman -bmatsuo -shawnps -theory -jboverfelt -dsymonds -cd1 -wallclockbuilder -dansouza diff --git a/vendor/github.com/google/uuid/LICENSE b/vendor/github.com/google/uuid/LICENSE deleted file mode 100644 index 5dc6826..0000000 --- a/vendor/github.com/google/uuid/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009,2014 Google Inc. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md deleted file mode 100644 index f765a46..0000000 --- a/vendor/github.com/google/uuid/README.md +++ /dev/null @@ -1,19 +0,0 @@ -# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master) -The uuid package generates and inspects UUIDs based on -[RFC 4122](http://tools.ietf.org/html/rfc4122) -and DCE 1.1: Authentication and Security Services. - -This package is based on the github.com/pborman/uuid package (previously named -code.google.com/p/go-uuid). It differs from these earlier packages in that -a UUID is a 16 byte array rather than a byte slice. One loss due to this -change is the ability to represent an invalid UUID (vs a NIL UUID). 
- -###### Install -`go get github.com/google/uuid` - -###### Documentation -[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid) - -Full `go doc` style documentation for the package can be viewed online without -installing this package by using the GoDoc site here: -http://pkg.go.dev/github.com/google/uuid diff --git a/vendor/github.com/google/uuid/dce.go b/vendor/github.com/google/uuid/dce.go deleted file mode 100644 index fa820b9..0000000 --- a/vendor/github.com/google/uuid/dce.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" - "fmt" - "os" -) - -// A Domain represents a Version 2 domain -type Domain byte - -// Domain constants for DCE Security (Version 2) UUIDs. -const ( - Person = Domain(0) - Group = Domain(1) - Org = Domain(2) -) - -// NewDCESecurity returns a DCE Security (Version 2) UUID. -// -// The domain should be one of Person, Group or Org. -// On a POSIX system the id should be the users UID for the Person -// domain and the users GID for the Group. The meaning of id for -// the domain Org or on non-POSIX systems is site defined. -// -// For a given domain/id pair the same token may be returned for up to -// 7 minutes and 10 seconds. -func NewDCESecurity(domain Domain, id uint32) (UUID, error) { - uuid, err := NewUUID() - if err == nil { - uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 - uuid[9] = byte(domain) - binary.BigEndian.PutUint32(uuid[0:], id) - } - return uuid, err -} - -// NewDCEPerson returns a DCE Security (Version 2) UUID in the person -// domain with the id returned by os.Getuid. -// -// NewDCESecurity(Person, uint32(os.Getuid())) -func NewDCEPerson() (UUID, error) { - return NewDCESecurity(Person, uint32(os.Getuid())) -} - -// NewDCEGroup returns a DCE Security (Version 2) UUID in the group -// domain with the id returned by os.Getgid. -// -// NewDCESecurity(Group, uint32(os.Getgid())) -func NewDCEGroup() (UUID, error) { - return NewDCESecurity(Group, uint32(os.Getgid())) -} - -// Domain returns the domain for a Version 2 UUID. Domains are only defined -// for Version 2 UUIDs. -func (uuid UUID) Domain() Domain { - return Domain(uuid[9]) -} - -// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2 -// UUIDs. -func (uuid UUID) ID() uint32 { - return binary.BigEndian.Uint32(uuid[0:4]) -} - -func (d Domain) String() string { - switch d { - case Person: - return "Person" - case Group: - return "Group" - case Org: - return "Org" - } - return fmt.Sprintf("Domain%d", int(d)) -} diff --git a/vendor/github.com/google/uuid/doc.go b/vendor/github.com/google/uuid/doc.go deleted file mode 100644 index 5b8a4b9..0000000 --- a/vendor/github.com/google/uuid/doc.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package uuid generates and inspects UUIDs. -// -// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security -// Services. -// -// A UUID is a 16 byte (128 bit) array. UUIDs may be used as keys to -// maps or compared directly. 
-package uuid diff --git a/vendor/github.com/google/uuid/go.mod b/vendor/github.com/google/uuid/go.mod deleted file mode 100644 index fc84cd7..0000000 --- a/vendor/github.com/google/uuid/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/google/uuid diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go deleted file mode 100644 index b174616..0000000 --- a/vendor/github.com/google/uuid/hash.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "crypto/md5" - "crypto/sha1" - "hash" -) - -// Well known namespace IDs and UUIDs -var ( - NameSpaceDNS = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) - NameSpaceURL = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) - NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) - NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) - Nil UUID // empty UUID, all zeros -) - -// NewHash returns a new UUID derived from the hash of space concatenated with -// data generated by h. The hash should be at least 16 byte in length. The -// first 16 bytes of the hash are used to form the UUID. The version of the -// UUID will be the lower 4 bits of version. NewHash is used to implement -// NewMD5 and NewSHA1. -func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { - h.Reset() - h.Write(space[:]) - h.Write(data) - s := h.Sum(nil) - var uuid UUID - copy(uuid[:], s) - uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) - uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant - return uuid -} - -// NewMD5 returns a new MD5 (Version 3) UUID based on the -// supplied name space and data. It is the same as calling: -// -// NewHash(md5.New(), space, data, 3) -func NewMD5(space UUID, data []byte) UUID { - return NewHash(md5.New(), space, data, 3) -} - -// NewSHA1 returns a new SHA1 (Version 5) UUID based on the -// supplied name space and data. It is the same as calling: -// -// NewHash(sha1.New(), space, data, 5) -func NewSHA1(space UUID, data []byte) UUID { - return NewHash(sha1.New(), space, data, 5) -} diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go deleted file mode 100644 index 14bd340..0000000 --- a/vendor/github.com/google/uuid/marshal.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import "fmt" - -// MarshalText implements encoding.TextMarshaler. -func (uuid UUID) MarshalText() ([]byte, error) { - var js [36]byte - encodeHex(js[:], uuid) - return js[:], nil -} - -// UnmarshalText implements encoding.TextUnmarshaler. -func (uuid *UUID) UnmarshalText(data []byte) error { - id, err := ParseBytes(data) - if err != nil { - return err - } - *uuid = id - return nil -} - -// MarshalBinary implements encoding.BinaryMarshaler. -func (uuid UUID) MarshalBinary() ([]byte, error) { - return uuid[:], nil -} - -// UnmarshalBinary implements encoding.BinaryUnmarshaler. 
-func (uuid *UUID) UnmarshalBinary(data []byte) error { - if len(data) != 16 { - return fmt.Errorf("invalid UUID (got %d bytes)", len(data)) - } - copy(uuid[:], data) - return nil -} diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go deleted file mode 100644 index d651a2b..0000000 --- a/vendor/github.com/google/uuid/node.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "sync" -) - -var ( - nodeMu sync.Mutex - ifname string // name of interface being used - nodeID [6]byte // hardware for version 1 UUIDs - zeroID [6]byte // nodeID with only 0's -) - -// NodeInterface returns the name of the interface from which the NodeID was -// derived. The interface "user" is returned if the NodeID was set by -// SetNodeID. -func NodeInterface() string { - defer nodeMu.Unlock() - nodeMu.Lock() - return ifname -} - -// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. -// If name is "" then the first usable interface found will be used or a random -// Node ID will be generated. If a named interface cannot be found then false -// is returned. -// -// SetNodeInterface never fails when name is "". -func SetNodeInterface(name string) bool { - defer nodeMu.Unlock() - nodeMu.Lock() - return setNodeInterface(name) -} - -func setNodeInterface(name string) bool { - iname, addr := getHardwareInterface(name) // null implementation for js - if iname != "" && addr != nil { - ifname = iname - copy(nodeID[:], addr) - return true - } - - // We found no interfaces with a valid hardware address. If name - // does not specify a specific interface generate a random Node ID - // (section 4.1.6) - if name == "" { - ifname = "random" - randomBits(nodeID[:]) - return true - } - return false -} - -// NodeID returns a slice of a copy of the current Node ID, setting the Node ID -// if not already set. -func NodeID() []byte { - defer nodeMu.Unlock() - nodeMu.Lock() - if nodeID == zeroID { - setNodeInterface("") - } - nid := nodeID - return nid[:] -} - -// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes -// of id are used. If id is less than 6 bytes then false is returned and the -// Node ID is not set. -func SetNodeID(id []byte) bool { - if len(id) < 6 { - return false - } - defer nodeMu.Unlock() - nodeMu.Lock() - copy(nodeID[:], id) - ifname = "user" - return true -} - -// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is -// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. -func (uuid UUID) NodeID() []byte { - var node [6]byte - copy(node[:], uuid[10:]) - return node[:] -} diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go deleted file mode 100644 index 24b78ed..0000000 --- a/vendor/github.com/google/uuid/node_js.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2017 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build js - -package uuid - -// getHardwareInterface returns nil values for the JS version of the code. -// This remvoves the "net" dependency, because it is not used in the browser. -// Using the "net" library inflates the size of the transpiled JS code by 673k bytes. 
-func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/vendor/github.com/google/uuid/node_net.go b/vendor/github.com/google/uuid/node_net.go deleted file mode 100644 index 0cbbcdd..0000000 --- a/vendor/github.com/google/uuid/node_net.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2017 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !js - -package uuid - -import "net" - -var interfaces []net.Interface // cached list of interfaces - -// getHardwareInterface returns the name and hardware address of interface name. -// If name is "" then the name and hardware address of one of the system's -// interfaces is returned. If no interfaces are found (name does not exist or -// there are no interfaces) then "", nil is returned. -// -// Only addresses of at least 6 bytes are returned. -func getHardwareInterface(name string) (string, []byte) { - if interfaces == nil { - var err error - interfaces, err = net.Interfaces() - if err != nil { - return "", nil - } - } - for _, ifs := range interfaces { - if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { - return ifs.Name, ifs.HardwareAddr - } - } - return "", nil -} diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go deleted file mode 100644 index f326b54..0000000 --- a/vendor/github.com/google/uuid/sql.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "database/sql/driver" - "fmt" -) - -// Scan implements sql.Scanner so UUIDs can be read from databases transparently -// Currently, database types that map to string and []byte are supported. Please -// consult database-specific driver documentation for matching types. -func (uuid *UUID) Scan(src interface{}) error { - switch src := src.(type) { - case nil: - return nil - - case string: - // if an empty UUID comes from a table, we return a null UUID - if src == "" { - return nil - } - - // see Parse for required string format - u, err := Parse(src) - if err != nil { - return fmt.Errorf("Scan: %v", err) - } - - *uuid = u - - case []byte: - // if an empty UUID comes from a table, we return a null UUID - if len(src) == 0 { - return nil - } - - // assumes a simple slice of bytes if 16 bytes - // otherwise attempts to parse - if len(src) != 16 { - return uuid.Scan(string(src)) - } - copy((*uuid)[:], src) - - default: - return fmt.Errorf("Scan: unable to scan type %T into UUID", src) - } - - return nil -} - -// Value implements sql.Valuer so that UUIDs can be written to databases -// transparently. Currently, UUIDs map to strings. Please consult -// database-specific driver documentation for matching types. -func (uuid UUID) Value() (driver.Value, error) { - return uuid.String(), nil -} diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go deleted file mode 100644 index e6ef06c..0000000 --- a/vendor/github.com/google/uuid/time.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" - "sync" - "time" -) - -// A Time represents a time as the number of 100's of nanoseconds since 15 Oct -// 1582. 
-type Time int64 - -const ( - lillian = 2299160 // Julian day of 15 Oct 1582 - unix = 2440587 // Julian day of 1 Jan 1970 - epoch = unix - lillian // Days between epochs - g1582 = epoch * 86400 // seconds between epochs - g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs -) - -var ( - timeMu sync.Mutex - lasttime uint64 // last time we returned - clockSeq uint16 // clock sequence for this run - - timeNow = time.Now // for testing -) - -// UnixTime converts t the number of seconds and nanoseconds using the Unix -// epoch of 1 Jan 1970. -func (t Time) UnixTime() (sec, nsec int64) { - sec = int64(t - g1582ns100) - nsec = (sec % 10000000) * 100 - sec /= 10000000 - return sec, nsec -} - -// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and -// clock sequence as well as adjusting the clock sequence as needed. An error -// is returned if the current time cannot be determined. -func GetTime() (Time, uint16, error) { - defer timeMu.Unlock() - timeMu.Lock() - return getTime() -} - -func getTime() (Time, uint16, error) { - t := timeNow() - - // If we don't have a clock sequence already, set one. - if clockSeq == 0 { - setClockSequence(-1) - } - now := uint64(t.UnixNano()/100) + g1582ns100 - - // If time has gone backwards with this clock sequence then we - // increment the clock sequence - if now <= lasttime { - clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000 - } - lasttime = now - return Time(now), clockSeq, nil -} - -// ClockSequence returns the current clock sequence, generating one if not -// already set. The clock sequence is only used for Version 1 UUIDs. -// -// The uuid package does not use global static storage for the clock sequence or -// the last time a UUID was generated. Unless SetClockSequence is used, a new -// random clock sequence is generated the first time a clock sequence is -// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) -func ClockSequence() int { - defer timeMu.Unlock() - timeMu.Lock() - return clockSequence() -} - -func clockSequence() int { - if clockSeq == 0 { - setClockSequence(-1) - } - return int(clockSeq & 0x3fff) -} - -// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to -// -1 causes a new sequence to be generated. -func SetClockSequence(seq int) { - defer timeMu.Unlock() - timeMu.Lock() - setClockSequence(seq) -} - -func setClockSequence(seq int) { - if seq == -1 { - var b [2]byte - randomBits(b[:]) // clock sequence - seq = int(b[0])<<8 | int(b[1]) - } - oldSeq := clockSeq - clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant - if oldSeq != clockSeq { - lasttime = 0 - } -} - -// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in -// uuid. The time is only defined for version 1 and 2 UUIDs. -func (uuid UUID) Time() Time { - time := int64(binary.BigEndian.Uint32(uuid[0:4])) - time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 - time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 - return Time(time) -} - -// ClockSequence returns the clock sequence encoded in uuid. -// The clock sequence is only well defined for version 1 and 2 UUIDs. -func (uuid UUID) ClockSequence() int { - return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff -} diff --git a/vendor/github.com/google/uuid/util.go b/vendor/github.com/google/uuid/util.go deleted file mode 100644 index 5ea6c73..0000000 --- a/vendor/github.com/google/uuid/util.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "io" -) - -// randomBits completely fills slice b with random data. -func randomBits(b []byte) { - if _, err := io.ReadFull(rander, b); err != nil { - panic(err.Error()) // rand should never fail - } -} - -// xvalues returns the value of a byte as a hexadecimal digit or 255. -var xvalues = [256]byte{ - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, - 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, - 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, -} - -// xtob converts hex characters x1 and x2 into a byte. -func xtob(x1, x2 byte) (byte, bool) { - b1 := xvalues[x1] - b2 := xvalues[x2] - return (b1 << 4) | b2, b1 != 255 && b2 != 255 -} diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go deleted file mode 100644 index 524404c..0000000 --- a/vendor/github.com/google/uuid/uuid.go +++ /dev/null @@ -1,245 +0,0 @@ -// Copyright 2018 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "bytes" - "crypto/rand" - "encoding/hex" - "errors" - "fmt" - "io" - "strings" -) - -// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC -// 4122. -type UUID [16]byte - -// A Version represents a UUID's version. -type Version byte - -// A Variant represents a UUID's variant. -type Variant byte - -// Constants returned by Variant. -const ( - Invalid = Variant(iota) // Invalid UUID - RFC4122 // The variant specified in RFC4122 - Reserved // Reserved, NCS backward compatibility. - Microsoft // Reserved, Microsoft Corporation backward compatibility. - Future // Reserved for future definition. -) - -var rander = rand.Reader // random function - -// Parse decodes s into a UUID or returns an error. Both the standard UUID -// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the -// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex -// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx. 
-func Parse(s string) (UUID, error) { - var uuid UUID - switch len(s) { - // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - case 36: - - // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - case 36 + 9: - if strings.ToLower(s[:9]) != "urn:uuid:" { - return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) - } - s = s[9:] - - // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} - case 36 + 2: - s = s[1:] - - // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx - case 32: - var ok bool - for i := range uuid { - uuid[i], ok = xtob(s[i*2], s[i*2+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - } - return uuid, nil - default: - return uuid, fmt.Errorf("invalid UUID length: %d", len(s)) - } - // s is now at least 36 bytes long - // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { - return uuid, errors.New("invalid UUID format") - } - for i, x := range [16]int{ - 0, 2, 4, 6, - 9, 11, - 14, 16, - 19, 21, - 24, 26, 28, 30, 32, 34} { - v, ok := xtob(s[x], s[x+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - uuid[i] = v - } - return uuid, nil -} - -// ParseBytes is like Parse, except it parses a byte slice instead of a string. -func ParseBytes(b []byte) (UUID, error) { - var uuid UUID - switch len(b) { - case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { - return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) - } - b = b[9:] - case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} - b = b[1:] - case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx - var ok bool - for i := 0; i < 32; i += 2 { - uuid[i/2], ok = xtob(b[i], b[i+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - } - return uuid, nil - default: - return uuid, fmt.Errorf("invalid UUID length: %d", len(b)) - } - // s is now at least 36 bytes long - // it must be of the form xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' { - return uuid, errors.New("invalid UUID format") - } - for i, x := range [16]int{ - 0, 2, 4, 6, - 9, 11, - 14, 16, - 19, 21, - 24, 26, 28, 30, 32, 34} { - v, ok := xtob(b[x], b[x+1]) - if !ok { - return uuid, errors.New("invalid UUID format") - } - uuid[i] = v - } - return uuid, nil -} - -// MustParse is like Parse but panics if the string cannot be parsed. -// It simplifies safe initialization of global variables holding compiled UUIDs. -func MustParse(s string) UUID { - uuid, err := Parse(s) - if err != nil { - panic(`uuid: Parse(` + s + `): ` + err.Error()) - } - return uuid -} - -// FromBytes creates a new UUID from a byte slice. Returns an error if the slice -// does not have a length of 16. The bytes are copied from the slice. -func FromBytes(b []byte) (uuid UUID, err error) { - err = uuid.UnmarshalBinary(b) - return uuid, err -} - -// Must returns uuid if err is nil and panics otherwise. -func Must(uuid UUID, err error) UUID { - if err != nil { - panic(err) - } - return uuid -} - -// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx -// , or "" if uuid is invalid. -func (uuid UUID) String() string { - var buf [36]byte - encodeHex(buf[:], uuid) - return string(buf[:]) -} - -// URN returns the RFC 2141 URN form of uuid, -// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. 
-func (uuid UUID) URN() string { - var buf [36 + 9]byte - copy(buf[:], "urn:uuid:") - encodeHex(buf[9:], uuid) - return string(buf[:]) -} - -func encodeHex(dst []byte, uuid UUID) { - hex.Encode(dst, uuid[:4]) - dst[8] = '-' - hex.Encode(dst[9:13], uuid[4:6]) - dst[13] = '-' - hex.Encode(dst[14:18], uuid[6:8]) - dst[18] = '-' - hex.Encode(dst[19:23], uuid[8:10]) - dst[23] = '-' - hex.Encode(dst[24:], uuid[10:]) -} - -// Variant returns the variant encoded in uuid. -func (uuid UUID) Variant() Variant { - switch { - case (uuid[8] & 0xc0) == 0x80: - return RFC4122 - case (uuid[8] & 0xe0) == 0xc0: - return Microsoft - case (uuid[8] & 0xe0) == 0xe0: - return Future - default: - return Reserved - } -} - -// Version returns the version of uuid. -func (uuid UUID) Version() Version { - return Version(uuid[6] >> 4) -} - -func (v Version) String() string { - if v > 15 { - return fmt.Sprintf("BAD_VERSION_%d", v) - } - return fmt.Sprintf("VERSION_%d", v) -} - -func (v Variant) String() string { - switch v { - case RFC4122: - return "RFC4122" - case Reserved: - return "Reserved" - case Microsoft: - return "Microsoft" - case Future: - return "Future" - case Invalid: - return "Invalid" - } - return fmt.Sprintf("BadVariant%d", int(v)) -} - -// SetRand sets the random number generator to r, which implements io.Reader. -// If r.Read returns an error when the package requests random data then -// a panic will be issued. -// -// Calling SetRand with nil sets the random number generator to the default -// generator. -func SetRand(r io.Reader) { - if r == nil { - rander = rand.Reader - return - } - rander = r -} diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go deleted file mode 100644 index 4631096..0000000 --- a/vendor/github.com/google/uuid/version1.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package uuid - -import ( - "encoding/binary" -) - -// NewUUID returns a Version 1 UUID based on the current NodeID and clock -// sequence, and the current time. If the NodeID has not been set by SetNodeID -// or SetNodeInterface then it will be set automatically. If the NodeID cannot -// be set NewUUID returns nil. If clock sequence has not been set by -// SetClockSequence then it will be set automatically. If GetTime fails to -// return the current NewUUID returns nil and an error. -// -// In most cases, New should be used. -func NewUUID() (UUID, error) { - var uuid UUID - now, seq, err := GetTime() - if err != nil { - return uuid, err - } - - timeLow := uint32(now & 0xffffffff) - timeMid := uint16((now >> 32) & 0xffff) - timeHi := uint16((now >> 48) & 0x0fff) - timeHi |= 0x1000 // Version 1 - - binary.BigEndian.PutUint32(uuid[0:], timeLow) - binary.BigEndian.PutUint16(uuid[4:], timeMid) - binary.BigEndian.PutUint16(uuid[6:], timeHi) - binary.BigEndian.PutUint16(uuid[8:], seq) - - nodeMu.Lock() - if nodeID == zeroID { - setNodeInterface("") - } - copy(uuid[10:], nodeID[:]) - nodeMu.Unlock() - - return uuid, nil -} diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go deleted file mode 100644 index c110465..0000000 --- a/vendor/github.com/google/uuid/version4.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2016 Google Inc. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package uuid - -import "io" - -// New creates a new random UUID or panics. New is equivalent to -// the expression -// -// uuid.Must(uuid.NewRandom()) -func New() UUID { - return Must(NewRandom()) -} - -// NewRandom returns a Random (Version 4) UUID. -// -// The strength of the UUIDs is based on the strength of the crypto/rand -// package. -// -// A note about uniqueness derived from the UUID Wikipedia entry: -// -// Randomly generated UUIDs have 122 random bits. One's annual risk of being -// hit by a meteorite is estimated to be one chance in 17 billion, that -// means the probability is about 0.00000000006 (6 Γ— 10βˆ’11), -// equivalent to the odds of creating a few tens of trillions of UUIDs in a -// year and having one duplicate. -func NewRandom() (UUID, error) { - return NewRandomFromReader(rander) -} - -// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader. -func NewRandomFromReader(r io.Reader) (UUID, error) { - var uuid UUID - _, err := io.ReadFull(r, uuid[:]) - if err != nil { - return Nil, err - } - uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 - uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 - return uuid, nil -} diff --git a/vendor/github.com/hashicorp/go-version/.travis.yml b/vendor/github.com/hashicorp/go-version/.travis.yml deleted file mode 100644 index 542ca8b..0000000 --- a/vendor/github.com/hashicorp/go-version/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: go - -go: - - 1.0 - - 1.1 - - 1.2 - - 1.3 - - 1.4 - - 1.9 - - "1.10" - -script: - - go test diff --git a/vendor/github.com/hashicorp/go-version/LICENSE b/vendor/github.com/hashicorp/go-version/LICENSE deleted file mode 100644 index c33dcc7..0000000 --- a/vendor/github.com/hashicorp/go-version/LICENSE +++ /dev/null @@ -1,354 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. β€œContributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. β€œContributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. β€œContribution” - - means Covered Software of a particular Contributor. - -1.4. β€œCovered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. β€œIncompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. β€œExecutable Form” - - means any form of the work other than Source Code Form. - -1.7. β€œLarger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. β€œLicense” - - means this document. - -1.9. β€œLicensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. β€œModifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. 
any new file in Source Code Form that contains any Covered Software. - -1.11. β€œPatent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. β€œSecondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. β€œSource Code Form” - - means the form of the work preferred for making modifications. - -1.14. β€œYou” (or β€œYour”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, β€œYou” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, β€œcontrol” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. 
Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. 
Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an β€œas is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. 
Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. 
- -You may add additional accurate notices of copyright ownership. - -Exhibit B - β€œIncompatible With Secondary Licenses” Notice - - This Source Code Form is β€œIncompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/go-version/README.md b/vendor/github.com/hashicorp/go-version/README.md deleted file mode 100644 index 6f3a15c..0000000 --- a/vendor/github.com/hashicorp/go-version/README.md +++ /dev/null @@ -1,65 +0,0 @@ -# Versioning Library for Go -[![Build Status](https://travis-ci.org/hashicorp/go-version.svg?branch=master)](https://travis-ci.org/hashicorp/go-version) - -go-version is a library for parsing versions and version constraints, -and verifying versions against a set of constraints. go-version -can sort a collection of versions properly, handles prerelease/beta -versions, can increment versions, etc. - -Versions used with go-version must follow [SemVer](http://semver.org/). - -## Installation and Usage - -Package documentation can be found on -[GoDoc](http://godoc.org/github.com/hashicorp/go-version). - -Installation can be done with a normal `go get`: - -``` -$ go get github.com/hashicorp/go-version -``` - -#### Version Parsing and Comparison - -```go -v1, err := version.NewVersion("1.2") -v2, err := version.NewVersion("1.5+metadata") - -// Comparison example. There is also GreaterThan, Equal, and just -// a simple Compare that returns an int allowing easy >=, <=, etc. -if v1.LessThan(v2) { - fmt.Printf("%s is less than %s", v1, v2) -} -``` - -#### Version Constraints - -```go -v1, err := version.NewVersion("1.2") - -// Constraints example. -constraints, err := version.NewConstraint(">= 1.0, < 1.4") -if constraints.Check(v1) { - fmt.Printf("%s satisfies constraints %s", v1, constraints) -} -``` - -#### Version Sorting - -```go -versionsRaw := []string{"1.1", "0.7.1", "1.4-beta", "1.4", "2"} -versions := make([]*version.Version, len(versionsRaw)) -for i, raw := range versionsRaw { - v, _ := version.NewVersion(raw) - versions[i] = v -} - -// After this, the versions are properly sorted -sort.Sort(version.Collection(versions)) -``` - -## Issues and Contributing - -If you find an issue with this library, please report an issue. If you'd -like, we welcome any contributions. Fork this library and submit a pull -request. diff --git a/vendor/github.com/hashicorp/go-version/constraint.go b/vendor/github.com/hashicorp/go-version/constraint.go deleted file mode 100644 index d055759..0000000 --- a/vendor/github.com/hashicorp/go-version/constraint.go +++ /dev/null @@ -1,204 +0,0 @@ -package version - -import ( - "fmt" - "reflect" - "regexp" - "strings" -) - -// Constraint represents a single constraint for a version, such as -// ">= 1.0". -type Constraint struct { - f constraintFunc - check *Version - original string -} - -// Constraints is a slice of constraints. We make a custom type so that -// we can add methods to it. 
-type Constraints []*Constraint - -type constraintFunc func(v, c *Version) bool - -var constraintOperators map[string]constraintFunc - -var constraintRegexp *regexp.Regexp - -func init() { - constraintOperators = map[string]constraintFunc{ - "": constraintEqual, - "=": constraintEqual, - "!=": constraintNotEqual, - ">": constraintGreaterThan, - "<": constraintLessThan, - ">=": constraintGreaterThanEqual, - "<=": constraintLessThanEqual, - "~>": constraintPessimistic, - } - - ops := make([]string, 0, len(constraintOperators)) - for k := range constraintOperators { - ops = append(ops, regexp.QuoteMeta(k)) - } - - constraintRegexp = regexp.MustCompile(fmt.Sprintf( - `^\s*(%s)\s*(%s)\s*$`, - strings.Join(ops, "|"), - VersionRegexpRaw)) -} - -// NewConstraint will parse one or more constraints from the given -// constraint string. The string must be a comma-separated list of -// constraints. -func NewConstraint(v string) (Constraints, error) { - vs := strings.Split(v, ",") - result := make([]*Constraint, len(vs)) - for i, single := range vs { - c, err := parseSingle(single) - if err != nil { - return nil, err - } - - result[i] = c - } - - return Constraints(result), nil -} - -// Check tests if a version satisfies all the constraints. -func (cs Constraints) Check(v *Version) bool { - for _, c := range cs { - if !c.Check(v) { - return false - } - } - - return true -} - -// Returns the string format of the constraints -func (cs Constraints) String() string { - csStr := make([]string, len(cs)) - for i, c := range cs { - csStr[i] = c.String() - } - - return strings.Join(csStr, ",") -} - -// Check tests if a constraint is validated by the given version. -func (c *Constraint) Check(v *Version) bool { - return c.f(v, c.check) -} - -func (c *Constraint) String() string { - return c.original -} - -func parseSingle(v string) (*Constraint, error) { - matches := constraintRegexp.FindStringSubmatch(v) - if matches == nil { - return nil, fmt.Errorf("Malformed constraint: %s", v) - } - - check, err := NewVersion(matches[2]) - if err != nil { - return nil, err - } - - return &Constraint{ - f: constraintOperators[matches[1]], - check: check, - original: v, - }, nil -} - -func prereleaseCheck(v, c *Version) bool { - switch vPre, cPre := v.Prerelease() != "", c.Prerelease() != ""; { - case cPre && vPre: - // A constraint with a pre-release can only match a pre-release version - // with the same base segments. - return reflect.DeepEqual(c.Segments64(), v.Segments64()) - - case !cPre && vPre: - // A constraint without a pre-release can only match a version without a - // pre-release. 
- return false - - case cPre && !vPre: - // OK, except with the pessimistic operator - case !cPre && !vPre: - // OK - } - return true -} - -//------------------------------------------------------------------- -// Constraint functions -//------------------------------------------------------------------- - -func constraintEqual(v, c *Version) bool { - return v.Equal(c) -} - -func constraintNotEqual(v, c *Version) bool { - return !v.Equal(c) -} - -func constraintGreaterThan(v, c *Version) bool { - return prereleaseCheck(v, c) && v.Compare(c) == 1 -} - -func constraintLessThan(v, c *Version) bool { - return prereleaseCheck(v, c) && v.Compare(c) == -1 -} - -func constraintGreaterThanEqual(v, c *Version) bool { - return prereleaseCheck(v, c) && v.Compare(c) >= 0 -} - -func constraintLessThanEqual(v, c *Version) bool { - return prereleaseCheck(v, c) && v.Compare(c) <= 0 -} - -func constraintPessimistic(v, c *Version) bool { - // Using a pessimistic constraint with a pre-release, restricts versions to pre-releases - if !prereleaseCheck(v, c) || (c.Prerelease() != "" && v.Prerelease() == "") { - return false - } - - // If the version being checked is naturally less than the constraint, then there - // is no way for the version to be valid against the constraint - if v.LessThan(c) { - return false - } - // We'll use this more than once, so grab the length now so it's a little cleaner - // to write the later checks - cs := len(c.segments) - - // If the version being checked has less specificity than the constraint, then there - // is no way for the version to be valid against the constraint - if cs > len(v.segments) { - return false - } - - // Check the segments in the constraint against those in the version. If the version - // being checked, at any point, does not have the same values in each index of the - // constraints segments, then it cannot be valid against the constraint. - for i := 0; i < c.si-1; i++ { - if v.segments[i] != c.segments[i] { - return false - } - } - - // Check the last part of the segment in the constraint. If the version segment at - // this index is less than the constraints segment at this index, then it cannot - // be valid against the constraint - if c.segments[cs-1] > v.segments[cs-1] { - return false - } - - // If nothing has rejected the version by now, it's valid - return true -} diff --git a/vendor/github.com/hashicorp/go-version/go.mod b/vendor/github.com/hashicorp/go-version/go.mod deleted file mode 100644 index f528555..0000000 --- a/vendor/github.com/hashicorp/go-version/go.mod +++ /dev/null @@ -1 +0,0 @@ -module github.com/hashicorp/go-version diff --git a/vendor/github.com/hashicorp/go-version/version.go b/vendor/github.com/hashicorp/go-version/version.go deleted file mode 100644 index 4d1e6e2..0000000 --- a/vendor/github.com/hashicorp/go-version/version.go +++ /dev/null @@ -1,347 +0,0 @@ -package version - -import ( - "bytes" - "fmt" - "reflect" - "regexp" - "strconv" - "strings" -) - -// The compiled regular expression used to test the validity of a version. -var versionRegexp *regexp.Regexp - -// The raw regular expression string used for testing the validity -// of a version. -const VersionRegexpRaw string = `v?([0-9]+(\.[0-9]+)*?)` + - `(-([0-9]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)|(-?([A-Za-z\-~]+[0-9A-Za-z\-~]*(\.[0-9A-Za-z\-~]+)*)))?` + - `(\+([0-9A-Za-z\-~]+(\.[0-9A-Za-z\-~]+)*))?` + - `?` - -// Version represents a single version. 
-type Version struct { - metadata string - pre string - segments []int64 - si int - original string -} - -func init() { - versionRegexp = regexp.MustCompile("^" + VersionRegexpRaw + "$") -} - -// NewVersion parses the given version and returns a new -// Version. -func NewVersion(v string) (*Version, error) { - matches := versionRegexp.FindStringSubmatch(v) - if matches == nil { - return nil, fmt.Errorf("Malformed version: %s", v) - } - segmentsStr := strings.Split(matches[1], ".") - segments := make([]int64, len(segmentsStr)) - si := 0 - for i, str := range segmentsStr { - val, err := strconv.ParseInt(str, 10, 64) - if err != nil { - return nil, fmt.Errorf( - "Error parsing version: %s", err) - } - - segments[i] = int64(val) - si++ - } - - // Even though we could support more than three segments, if we - // got less than three, pad it with 0s. This is to cover the basic - // default usecase of semver, which is MAJOR.MINOR.PATCH at the minimum - for i := len(segments); i < 3; i++ { - segments = append(segments, 0) - } - - pre := matches[7] - if pre == "" { - pre = matches[4] - } - - return &Version{ - metadata: matches[10], - pre: pre, - segments: segments, - si: si, - original: v, - }, nil -} - -// Must is a helper that wraps a call to a function returning (*Version, error) -// and panics if error is non-nil. -func Must(v *Version, err error) *Version { - if err != nil { - panic(err) - } - - return v -} - -// Compare compares this version to another version. This -// returns -1, 0, or 1 if this version is smaller, equal, -// or larger than the other version, respectively. -// -// If you want boolean results, use the LessThan, Equal, -// or GreaterThan methods. -func (v *Version) Compare(other *Version) int { - // A quick, efficient equality check - if v.String() == other.String() { - return 0 - } - - segmentsSelf := v.Segments64() - segmentsOther := other.Segments64() - - // If the segments are the same, we must compare on prerelease info - if reflect.DeepEqual(segmentsSelf, segmentsOther) { - preSelf := v.Prerelease() - preOther := other.Prerelease() - if preSelf == "" && preOther == "" { - return 0 - } - if preSelf == "" { - return 1 - } - if preOther == "" { - return -1 - } - - return comparePrereleases(preSelf, preOther) - } - - // Get the highest specificity (hS), or if they're equal, just use segmentSelf length - lenSelf := len(segmentsSelf) - lenOther := len(segmentsOther) - hS := lenSelf - if lenSelf < lenOther { - hS = lenOther - } - // Compare the segments - // Because a constraint could have more/less specificity than the version it's - // checking, we need to account for a lopsided or jagged comparison - for i := 0; i < hS; i++ { - if i > lenSelf-1 { - // This means Self had the lower specificity - // Check to see if the remaining segments in Other are all zeros - if !allZero(segmentsOther[i:]) { - // if not, it means that Other has to be greater than Self - return -1 - } - break - } else if i > lenOther-1 { - // this means Other had the lower specificity - // Check to see if the remaining segments in Self are all zeros - - if !allZero(segmentsSelf[i:]) { - //if not, it means that Self has to be greater than Other - return 1 - } - break - } - lhs := segmentsSelf[i] - rhs := segmentsOther[i] - if lhs == rhs { - continue - } else if lhs < rhs { - return -1 - } - // Otherwis, rhs was > lhs, they're not equal - return 1 - } - - // if we got this far, they're equal - return 0 -} - -func allZero(segs []int64) bool { - for _, s := range segs { - if s != 0 { - return false - } - } - 
return true -} - -func comparePart(preSelf string, preOther string) int { - if preSelf == preOther { - return 0 - } - - var selfInt int64 - selfNumeric := true - selfInt, err := strconv.ParseInt(preSelf, 10, 64) - if err != nil { - selfNumeric = false - } - - var otherInt int64 - otherNumeric := true - otherInt, err = strconv.ParseInt(preOther, 10, 64) - if err != nil { - otherNumeric = false - } - - // if a part is empty, we use the other to decide - if preSelf == "" { - if otherNumeric { - return -1 - } - return 1 - } - - if preOther == "" { - if selfNumeric { - return 1 - } - return -1 - } - - if selfNumeric && !otherNumeric { - return -1 - } else if !selfNumeric && otherNumeric { - return 1 - } else if !selfNumeric && !otherNumeric && preSelf > preOther { - return 1 - } else if selfInt > otherInt { - return 1 - } - - return -1 -} - -func comparePrereleases(v string, other string) int { - // the same pre release! - if v == other { - return 0 - } - - // split both pre releases for analyse their parts - selfPreReleaseMeta := strings.Split(v, ".") - otherPreReleaseMeta := strings.Split(other, ".") - - selfPreReleaseLen := len(selfPreReleaseMeta) - otherPreReleaseLen := len(otherPreReleaseMeta) - - biggestLen := otherPreReleaseLen - if selfPreReleaseLen > otherPreReleaseLen { - biggestLen = selfPreReleaseLen - } - - // loop for parts to find the first difference - for i := 0; i < biggestLen; i = i + 1 { - partSelfPre := "" - if i < selfPreReleaseLen { - partSelfPre = selfPreReleaseMeta[i] - } - - partOtherPre := "" - if i < otherPreReleaseLen { - partOtherPre = otherPreReleaseMeta[i] - } - - compare := comparePart(partSelfPre, partOtherPre) - // if parts are equals, continue the loop - if compare != 0 { - return compare - } - } - - return 0 -} - -// Equal tests if two versions are equal. -func (v *Version) Equal(o *Version) bool { - return v.Compare(o) == 0 -} - -// GreaterThan tests if this version is greater than another version. -func (v *Version) GreaterThan(o *Version) bool { - return v.Compare(o) > 0 -} - -// LessThan tests if this version is less than another version. -func (v *Version) LessThan(o *Version) bool { - return v.Compare(o) < 0 -} - -// Metadata returns any metadata that was part of the version -// string. -// -// Metadata is anything that comes after the "+" in the version. -// For example, with "1.2.3+beta", the metadata is "beta". -func (v *Version) Metadata() string { - return v.metadata -} - -// Prerelease returns any prerelease data that is part of the version, -// or blank if there is no prerelease data. -// -// Prerelease information is anything that comes after the "-" in the -// version (but before any metadata). For example, with "1.2.3-beta", -// the prerelease information is "beta". -func (v *Version) Prerelease() string { - return v.pre -} - -// Segments returns the numeric segments of the version as a slice of ints. -// -// This excludes any metadata or pre-release information. For example, -// for a version "1.2.3-beta", segments will return a slice of -// 1, 2, 3. -func (v *Version) Segments() []int { - segmentSlice := make([]int, len(v.segments)) - for i, v := range v.segments { - segmentSlice[i] = int(v) - } - return segmentSlice -} - -// Segments64 returns the numeric segments of the version as a slice of int64s. -// -// This excludes any metadata or pre-release information. For example, -// for a version "1.2.3-beta", segments will return a slice of -// 1, 2, 3. 
-func (v *Version) Segments64() []int64 { - result := make([]int64, len(v.segments)) - copy(result, v.segments) - return result -} - -// String returns the full version string included pre-release -// and metadata information. -// -// This value is rebuilt according to the parsed segments and other -// information. Therefore, ambiguities in the version string such as -// prefixed zeroes (1.04.0 => 1.4.0), `v` prefix (v1.0.0 => 1.0.0), and -// missing parts (1.0 => 1.0.0) will be made into a canonicalized form -// as shown in the parenthesized examples. -func (v *Version) String() string { - var buf bytes.Buffer - fmtParts := make([]string, len(v.segments)) - for i, s := range v.segments { - // We can ignore err here since we've pre-parsed the values in segments - str := strconv.FormatInt(s, 10) - fmtParts[i] = str - } - fmt.Fprintf(&buf, strings.Join(fmtParts, ".")) - if v.pre != "" { - fmt.Fprintf(&buf, "-%s", v.pre) - } - if v.metadata != "" { - fmt.Fprintf(&buf, "+%s", v.metadata) - } - - return buf.String() -} - -// Original returns the original parsed version as-is, including any -// potential whitespace, `v` prefix, etc. -func (v *Version) Original() string { - return v.original -} diff --git a/vendor/github.com/hashicorp/go-version/version_collection.go b/vendor/github.com/hashicorp/go-version/version_collection.go deleted file mode 100644 index cc888d4..0000000 --- a/vendor/github.com/hashicorp/go-version/version_collection.go +++ /dev/null @@ -1,17 +0,0 @@ -package version - -// Collection is a type that implements the sort.Interface interface -// so that versions can be sorted. -type Collection []*Version - -func (v Collection) Len() int { - return len(v) -} - -func (v Collection) Less(i, j int) bool { - return v[i].LessThan(v[j]) -} - -func (v Collection) Swap(i, j int) { - v[i], v[j] = v[j], v[i] -} diff --git a/vendor/github.com/hashicorp/logutils/.gitignore b/vendor/github.com/hashicorp/logutils/.gitignore deleted file mode 100644 index 0026861..0000000 --- a/vendor/github.com/hashicorp/logutils/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe diff --git a/vendor/github.com/hashicorp/logutils/LICENSE b/vendor/github.com/hashicorp/logutils/LICENSE deleted file mode 100644 index c33dcc7..0000000 --- a/vendor/github.com/hashicorp/logutils/LICENSE +++ /dev/null @@ -1,354 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. β€œContributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. β€œContributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. β€œContribution” - - means Covered Software of a particular Contributor. - -1.4. β€œCovered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. β€œIncompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. 
that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. β€œExecutable Form” - - means any form of the work other than Source Code Form. - -1.7. β€œLarger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. β€œLicense” - - means this document. - -1.9. β€œLicensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. β€œModifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. β€œPatent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. β€œSecondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. β€œSource Code Form” - - means the form of the work preferred for making modifications. - -1.14. β€œYou” (or β€œYour”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, β€œYou” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, β€œcontrol” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. 
for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. 
Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. 
Disclaimer of Warranty - - Covered Software is provided under this License on an β€œas is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. 
Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - β€œIncompatible With Secondary Licenses” Notice - - This Source Code Form is β€œIncompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/logutils/README.md b/vendor/github.com/hashicorp/logutils/README.md deleted file mode 100644 index 49490ea..0000000 --- a/vendor/github.com/hashicorp/logutils/README.md +++ /dev/null @@ -1,36 +0,0 @@ -# logutils - -logutils is a Go package that augments the standard library "log" package -to make logging a bit more modern, without fragmenting the Go ecosystem -with new logging packages. - -## The simplest thing that could possibly work - -Presumably your application already uses the default `log` package. To switch, you'll want your code to look like the following: - -```go -package main - -import ( - "log" - "os" - - "github.com/hashicorp/logutils" -) - -func main() { - filter := &logutils.LevelFilter{ - Levels: []logutils.LogLevel{"DEBUG", "WARN", "ERROR"}, - MinLevel: logutils.LogLevel("WARN"), - Writer: os.Stderr, - } - log.SetOutput(filter) - - log.Print("[DEBUG] Debugging") // this will not print - log.Print("[WARN] Warning") // this will - log.Print("[ERROR] Erring") // and so will this - log.Print("Message I haven't updated") // and so will this -} -``` - -This logs to standard error exactly like go's standard logger. Any log messages you haven't converted to have a level will continue to print as before. diff --git a/vendor/github.com/hashicorp/logutils/level.go b/vendor/github.com/hashicorp/logutils/level.go deleted file mode 100644 index 6381bf1..0000000 --- a/vendor/github.com/hashicorp/logutils/level.go +++ /dev/null @@ -1,81 +0,0 @@ -// Package logutils augments the standard log package with levels. -package logutils - -import ( - "bytes" - "io" - "sync" -) - -type LogLevel string - -// LevelFilter is an io.Writer that can be used with a logger that -// will filter out log messages that aren't at least a certain level. -// -// Once the filter is in use somewhere, it is not safe to modify -// the structure. -type LevelFilter struct { - // Levels is the list of log levels, in increasing order of - // severity. Example might be: {"DEBUG", "WARN", "ERROR"}. - Levels []LogLevel - - // MinLevel is the minimum level allowed through - MinLevel LogLevel - - // The underlying io.Writer where log messages that pass the filter - // will be set. - Writer io.Writer - - badLevels map[LogLevel]struct{} - once sync.Once -} - -// Check will check a given line if it would be included in the level -// filter. 
-func (f *LevelFilter) Check(line []byte) bool { - f.once.Do(f.init) - - // Check for a log level - var level LogLevel - x := bytes.IndexByte(line, '[') - if x >= 0 { - y := bytes.IndexByte(line[x:], ']') - if y >= 0 { - level = LogLevel(line[x+1 : x+y]) - } - } - - _, ok := f.badLevels[level] - return !ok -} - -func (f *LevelFilter) Write(p []byte) (n int, err error) { - // Note in general that io.Writer can receive any byte sequence - // to write, but the "log" package always guarantees that we only - // get a single line. We use that as a slight optimization within - // this method, assuming we're dealing with a single, complete line - // of log data. - - if !f.Check(p) { - return len(p), nil - } - - return f.Writer.Write(p) -} - -// SetMinLevel is used to update the minimum log level -func (f *LevelFilter) SetMinLevel(min LogLevel) { - f.MinLevel = min - f.init() -} - -func (f *LevelFilter) init() { - badLevels := make(map[LogLevel]struct{}) - for _, level := range f.Levels { - if level == f.MinLevel { - break - } - badLevels[level] = struct{}{} - } - f.badLevels = badLevels -} diff --git a/vendor/github.com/pact-foundation/pact-go/LICENSE b/vendor/github.com/pact-foundation/pact-go/LICENSE deleted file mode 100644 index 1a6b7e0..0000000 --- a/vendor/github.com/pact-foundation/pact-go/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 Matt Fellows - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/pact-foundation/pact-go/client/message_service.go b/vendor/github.com/pact-foundation/pact-go/client/message_service.go deleted file mode 100644 index 9dd50f0..0000000 --- a/vendor/github.com/pact-foundation/pact-go/client/message_service.go +++ /dev/null @@ -1,24 +0,0 @@ -package client - -import ( - "log" -) - -// MessageService is a wrapper for the Pact Message service. -type MessageService struct { - ServiceManager -} - -// NewService creates a new MessageService with default settings. 
-// Named Arguments allowed: -// --consumer -// --provider -// --pact-dir -func (v *MessageService) NewService(args []string) Service { - v.Args = args - - log.Printf("[DEBUG] starting message service with args: %v\n", v.Args) - v.Cmd = "pact-message" - - return v -} diff --git a/vendor/github.com/pact-foundation/pact-go/client/mock_service.go b/vendor/github.com/pact-foundation/pact-go/client/mock_service.go deleted file mode 100644 index f8f3870..0000000 --- a/vendor/github.com/pact-foundation/pact-go/client/mock_service.go +++ /dev/null @@ -1,21 +0,0 @@ -package client - -// MockService is a wrapper for the Pact Mock Service. -type MockService struct { - ServiceManager -} - -// NewService creates a new MockService with default settings. -func (m *MockService) NewService(args []string) Service { - m.Args = []string{ - "service", - } - m.Args = append(m.Args, args...) - - m.Cmd = getMockServiceCommandPath() - return m -} - -func getMockServiceCommandPath() string { - return "pact-mock-service" -} diff --git a/vendor/github.com/pact-foundation/pact-go/client/publish_service.go b/vendor/github.com/pact-foundation/pact-go/client/publish_service.go deleted file mode 100644 index 1f38b08..0000000 --- a/vendor/github.com/pact-foundation/pact-go/client/publish_service.go +++ /dev/null @@ -1,39 +0,0 @@ -package client - -import ( - "log" -) - -// PublishService is a wrapper for the Pact Provider Verifier Service. -type PublishService struct { - ServiceManager -} - -// NewService creates a new PublishService with default settings. -// Arguments allowed: -// -// --provider-base-url -// --pact-urls -// --provider-states-url -// --provider-states-setup-url -// --broker-username -// --broker-password -// --publish-verification-results -// --provider-app-version -// --custom-provider-headers -func (v *PublishService) NewService(args []string) Service { - log.Printf("[DEBUG] starting publish service with args: %v\n", args) - - v.Args = []string{ - "publish", - } - - v.Args = append(v.Args, args...) - v.Cmd = getPublisherCommandPath() - - return v -} - -func getPublisherCommandPath() string { - return "pact-broker" -} diff --git a/vendor/github.com/pact-foundation/pact-go/client/service.go b/vendor/github.com/pact-foundation/pact-go/client/service.go deleted file mode 100644 index e08ce3b..0000000 --- a/vendor/github.com/pact-foundation/pact-go/client/service.go +++ /dev/null @@ -1,26 +0,0 @@ -/* -Package client is an internal package, implementing the raw interface to the -Pact CLI tools: The Pact Mock Service and Provider Verification "binaries." - -See https://github.com/pact-foundation/pact-provider-verifier and -https://github.com/bethesque/pact-mock_service for more on the Ruby "binaries". - -NOTE: The ultimate goal here is to replace the Ruby dependencies with a shared -library (Pact Reference - (https://github.com/pact-foundation/pact-reference/). -*/ -package client - -import ( - "os/exec" -) - -// Service is a process wrapper for 3rd party binaries. It will spawn an instance -// of the binary and manage the life-cycle and IO of the process. 
-type Service interface { - Setup() - Stop(pid int) (bool, error) - List() map[int]*exec.Cmd - Command() *exec.Cmd - Start() *exec.Cmd - NewService(args []string) Service -} diff --git a/vendor/github.com/pact-foundation/pact-go/client/service_manager.go b/vendor/github.com/pact-foundation/pact-go/client/service_manager.go deleted file mode 100644 index a8ae74d..0000000 --- a/vendor/github.com/pact-foundation/pact-go/client/service_manager.go +++ /dev/null @@ -1,178 +0,0 @@ -package client - -import ( - "bufio" - "log" - "os" - "os/exec" - "sync" - "time" -) - -// ServiceManager is the default implementation of the Service interface. -type ServiceManager struct { - Cmd string - processMap processMap - Args []string - Env []string - commandCompleteChan chan *exec.Cmd - commandCreatedChan chan *exec.Cmd -} - -// Setup the Management services. -func (s *ServiceManager) Setup() { - log.Println("[DEBUG] setting up a service manager") - - s.commandCreatedChan = make(chan *exec.Cmd) - s.commandCompleteChan = make(chan *exec.Cmd) - s.processMap = processMap{processes: make(map[int]*exec.Cmd)} - - // Listen for service create/kill - go s.addServiceMonitor() - go s.removeServiceMonitor() -} - -// addServiceMonitor watches a channel to add services into operation. -func (s *ServiceManager) addServiceMonitor() { - log.Println("[DEBUG] starting service creation monitor") - for { - select { - case p := <-s.commandCreatedChan: - if p != nil && p.Process != nil { - s.processMap.Set(p.Process.Pid, p) - } - } - } -} - -// removeServiceMonitor watches a channel to remove services from operation. -func (s *ServiceManager) removeServiceMonitor() { - log.Println("[DEBUG] starting service removal monitor") - var p *exec.Cmd - for { - select { - case p = <-s.commandCompleteChan: - if p != nil && p.Process != nil { - p.Process.Signal(os.Interrupt) - s.processMap.Delete(p.Process.Pid) - } - } - } -} - -// Stop a Service and returns the exit status. -func (s *ServiceManager) Stop(pid int) (bool, error) { - log.Println("[DEBUG] stopping service with pid", pid) - cmd := s.processMap.Get(pid) - - // Remove service from registry - go func() { - s.commandCompleteChan <- cmd - }() - - // Wait for error, kill if it takes too long - var err error - done := make(chan error, 1) - go func() { - done <- cmd.Wait() - }() - - select { - case <-time.After(3 * time.Second): - if err = cmd.Process.Kill(); err != nil { - log.Println("[ERROR] timeout reached, killing pid", pid) - - return false, err - } - case err = <-done: - if err != nil { - log.Println("[ERROR] error waiting for process to complete", err) - return false, err - } - } - - return true, nil -} - -// List all Service PIDs. -func (s *ServiceManager) List() map[int]*exec.Cmd { - log.Println("[DEBUG] listing services") - return s.processMap.processes -} - -// Command creates an os command to be run -func (s *ServiceManager) Command() *exec.Cmd { - cmd := exec.Command(s.Cmd, s.Args...) - env := os.Environ() - env = append(env, s.Env...) - cmd.Env = env - - return cmd -} - -// Start a Service and log its output. -func (s *ServiceManager) Start() *exec.Cmd { - log.Println("[DEBUG] starting service") - cmd := exec.Command(s.Cmd, s.Args...) - env := os.Environ() - env = append(env, s.Env...) 
- cmd.Env = env - - cmdReader, err := cmd.StdoutPipe() - if err != nil { - log.Printf("[ERROR] unable to create output pipe for cmd: %s\n", err.Error()) - } - - cmdReaderErr, err := cmd.StderrPipe() - if err != nil { - log.Printf("[ERROR] unable to create error pipe for cmd: %s\n", err.Error()) - } - - scanner := bufio.NewScanner(cmdReader) - go func() { - for scanner.Scan() { - log.Printf("[INFO] %s\n", scanner.Text()) - } - }() - - scanner2 := bufio.NewScanner(cmdReaderErr) - go func() { - for scanner2.Scan() { - log.Printf("[ERROR] service: %s\n", scanner2.Text()) - } - }() - - err = cmd.Start() - if err != nil { - log.Println("[ERROR] service", err.Error()) - } - - // Add service to registry - s.commandCreatedChan <- cmd - - return cmd -} - -type processMap struct { - sync.RWMutex - processes map[int]*exec.Cmd -} - -func (pm *processMap) Get(k int) *exec.Cmd { - pm.RLock() - defer pm.RUnlock() - v, _ := pm.processes[k] - return v -} - -func (pm *processMap) Set(k int, v *exec.Cmd) { - pm.Lock() - defer pm.Unlock() - pm.processes[k] = v -} - -func (pm *processMap) Delete(k int) { - pm.Lock() - defer pm.Unlock() - delete(pm.processes, k) -} diff --git a/vendor/github.com/pact-foundation/pact-go/client/verification_service.go b/vendor/github.com/pact-foundation/pact-go/client/verification_service.go deleted file mode 100644 index 62fe143..0000000 --- a/vendor/github.com/pact-foundation/pact-go/client/verification_service.go +++ /dev/null @@ -1,37 +0,0 @@ -package client - -import ( - "log" - "os" -) - -// VerificationService is a wrapper for the Pact Provider Verifier Service. -type VerificationService struct { - ServiceManager -} - -// NewService creates a new VerificationService with default settings. -// Arguments allowed: -// -// --provider-base-url -// --pact-urls -// --provider-states-url -// --provider-states-setup-url -// --broker-username -// --broker-password -// --publish-verification-results -// --provider-app-version -// --custom-provider-headers -func (v *VerificationService) NewService(args []string) Service { - log.Printf("[DEBUG] starting verification service with args: %v\n", args) - - v.Args = args - v.Cmd = getVerifierCommandPath() - v.Env = append(os.Environ(), `PACT_INTERACTION_RERUN_COMMAND="To re-run this specific test, set the following environment variables and run your test again: PACT_DESCRIPTION=\"\" PACT_PROVIDER_STATE=\"\""`) - - return v -} - -func getVerifierCommandPath() string { - return "pact-provider-verifier" -} diff --git a/vendor/github.com/pact-foundation/pact-go/dsl/client.go b/vendor/github.com/pact-foundation/pact-go/dsl/client.go deleted file mode 100644 index e1b4e99..0000000 --- a/vendor/github.com/pact-foundation/pact-go/dsl/client.go +++ /dev/null @@ -1,451 +0,0 @@ -package dsl - -import ( - "bufio" - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "log" - "net" - "net/url" - "regexp" - "strconv" - "strings" - "sync" - "time" - - "github.com/pact-foundation/pact-go/client" - "github.com/pact-foundation/pact-go/types" -) - -// Client is the interface -type Client interface { - // StartServer starts a remote Pact Mock Server. - StartServer(args []string, port int) *types.MockServer - - // ListServers lists all known Mock Servers - ListServers() []*types.MockServer - - // StopServer stops a remote Pact Mock Server. - StopServer(server *types.MockServer) (*types.MockServer, error) - - // RemoveAllServers stops all remote Pact Mock Servers. 
- RemoveAllServers(server *types.MockServer) []*types.MockServer - - // VerifyProvider runs the verification process against a running Provider. - VerifyProvider(request types.VerifyRequest) ([]types.ProviderVerifierResponse, error) - - // UpdateMessagePact adds a pact message to a contract file - UpdateMessagePact(request types.PactMessageRequest) error - - // ReifyMessage takes a structured object, potentially containing nested Matchers - // and returns an object with just the example (generated) content - // The object may be a simple JSON primitive e.g. string or number or a complex object - ReifyMessage(request *types.PactReificationRequest) (res *types.ReificationResponse, err error) - - // PublishPacts publishes pact files to a Pact Broker - PublishPacts(request types.PublishRequest) error -} - -// PactClient is the main interface into starting/stopping -// the underlying Pact CLI subsystem -type PactClient struct { - pactMockSvcManager client.Service - verificationSvcManager client.Service - messageSvcManager client.Service - publishSvcManager client.Service - - // Track mock servers - Servers []MockService - - // Network Daemon is listening on - Network string - - // Address the Daemon is listening on - Address string - - // TimeoutDuration specifies how long to wait for Pact CLI to start - TimeoutDuration time.Duration -} - -// newClient creates a new Pact client manager with the provided services -func newClient(MockServiceManager client.Service, verificationServiceManager client.Service, messageServiceManager client.Service, publishServiceManager client.Service) *PactClient { - MockServiceManager.Setup() - verificationServiceManager.Setup() - messageServiceManager.Setup() - publishServiceManager.Setup() - - return &PactClient{ - pactMockSvcManager: MockServiceManager, - verificationSvcManager: verificationServiceManager, - messageSvcManager: messageServiceManager, - publishSvcManager: publishServiceManager, - TimeoutDuration: 10 * time.Second, - } -} - -// NewClient creates a new Pact client manager with defaults -func NewClient() *PactClient { - return newClient(&client.MockService{}, &client.VerificationService{}, &client.MessageService{}, &client.PublishService{}) -} - -// StartServer starts a remote Pact Mock Server. -func (p *PactClient) StartServer(args []string, port int) *types.MockServer { - log.Println("[DEBUG] client: starting a server with args:", args, "port:", port) - args = append(args, []string{"--port", strconv.Itoa(port)}...) - svc := p.pactMockSvcManager.NewService(args) - cmd := svc.Start() - - waitForPort(port, p.getNetworkInterface(), p.Address, p.TimeoutDuration, - fmt.Sprintf(`Timed out waiting for Mock Server to start on port %d - are you sure it's running?`, port)) - - return &types.MockServer{ - Pid: cmd.Process.Pid, - Port: port, - } -} - -// ListServers lists all known Mock Servers -func (p *PactClient) ListServers() []*types.MockServer { - log.Println("[DEBUG] client: starting a server") - - var servers []*types.MockServer - - for port, s := range p.pactMockSvcManager.List() { - servers = append(servers, &types.MockServer{ - Pid: s.Process.Pid, - Port: port, - }) - } - - return servers -} - -// StopServer stops a remote Pact Mock Server. -func (p *PactClient) StopServer(server *types.MockServer) (*types.MockServer, error) { - log.Println("[DEBUG] client: stop server") - - // TODO: Need to be able to get a non-zero exit code here! 
- _, server.Error = p.pactMockSvcManager.Stop(server.Pid) - return server, server.Error -} - -// RemoveAllServers stops all remote Pact Mock Servers. -func (p *PactClient) RemoveAllServers(server *types.MockServer) []*types.MockServer { - log.Println("[DEBUG] client: stop server") - - for _, s := range p.verificationSvcManager.List() { - if s != nil { - p.pactMockSvcManager.Stop(s.Process.Pid) - } - } - return nil -} - -// VerifyProvider runs the verification process against a running Provider. -// TODO: extract/refactor the stdout/error streaems from these functions -func (p *PactClient) VerifyProvider(request types.VerifyRequest) ([]types.ProviderVerifierResponse, error) { - log.Println("[DEBUG] client: verifying a provider") - response := make([]types.ProviderVerifierResponse, 0) - - // Convert request into flags, and validate request - err := request.Validate() - if err != nil { - return response, err - } - - address := getAddress(request.ProviderBaseURL) - port := getPort(request.ProviderBaseURL) - - err = waitForPort(port, p.getNetworkInterface(), address, p.TimeoutDuration, - fmt.Sprintf(`Timed out waiting for Provider API to start on port %d - are you sure it's running?`, port)) - - if err != nil { - return response, err - } - - // Run command, splitting out stderr and stdout. The command can fail for - // several reasons: - // 1. Command is unable to run at all. - // 2. Command runs, but fails for unknown reason. - // 3. Command runs, and returns exit status 1 because the tests fail. - // - // First, attempt to decode the response of the stdout. - // If that is successful, we are at case 3. Return stdout as message, no error. - // Else, return an error, include stderr and stdout in both the error and message. - svc := p.verificationSvcManager.NewService(request.Args) - cmd := svc.Command() - - stdOutPipe, err := cmd.StdoutPipe() - if err != nil { - return response, err - } - stdErrPipe, err := cmd.StderrPipe() - if err != nil { - return response, err - } - - // Buffered channel: wait for all reading to complete - var wg sync.WaitGroup - verifications := []string{} - var stdErr strings.Builder - - // Split by lines, as the content is JSONL formatted - // Each pact is verified by line, and the results (as JSON) sent to stdout. - // See https://github.com/pact-foundation/pact-go/issues/88#issuecomment-404686337 - stdOutScanner := bufio.NewScanner(stdOutPipe) - go func() { - wg.Add(1) - defer wg.Done() - stdOutBuf := make([]byte, bufio.MaxScanTokenSize) - stdOutScanner.Buffer(stdOutBuf, 64*1024*1024) - - for stdOutScanner.Scan() { - verifications = append(verifications, stdOutScanner.Text()) - } - }() - - // Scrape errors - stdErrScanner := bufio.NewScanner(stdErrPipe) - go func() { - wg.Add(1) - defer wg.Done() - for stdErrScanner.Scan() { - stdErr.WriteString(fmt.Sprintf("%s\n", stdErrScanner.Text())) - } - - }() - - err = cmd.Start() - if err != nil { - return response, err - } - - // Wait for watch goroutine before Cmd.Wait(), race condition! 
- err = cmd.Wait() - wg.Wait() - - var verification types.ProviderVerifierResponse - for _, v := range verifications { - v = strings.TrimSpace(v) - - // TODO: fix once https://github.com/pact-foundation/pact-provider-verifier/issues/26 - // is addressed - // logging to stdout breaks the JSON response - // https://github.com/pact-foundation/pact-ruby/commit/06fa61581512ba5570c315d089f2c0fc23c8cb11 - if v != "" && strings.Index(v, "INFO") != 0 { - dErr := json.Unmarshal([]byte(v), &verification) - - response = append(response, verification) - - if dErr != nil { - err = dErr - } - } - } - - if err == nil { - return response, err - } - - return response, fmt.Errorf("error verifying provider: %s\n\nSTDERR:\n%s\n\nSTDOUT:\n%s", err, stdErr.String(), strings.Join(verifications, "\n")) -} - -// UpdateMessagePact adds a pact message to a contract file -func (p *PactClient) UpdateMessagePact(request types.PactMessageRequest) error { - log.Println("[DEBUG] client: adding pact message...") - - // Convert request into flags, and validate request - err := request.Validate() - if err != nil { - return err - } - - svc := p.messageSvcManager.NewService(request.Args) - cmd := svc.Command() - - stdOutPipe, err := cmd.StdoutPipe() - if err != nil { - return err - } - stdErrPipe, err := cmd.StderrPipe() - if err != nil { - return err - } - err = cmd.Start() - if err != nil { - return err - } - stdOut, err := ioutil.ReadAll(stdOutPipe) - if err != nil { - return err - } - stdErr, err := ioutil.ReadAll(stdErrPipe) - if err != nil { - return err - } - - err = cmd.Wait() - - if err == nil { - return nil - } - - return fmt.Errorf("error creating message: %s\n\nSTDERR:\n%s\n\nSTDOUT:\n%s", err, stdErr, stdOut) -} - -// PublishPacts publishes a set of pacts to a pact broker -func (p *PactClient) PublishPacts(request types.PublishRequest) error { - svc := p.publishSvcManager.NewService(request.Args) - log.Println("[DEBUG] about to publish pacts") - cmd := svc.Start() - - log.Println("[DEBUG] waiting for response") - err := cmd.Wait() - - log.Println("[DEBUG] response from publish", err) - - return err -} - -// ReifyMessage takes a structured object, potentially containing nested Matchers -// and returns an object with just the example (generated) content -// The object may be a simple JSON primitive e.g. 
string or number or a complex object -func (p *PactClient) ReifyMessage(request *types.PactReificationRequest) (res *types.ReificationResponse, err error) { - log.Println("[DEBUG] client: adding pact message...") - - var responseObject interface{} - res = &types.ReificationResponse{ - Response: responseObject, - } - - // Convert request into flags, and validate request - err = request.Validate() - if err != nil { - return - } - - svc := p.messageSvcManager.NewService(request.Args) - cmd := svc.Command() - - stdOutPipe, err := cmd.StdoutPipe() - if err != nil { - return - } - stdErrPipe, err := cmd.StderrPipe() - if err != nil { - return - } - err = cmd.Start() - if err != nil { - return - } - stdOut, err := ioutil.ReadAll(stdOutPipe) - if err != nil { - return - } - stdErr, err := ioutil.ReadAll(stdErrPipe) - if err != nil { - return - } - - err = cmd.Wait() - - res.ResponseRaw = stdOut - decoder := json.NewDecoder(bytes.NewReader(stdOut)) - - dErr := decoder.Decode(&res.Response) - if dErr == nil { - return - } - - if err == nil { - err = dErr - } - - if err == nil { - return - } - - err = fmt.Errorf("error creating message: %s\n\nSTDERR:\n%s\n\nSTDOUT:\n%s", err, stdErr, stdOut) - - return -} - -// Get a port given a URL -func getPort(rawURL string) int { - parsedURL, err := url.Parse(rawURL) - if err == nil { - splitHost := strings.Split(parsedURL.Host, ":") - if len(splitHost) == 2 { - port, err := strconv.Atoi(splitHost[1]) - if err == nil { - return port - } - } - if parsedURL.Scheme == "https" { - return 443 - } - return 80 - } - - return -1 -} - -// Get the address given a URL -func getAddress(rawURL string) string { - parsedURL, err := url.Parse(rawURL) - if err != nil { - return "" - } - - splitHost := strings.Split(parsedURL.Host, ":") - return splitHost[0] -} - -// Use this to wait for a port to be running prior -// to running tests. -var waitForPort = func(port int, network string, address string, timeoutDuration time.Duration, message string) error { - log.Println("[DEBUG] waiting for port", port, "to become available on", address, "after", timeoutDuration) - timeout := time.After(timeoutDuration) - - for { - select { - case <-timeout: - log.Printf("[ERROR] Expected server to start < %s. %s", timeoutDuration, message) - return fmt.Errorf("Expected server to start < %s. 
%s", timeoutDuration, message) - case <-time.After(50 * time.Millisecond): - _, err := net.Dial(network, fmt.Sprintf("%s:%d", address, port)) - if err == nil { - return nil - } - } - } -} - -// sanitiseRubyResponse removes Ruby-isms from the response content -// making the output much more human readable -func sanitiseRubyResponse(response string) string { - log.Println("[TRACE] response from Ruby process pre-sanitisation:", response) - - r := regexp.MustCompile("(?m)^\\s*#.*$") - s := r.ReplaceAllString(response, "") - - r = regexp.MustCompile("(?m).*bundle exec rake pact:verify.*$") - s = r.ReplaceAllString(s, "") - - r = regexp.MustCompile("\\n+") - s = r.ReplaceAllString(s, "\n") - - return s -} - -// getNetworkInterface returns a default interface to communicate to the Daemon -// if none specified -func (p *PactClient) getNetworkInterface() string { - if p.Network == "" { - return "tcp" - } - return p.Network -} diff --git a/vendor/github.com/pact-foundation/pact-go/dsl/interaction.go b/vendor/github.com/pact-foundation/pact-go/dsl/interaction.go deleted file mode 100644 index a94c933..0000000 --- a/vendor/github.com/pact-foundation/pact-go/dsl/interaction.go +++ /dev/null @@ -1,84 +0,0 @@ -package dsl - -import ( - "encoding/json" - "log" -) - -// Interaction is the main implementation of the Pact interface. -type Interaction struct { - // Request - Request Request `json:"request"` - - // Response - Response Response `json:"response"` - - // Description to be written into the Pact file - Description string `json:"description"` - - // Provider state to be written into the Pact file - State string `json:"providerState,omitempty"` -} - -// Given specifies a provider state. Optional. -func (i *Interaction) Given(state string) *Interaction { - i.State = state - - return i -} - -// UponReceiving specifies the name of the test case. This becomes the name of -// the consumer/provider pair in the Pact file. Mandatory. -func (i *Interaction) UponReceiving(description string) *Interaction { - i.Description = description - - return i -} - -// WithRequest specifies the details of the HTTP request that will be used to -// confirm that the Provider provides an API listening on the given interface. -// Mandatory. -func (i *Interaction) WithRequest(request Request) *Interaction { - i.Request = request - - // Check if someone tried to add an object as a string representation - // as per original allowed implementation, e.g. - // { "foo": "bar", "baz": like("bat") } - if isJSONFormattedObject(request.Body) { - log.Println("[WARN] request body appears to be a JSON formatted object, " + - "no structural matching will occur. Support for structured strings has been" + - "deprecated as of 0.13.0") - } - - return i -} - -// WillRespondWith specifies the details of the HTTP response that will be used to -// confirm that the Provider must satisfy. Mandatory. 
-func (i *Interaction) WillRespondWith(response Response) *Interaction { - i.Response = response - - return i -} - -// Checks to see if someone has tried to submit a JSON string -// for an object, which is no longer supported -func isJSONFormattedObject(stringOrObject interface{}) bool { - switch content := stringOrObject.(type) { - case []byte: - case string: - var obj interface{} - err := json.Unmarshal([]byte(content), &obj) - - if err != nil { - return false - } - - // Check if a map type - if _, ok := obj.(map[string]interface{}); ok { - return true - } - } - - return false -} diff --git a/vendor/github.com/pact-foundation/pact-go/dsl/matcher.go b/vendor/github.com/pact-foundation/pact-go/dsl/matcher.go deleted file mode 100644 index d663e9e..0000000 --- a/vendor/github.com/pact-foundation/pact-go/dsl/matcher.go +++ /dev/null @@ -1,455 +0,0 @@ -package dsl - -import ( - "encoding/json" - "fmt" - "log" - "reflect" - "regexp" - "strings" - "time" -) - -// Term Matcher regexes -const ( - hexadecimal = `[0-9a-fA-F]+` - ipAddress = `(\d{1,3}\.)+\d{1,3}` - ipv6Address = `(\A([0-9a-f]{1,4}:){1,1}(:[0-9a-f]{1,4}){1,6}\Z)|(\A([0-9a-f]{1,4}:){1,2}(:[0-9a-f]{1,4}){1,5}\Z)|(\A([0-9a-f]{1,4}:){1,3}(:[0-9a-f]{1,4}){1,4}\Z)|(\A([0-9a-f]{1,4}:){1,4}(:[0-9a-f]{1,4}){1,3}\Z)|(\A([0-9a-f]{1,4}:){1,5}(:[0-9a-f]{1,4}){1,2}\Z)|(\A([0-9a-f]{1,4}:){1,6}(:[0-9a-f]{1,4}){1,1}\Z)|(\A(([0-9a-f]{1,4}:){1,7}|:):\Z)|(\A:(:[0-9a-f]{1,4}){1,7}\Z)|(\A((([0-9a-f]{1,4}:){6})(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3})\Z)|(\A(([0-9a-f]{1,4}:){5}[0-9a-f]{1,4}:(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3})\Z)|(\A([0-9a-f]{1,4}:){5}:[0-9a-f]{1,4}:(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\Z)|(\A([0-9a-f]{1,4}:){1,1}(:[0-9a-f]{1,4}){1,4}:(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\Z)|(\A([0-9a-f]{1,4}:){1,2}(:[0-9a-f]{1,4}){1,3}:(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\Z)|(\A([0-9a-f]{1,4}:){1,3}(:[0-9a-f]{1,4}){1,2}:(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\Z)|(\A([0-9a-f]{1,4}:){1,4}(:[0-9a-f]{1,4}){1,1}:(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\Z)|(\A(([0-9a-f]{1,4}:){1,5}|:):(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\Z)|(\A:(:[0-9a-f]{1,4}){1,5}:(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\Z)` - uuid = `[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}` - timestamp = `^([\+-]?\d{4}(?!\d{2}\b))((-?)((0[1-9]|1[0-2])(\3([12]\d|0[1-9]|3[01]))?|W([0-4]\d|5[0-2])(-?[1-7])?|(00[1-9]|0[1-9]\d|[12]\d{2}|3([0-5]\d|6[1-6])))([T\s]((([01]\d|2[0-3])((:?)[0-5]\d)?|24\:?00)([\.,]\d+(?!:))?)?(\17[0-5]\d([\.,]\d+)?)?([zZ]|([\+-])([01]\d|2[0-3]):?([0-5]\d)?)?)?)?$` - date = `^([\+-]?\d{4}(?!\d{2}\b))((-?)((0[1-9]|1[0-2])(\3([12]\d|0[1-9]|3[01]))?|W([0-4]\d|5[0-2])(-?[1-7])?|(00[1-9]|0[1-9]\d|[12]\d{2}|3([0-5]\d|6[1-6])))?)` - timeRegex = `^(T\d\d:\d\d(:\d\d)?(\.\d+)?(([+-]\d\d:\d\d)|Z)?)?$` -) - -var timeExample = time.Date(2000, 2, 1, 12, 30, 0, 0, time.UTC) - -var fullRegex = regexp.MustCompile(`regex=(.*)$`) -var exampleRegex = regexp.MustCompile(`^example=(.*)`) - -type eachLike struct { - Contents interface{} `json:"contents"` - Min int `json:"min"` -} - -func (m eachLike) GetValue() interface{} { - return m.Contents -} - -func (m eachLike) isMatcher() { -} - -func (m eachLike) MarshalJSON() ([]byte, error) { - type marshaler eachLike - - return json.Marshal(struct { - Type string `json:"json_class"` - 
marshaler - }{"Pact::ArrayLike", marshaler(m)}) -} - -type like struct { - Contents interface{} `json:"contents"` -} - -func (m like) GetValue() interface{} { - return m.Contents -} - -func (m like) isMatcher() { -} - -func (m like) MarshalJSON() ([]byte, error) { - type marshaler like - - return json.Marshal(struct { - Type string `json:"json_class"` - marshaler - }{"Pact::SomethingLike", marshaler(m)}) -} - -type term struct { - Data termData `json:"data"` -} - -func (m term) GetValue() interface{} { - return m.Data.Generate -} - -func (m term) isMatcher() { -} - -func (m term) MarshalJSON() ([]byte, error) { - type marshaler term - - return json.Marshal(struct { - Type string `json:"json_class"` - marshaler - }{"Pact::Term", marshaler(m)}) -} - -type termData struct { - Generate interface{} `json:"generate"` - Matcher termMatcher `json:"matcher"` -} - -type termMatcher struct { - Type string `json:"json_class"` - O int `json:"o"` - Regex interface{} `json:"s"` -} - -// EachLike specifies that a given element in a JSON body can be repeated -// "minRequired" times. Number needs to be 1 or greater -func EachLike(content interface{}, minRequired int) Matcher { - return eachLike{ - Contents: content, - Min: minRequired, - } -} - -// Like specifies that the given content type should be matched based -// on type (int, string etc.) instead of a verbatim match. -func Like(content interface{}) Matcher { - return like{ - Contents: content, - } -} - -// Term specifies that the matching should generate a value -// and also match using a regular expression. -func Term(generate string, matcher string) Matcher { - return term{ - Data: termData{ - Generate: generate, - Matcher: termMatcher{ - Type: "Regexp", - O: 0, - Regex: matcher, - }, - }, - } -} - -// HexValue defines a matcher that accepts hexadecimal values. -func HexValue() Matcher { - return Regex("3F", hexadecimal) -} - -// Identifier defines a matcher that accepts integer values. -func Identifier() Matcher { - return Like(42) -} - -// Integer defines a matcher that accepts ints. Identical to Identifier. -var Integer = Identifier - -// IPAddress defines a matcher that accepts valid IPv4 addresses. -func IPAddress() Matcher { - return Regex("127.0.0.1", ipAddress) -} - -// IPv4Address matches valid IPv4 addresses. -var IPv4Address = IPAddress - -// IPv6Address defines a matcher that accepts IP addresses. -func IPv6Address() Matcher { - return Regex("::ffff:192.0.2.128", ipAddress) -} - -// Decimal defines a matcher that accepts any decimal value. -func Decimal() Matcher { - return Like(42.0) -} - -// Timestamp matches a pattern corresponding to the ISO_DATETIME_FORMAT, which -// is "yyyy-MM-dd'T'HH:mm:ss". The current date and time is used as the eaxmple. -func Timestamp() Matcher { - return Regex(timeExample.Format(time.RFC3339), timestamp) -} - -// Date matches a pattern corresponding to the ISO_DATE_FORMAT, which -// is "yyyy-MM-dd". The current date is used as the eaxmple. -func Date() Matcher { - return Regex(timeExample.Format("2006-01-02"), date) -} - -// Time matches a pattern corresponding to the ISO_DATE_FORMAT, which -// is "'T'HH:mm:ss". The current tem is used as the eaxmple. -func Time() Matcher { - return Regex(timeExample.Format("T15:04:05"), timeRegex) -} - -// UUID defines a matcher that accepts UUIDs. Produces a v4 UUID as the example. 
-func UUID() Matcher { - return Regex("fc763eba-0905-41c5-a27f-3934ab26786c", uuid) -} - -// Regex is a more appropriately named alias for the "Term" matcher -var Regex = Term - -// Matcher allows various implementations such String or StructMatcher -// to be provided in when matching with the DSL -// We use the strategy outlined at http://www.jerf.org/iri/post/2917 -// to create a "sum" or "union" type. -type Matcher interface { - // isMatcher is how we tell the compiler that strings - // and other types are the same / allowed - isMatcher() - - // GetValue returns the raw generated value for the matcher - // without any of the matching detail context - GetValue() interface{} -} - -// S is the string primitive wrapper (alias) for the Matcher type, -// it allows plain strings to be matched -// To keep backwards compatible with previous versions -// we aren't using an alias here -type S string - -func (s S) isMatcher() {} - -// GetValue returns the raw generated value for the matcher -// without any of the matching detail context -func (s S) GetValue() interface{} { - return s -} - -// String is the longer named form of the string primitive wrapper, -// it allows plain strings to be matched -type String string - -func (s String) isMatcher() {} - -// GetValue returns the raw generated value for the matcher -// without any of the matching detail context -func (s String) GetValue() interface{} { - return s -} - -// StructMatcher matches a complex object structure, which may itself -// contain nested Matchers -type StructMatcher map[string]interface{} - -func (m StructMatcher) isMatcher() {} - -// GetValue returns the raw generated value for the matcher -// without any of the matching detail context -func (m StructMatcher) GetValue() interface{} { - return nil -} - -// MapMatcher allows a map[string]string-like object -// to also contain complex matchers -type MapMatcher map[string]Matcher - -// UnmarshalJSON is a custom JSON parser for MapMatcher -// It treats the matchers as strings -func (m *MapMatcher) UnmarshalJSON(bytes []byte) (err error) { - sk := make(map[string]string) - err = json.Unmarshal(bytes, &sk) - if err != nil { - return - } - - *m = make(map[string]Matcher) - for k, v := range sk { - (*m)[k] = String(v) - } - - return -} - -// Takes an object and converts it to a JSON representation -func objectToString(obj interface{}) string { - switch content := obj.(type) { - case string: - return content - default: - jsonString, err := json.Marshal(obj) - if err != nil { - log.Println("[DEBUG] objectToString: error unmarshaling object into string:", err.Error()) - return "" - } - return string(jsonString) - } -} - -// Match recursively traverses the provided type and outputs a -// matcher string for it that is compatible with the Pact dsl. -// By default, it requires slices to have a minimum of 1 element. -// For concrete types, it uses `dsl.Like` to assert that types match. -// Optionally, you may override these defaults by supplying custom -// pact tags on your structs. -// -// Supported Tag Formats -// Minimum Slice Size: `pact:"min=2"` -// String RegEx: `pact:"example=2000-01-01,regex=^\\d{4}-\\d{2}-\\d{2}$"` -func Match(src interface{}) Matcher { - return match(reflect.TypeOf(src), getDefaults()) -} - -// match recursively traverses the provided type and outputs a -// matcher string for it that is compatible with the Pact dsl. 
-func match(srcType reflect.Type, params params) Matcher { - switch kind := srcType.Kind(); kind { - case reflect.Ptr: - return match(srcType.Elem(), params) - case reflect.Slice, reflect.Array: - return EachLike(match(srcType.Elem(), getDefaults()), params.slice.min) - case reflect.Struct: - result := StructMatcher{} - - for i := 0; i < srcType.NumField(); i++ { - field := srcType.Field(i) - fieldName := getJsonFieldName(field) - if fieldName == "" { - continue - } - result[fieldName] = match(field.Type, pluckParams(field.Type, field.Tag.Get("pact"))) - } - return result - case reflect.String: - if params.str.regEx != "" { - return Term(params.str.example, params.str.regEx) - } - if params.str.example != "" { - return Like(params.str.example) - } - - return Like("string") - case reflect.Bool: - if params.boolean.defined { - return Like(params.boolean.value) - } - return Like(true) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - if params.number.integer != 0 { - return Like(params.number.integer) - } - return Like(1) - case reflect.Float32, reflect.Float64: - if params.number.float != 0 { - return Like(params.number.float) - } - return Like(1.1) - default: - panic(fmt.Sprintf("match: unhandled type: %v", srcType)) - } -} - -// getJsonFieldName retrieves the name for a JSON field as -// https://golang.org/pkg/encoding/json/#Marshal would do. -func getJsonFieldName(field reflect.StructField) string { - jsonTag := field.Tag.Get("json") - if jsonTag == "" { - return field.Name - } - // Field should be ignored according to the JSON marshal documentation. - if jsonTag == "-" { - return "" - } - commaIndex := strings.Index(jsonTag, ",") - if commaIndex > -1 { - return jsonTag[:commaIndex] - } - return jsonTag -} - -// params are plucked from 'pact' struct tags as match() traverses -// struct fields. They are passed back into match() along with their -// associated type to serve as parameters for the dsl functions. 
-type params struct { - slice sliceParams - str stringParams - number numberParams - boolean boolParams -} - -type numberParams struct { - integer int - float float32 -} -type boolParams struct { - value bool - defined bool -} - -type sliceParams struct { - min int -} - -type stringParams struct { - example string - regEx string -} - -// getDefaults returns the default params -func getDefaults() params { - return params{ - slice: sliceParams{ - min: 1, - }, - } -} - -// pluckParams converts a 'pact' tag into a pactParams struct -// Supported Tag Formats -// Minimum Slice Size: `pact:"min=2"` -// String RegEx: `pact:"example=2000-01-01,regex=^\\d{4}-\\d{2}-\\d{2}$"` -func pluckParams(srcType reflect.Type, pactTag string) params { - params := getDefaults() - if pactTag == "" { - return params - } - - switch kind := srcType.Kind(); kind { - case reflect.Bool: - if _, err := fmt.Sscanf(pactTag, "example=%t", ¶ms.boolean.value); err != nil { - triggerInvalidPactTagPanic(pactTag, err) - } - params.boolean.defined = true - case reflect.Float32, reflect.Float64: - if _, err := fmt.Sscanf(pactTag, "example=%g", ¶ms.number.float); err != nil { - triggerInvalidPactTagPanic(pactTag, err) - } - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - if _, err := fmt.Sscanf(pactTag, "example=%d", ¶ms.number.integer); err != nil { - triggerInvalidPactTagPanic(pactTag, err) - } - case reflect.Slice: - if _, err := fmt.Sscanf(pactTag, "min=%d", ¶ms.slice.min); err != nil { - triggerInvalidPactTagPanic(pactTag, err) - } - case reflect.String: - if fullRegex.Match([]byte(pactTag)) { - components := strings.Split(pactTag, ",regex=") - - if len(components[1]) == 0 { - triggerInvalidPactTagPanic(pactTag, fmt.Errorf("invalid format: regex must not be empty")) - } - - if _, err := fmt.Sscanf(components[0], "example=%s", ¶ms.str.example); err != nil { - triggerInvalidPactTagPanic(pactTag, err) - } - params.str.regEx = components[1] - - } else if exampleRegex.Match([]byte(pactTag)) { - components := strings.Split(pactTag, "example=") - - if len(components) != 2 || strings.TrimSpace(components[1]) == "" { - triggerInvalidPactTagPanic(pactTag, fmt.Errorf("invalid format: example must not be empty")) - } - - params.str.example = components[1] - } - } - - return params -} - -func triggerInvalidPactTagPanic(tag string, err error) { - panic(fmt.Sprintf("match: encountered invalid pact tag %q . . . parsing failed with error: %v", tag, err)) -} diff --git a/vendor/github.com/pact-foundation/pact-go/dsl/message.go b/vendor/github.com/pact-foundation/pact-go/dsl/message.go deleted file mode 100644 index 809cd38..0000000 --- a/vendor/github.com/pact-foundation/pact-go/dsl/message.go +++ /dev/null @@ -1,97 +0,0 @@ -package dsl - -import ( - "log" - "reflect" -) - -// StateHandler is a provider function that sets up a given state before -// the provider interaction is validated -type StateHandler func(State) error - -// StateHandlers is a list of StateHandler's -type StateHandlers map[string]StateHandler - -// MessageHandler is a provider function that generates a -// message for a Consumer given a Message context (state, description etc.) 
-type MessageHandler func(Message) (interface{}, error) - -// MessageHandlers is a list of handlers ordered by description -type MessageHandlers map[string]MessageHandler - -// MessageConsumer receives a message and must be able to parse -// the content -type MessageConsumer func(Message) error - -// Message is a representation of a single, unidirectional message -// e.g. MQ, pub/sub, Websocket, Lambda -// Message is the main implementation of the Pact Message interface. -type Message struct { - // Message Body - Content interface{} `json:"contents,omitempty"` - - // Message Body as a Raw JSON string - ContentRaw interface{} `json:"-"` - - // Provider state to be written into the Pact file - States []State `json:"providerStates,omitempty"` - - // Message metadata - Metadata MapMatcher `json:"metadata,omitempty"` - - // Description to be written into the Pact file - Description string `json:"description"` - - // Type to Marshall content into when sending back to the consumer - // Defaults to interface{} - Type interface{} - - Args []string `json:"-"` -} - -// State specifies how the system should be configured when -// verified. e.g. "user A exists" -type State struct { - Name string `json:"name"` - Params map[string]interface{} `json:"params,omitempty"` -} - -// Given specifies a provider state. Optional. -func (p *Message) Given(state string) *Message { - p.States = []State{State{Name: state}} - - return p -} - -// ExpectsToReceive specifies the content it is expecting to be -// given from the Provider. The function must be able to handle this -// message for the interaction to succeed. -func (p *Message) ExpectsToReceive(description string) *Message { - p.Description = description - return p -} - -// WithMetadata specifies message-implementation specific metadata -// to go with the content -func (p *Message) WithMetadata(metadata MapMatcher) *Message { - p.Metadata = metadata - return p -} - -// WithContent specifies the details of the HTTP request that will be used to -// confirm that the Provider provides an API listening on the given interface. -// Mandatory. 
-func (p *Message) WithContent(content interface{}) *Message { - p.Content = content - - return p -} - -// AsType specifies that the content sent through to the -// consumer handler should be sent as the given type -func (p *Message) AsType(t interface{}) *Message { - log.Println("[DEBUG] setting Message decoding to type:", reflect.TypeOf(t)) - p.Type = t - - return p -} diff --git a/vendor/github.com/pact-foundation/pact-go/dsl/mock_client.go b/vendor/github.com/pact-foundation/pact-go/dsl/mock_client.go deleted file mode 100644 index dc46b40..0000000 --- a/vendor/github.com/pact-foundation/pact-go/dsl/mock_client.go +++ /dev/null @@ -1,76 +0,0 @@ -package dsl - -import ( - "github.com/pact-foundation/pact-go/types" -) - -// Mock Client for testing the DSL package -type mockClient struct { - VerifyProviderResponse []types.ProviderVerifierResponse - VerifyProviderError error - Servers []*types.MockServer - StopServerResponse *types.MockServer - StopServerError error - RemoveAllServersResponse []*types.MockServer - MockServer *types.MockServer - ReifyMessageResponse *types.ReificationResponse - ReifyMessageError error - UpdateMessagePactError error - PublishPactsError error -} - -func newMockClient() *mockClient { - return &mockClient{ - MockServer: &types.MockServer{ - Pid: 0, - Port: 0, - }, - ReifyMessageResponse: &types.ReificationResponse{ - Response: map[string]string{ - "foo": "bar", - }, - }, - } -} - -// StartServer starts a remote Pact Mock Server. -func (p *mockClient) StartServer(args []string, port int) *types.MockServer { - return p.MockServer -} - -// ListServers lists all known Mock Servers -func (p *mockClient) ListServers() []*types.MockServer { - return p.Servers -} - -// StopServer stops a remote Pact Mock Server. -func (p *mockClient) StopServer(server *types.MockServer) (*types.MockServer, error) { - return p.StopServerResponse, p.StopServerError -} - -// RemoveAllServers stops all remote Pact Mock Servers. -func (p *mockClient) RemoveAllServers(server *types.MockServer) []*types.MockServer { - return p.RemoveAllServersResponse -} - -// VerifyProvider runs the verification process against a running Provider. -func (p *mockClient) VerifyProvider(request types.VerifyRequest) ([]types.ProviderVerifierResponse, error) { - return p.VerifyProviderResponse, p.VerifyProviderError -} - -// UpdateMessagePact adds a pact message to a contract file -func (p *mockClient) UpdateMessagePact(request types.PactMessageRequest) error { - return p.UpdateMessagePactError -} - -// ReifyMessage takes a structured object, potentially containing nested Matchers -// and returns an object with just the example (generated) content -// The object may be a simple JSON primitive e.g. 
string or number or a complex object -func (p *mockClient) ReifyMessage(request *types.PactReificationRequest) (res *types.ReificationResponse, err error) { - return p.ReifyMessageResponse, p.ReifyMessageError -} - -// PublishPacts publishes pacts to a broker -func (p *mockClient) PublishPacts(request types.PublishRequest) error { - return p.PublishPactsError -} diff --git a/vendor/github.com/pact-foundation/pact-go/dsl/mock_service.go b/vendor/github.com/pact-foundation/pact-go/dsl/mock_service.go deleted file mode 100644 index e693396..0000000 --- a/vendor/github.com/pact-foundation/pact-go/dsl/mock_service.go +++ /dev/null @@ -1,114 +0,0 @@ -package dsl - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "log" - "net/http" -) - -// MockService is the HTTP interface to setup the Pact Mock Service -// See https://github.com/bethesque/pact-mock_service and -// https://gist.github.com/bethesque/9d81f21d6f77650811f4. -type MockService struct { - // BaseURL is the base host for the Pact Mock Service. - BaseURL string - - // Consumer name. - Consumer string - - // Provider name. - Provider string - - // PactFileWriteMode specifies how to write to the Pact file, for the life - // of a Mock Service. - // "overwrite" will always truncate and replace the pact after each run - // "update" will append to the pact file, which is useful if your tests - // are split over multiple files and instantiations of a Mock Server - // See https://github.com/pact-foundation/pact-ruby/blob/master/documentation/configuration.md#pactfile_write_mode - PactFileWriteMode string -} - -// call sends a message to the Pact service -func (m *MockService) call(method string, url string, content interface{}) error { - body, err := json.Marshal(content) - if err != nil { - log.Println("[ERROR]", err) - return err - } - - client := &http.Client{} - var req *http.Request - if method == "POST" { - req, err = http.NewRequest(method, url, bytes.NewReader(body)) - } else { - req, err = http.NewRequest(method, url, nil) - } - if err != nil { - return err - } - - req.Header.Set("X-Pact-Mock-Service", "true") - req.Header.Set("Content-Type", "application/json") - - res, err := client.Do(req) - if err != nil { - return err - } - - responseBody, err := ioutil.ReadAll(res.Body) - res.Body.Close() - if res.StatusCode < 200 || res.StatusCode >= 300 { - return errors.New(string(responseBody)) - } - return err -} - -// DeleteInteractions removes any previous Mock Service Interactions. -func (m *MockService) DeleteInteractions() error { - log.Println("[DEBUG] mock service delete interactions") - url := fmt.Sprintf("%s/interactions", m.BaseURL) - return m.call("DELETE", url, nil) -} - -// AddInteraction adds a new Pact Mock Service interaction. -func (m *MockService) AddInteraction(interaction *Interaction) error { - log.Println("[DEBUG] mock service add interaction") - url := fmt.Sprintf("%s/interactions", m.BaseURL) - return m.call("POST", url, interaction) -} - -// Verify confirms that all interactions were called. -func (m *MockService) Verify() error { - log.Println("[DEBUG] mock service verify") - url := fmt.Sprintf("%s/interactions/verification", m.BaseURL) - return m.call("GET", url, nil) -} - -// WritePact writes the pact file to disk. 
-func (m *MockService) WritePact() error { - log.Println("[DEBUG] mock service write pact") - - if m.Consumer == "" || m.Provider == "" { - return errors.New("Consumer and Provider name need to be provided") - } - if m.PactFileWriteMode == "" { - m.PactFileWriteMode = "overwrite" - } - - pact := map[string]interface{}{ - "consumer": map[string]string{ - "name": m.Consumer, - }, - "provider": map[string]string{ - "name": m.Provider, - }, - "pactFileWriteMode": m.PactFileWriteMode, - } - - url := fmt.Sprintf("%s/pact", m.BaseURL) - return m.call("POST", url, pact) -} diff --git a/vendor/github.com/pact-foundation/pact-go/dsl/pact.go b/vendor/github.com/pact-foundation/pact-go/dsl/pact.go deleted file mode 100644 index 3e45280..0000000 --- a/vendor/github.com/pact-foundation/pact-go/dsl/pact.go +++ /dev/null @@ -1,772 +0,0 @@ -/* -Package dsl contains the main Pact DSL used in the Consumer -collaboration test cases, and Provider contract test verification. -*/ -package dsl - -import ( - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "log" - "net" - "net/http" - "net/url" - "os" - "path/filepath" - "reflect" - "strings" - "testing" - "time" - - "github.com/hashicorp/logutils" - "github.com/pact-foundation/pact-go/install" - "github.com/pact-foundation/pact-go/proxy" - "github.com/pact-foundation/pact-go/types" - "github.com/pact-foundation/pact-go/utils" -) - -// Pact is the container structure to run the Consumer Pact test cases. -type Pact struct { - // Current server for the consumer. - Server *types.MockServer - - // Pact RPC Client. - pactClient Client - - // Consumer is the name of the Consumer/Client. - Consumer string - - // Provider is the name of the Providing service. - Provider string - - // Interactions contains all of the Mock Service Interactions to be setup. - Interactions []*Interaction - - // MessageInteractions contains all of the Message based interactions to be setup. - MessageInteractions []*Message - - // Log levels. - LogLevel string - - // Used to detect if logging has been configured. - logFilter *logutils.LevelFilter - - // Location of Pact external service invocation output logging. - // Defaults to `/logs`. - LogDir string - - // Pact files will be saved in this folder. - // Defaults to `/pacts`. - PactDir string - - // PactFileWriteMode specifies how to write to the Pact file, for the life - // of a Mock Service. - // "overwrite" will always truncate and replace the pact after each run - // "merge" will append to the pact file, which is useful if your tests - // are split over multiple files and instantiations of a Mock Server - // See https://github.com/pact-foundation/pact-ruby/blob/master/documentation/configuration.md#pactfile_write_mode - PactFileWriteMode string - - // Specify which version of the Pact Specification should be used (1 or 2). - // Defaults to 2. - SpecificationVersion int - - // Host is the address of the Mock and Verification Service runs on - // Examples include 'localhost', '127.0.0.1', '[::1]' - // Defaults to 'localhost' - Host string - - // Network is the network of the Mock and Verification Service - // Examples include 'tcp', 'tcp4', 'tcp6' - // Defaults to 'tcp' - Network string - - // Ports MockServer can be deployed to, can be CSV or Range with a dash - // Example "1234", "12324,5667", "1234-5667" - AllowedMockServerPorts string - - // DisableToolValidityCheck prevents CLI version checking - use this carefully! 
- // The ideal situation is to check the tool installation with before running - // the tests, which should speed up large test suites significantly - DisableToolValidityCheck bool - - // ClientTimeout specifies how long to wait for Pact CLI to start - // Can be increased to reduce likelihood of intermittent failure - // Defaults to 10s - ClientTimeout time.Duration - - // Check if CLI tools are up to date - toolValidityCheck bool -} - -// AddMessage creates a new asynchronous consumer expectation -func (p *Pact) AddMessage() *Message { - p.setupLogging() - log.Println("[DEBUG] pact add message") - - m := &Message{} - p.MessageInteractions = append(p.MessageInteractions, m) - return m -} - -// AddInteraction creates a new Pact interaction, initialising all -// required things. Will automatically start a Mock Service if none running. -func (p *Pact) AddInteraction() *Interaction { - p.Setup(true) - log.Println("[DEBUG] pact add interaction") - i := &Interaction{} - p.Interactions = append(p.Interactions, i) - return i -} - -// Setup starts the Pact Mock Server. This is usually called before each test -// suite begins. AddInteraction() will automatically call this if no Mock Server -// has been started. -func (p *Pact) Setup(startMockServer bool) *Pact { - p.setupLogging() - log.Println("[DEBUG] pact setup") - dir, _ := os.Getwd() - - if p.Network == "" { - p.Network = "tcp" - } - - if !p.toolValidityCheck && !(p.DisableToolValidityCheck || os.Getenv("PACT_DISABLE_TOOL_VALIDITY_CHECK") != "") { - checkCliCompatibility() - p.toolValidityCheck = true - } - - if p.Host == "" { - p.Host = "localhost" - } - - if p.LogDir == "" { - p.LogDir = fmt.Sprintf(filepath.Join(dir, "logs")) - } - - if p.PactDir == "" { - p.PactDir = fmt.Sprintf(filepath.Join(dir, "pacts")) - } - - if p.SpecificationVersion == 0 { - p.SpecificationVersion = 2 - } - - if p.ClientTimeout == 0 { - p.ClientTimeout = 10 * time.Second - } - - if p.pactClient == nil { - c := NewClient() - c.TimeoutDuration = p.ClientTimeout - p.pactClient = c - } - - if p.PactFileWriteMode == "" { - p.PactFileWriteMode = "overwrite" - } - - // Need to predefine due to scoping - var port int - var perr error - if p.AllowedMockServerPorts != "" { - port, perr = utils.FindPortInRange(p.AllowedMockServerPorts) - } else { - port, perr = utils.GetFreePort() - } - if perr != nil { - log.Println("[ERROR] unable to find free port, mockserver will fail to start") - } - - if p.Server == nil && startMockServer { - log.Println("[DEBUG] starting mock service on port:", port) - args := []string{ - "--pact-specification-version", - fmt.Sprintf("%d", p.SpecificationVersion), - "--pact-dir", - filepath.FromSlash(p.PactDir), - "--log", - filepath.FromSlash(p.LogDir + "/" + "pact.log"), - "--consumer", - p.Consumer, - "--provider", - p.Provider, - "--pact-file-write-mode", - p.PactFileWriteMode, - } - - p.Server = p.pactClient.StartServer(args, port) - } - - return p -} - -// Configure logging -func (p *Pact) setupLogging() { - if p.logFilter == nil { - if p.LogLevel == "" { - p.LogLevel = "INFO" - } - p.logFilter = &logutils.LevelFilter{ - Levels: []logutils.LogLevel{"TRACE", "DEBUG", "INFO", "WARN", "ERROR"}, - MinLevel: logutils.LogLevel(p.LogLevel), - Writer: os.Stderr, - } - log.SetOutput(p.logFilter) - } - log.Println("[DEBUG] pact setup logging") -} - -// Teardown stops the Pact Mock Server. This usually is called on completion -// of each test suite. 
-func (p *Pact) Teardown() *Pact { - log.Println("[DEBUG] teardown") - if p.Server != nil { - server, err := p.pactClient.StopServer(p.Server) - - if err != nil { - log.Println("error:", err) - } - p.Server = server - } - return p -} - -// Verify runs the current test case against a Mock Service. -// Will cleanup interactions between tests within a suite. -func (p *Pact) Verify(integrationTest func() error) error { - p.Setup(true) - log.Println("[DEBUG] pact verify") - var err error - - // Check if we are verifying messages or if we actually have interactions - if len(p.Interactions) == 0 { - return errors.New("there are no interactions to be verified") - } - - mockServer := &MockService{ - BaseURL: fmt.Sprintf("http://%s:%d", p.Host, p.Server.Port), - Consumer: p.Consumer, - Provider: p.Provider, - } - - // Cleanup all interactions - defer func(mockServer *MockService) { - log.Println("[DEBUG] clearing interactions") - - p.Interactions = make([]*Interaction, 0) - err = mockServer.DeleteInteractions() - }(mockServer) - - for _, interaction := range p.Interactions { - err = mockServer.AddInteraction(interaction) - if err != nil { - return err - } - } - - // Run the integration test - err = integrationTest() - if err != nil { - return err - } - - // Run Verification Process - err = mockServer.Verify() - if err != nil { - return err - } - - return err -} - -// WritePact should be called writes when all tests have been performed for a -// given Consumer <-> Provider pair. It will write out the Pact to the -// configured file. -func (p *Pact) WritePact() error { - p.Setup(true) - log.Println("[DEBUG] pact write Pact file") - mockServer := MockService{ - BaseURL: fmt.Sprintf("http://%s:%d", p.Host, p.Server.Port), - Consumer: p.Consumer, - Provider: p.Provider, - PactFileWriteMode: p.PactFileWriteMode, - } - err := mockServer.WritePact() - if err != nil { - return err - } - - return nil -} - -// VerifyProviderRaw reads the provided pact files and runs verification against -// a running Provider API, providing raw response from the Verification process. -// -// Order of events: BeforeEach, stateHandlers, requestFilter(pre post), AfterEach -func (p *Pact) VerifyProviderRaw(request types.VerifyRequest) ([]types.ProviderVerifierResponse, error) { - p.Setup(false) - res := make([]types.ProviderVerifierResponse, 0) - - u, err := url.Parse(request.ProviderBaseURL) - - if err != nil { - return res, err - } - - m := []proxy.Middleware{} - - if request.BeforeEach != nil { - m = append(m, BeforeEachMiddleware(request.BeforeEach)) - } - - if request.AfterEach != nil { - m = append(m, AfterEachMiddleware(request.AfterEach)) - } - - if len(request.StateHandlers) > 0 { - m = append(m, stateHandlerMiddleware(request.StateHandlers)) - } - - if request.RequestFilter != nil { - m = append(m, request.RequestFilter) - } - - // Configure HTTP Verification Proxy - opts := proxy.Options{ - TargetAddress: fmt.Sprintf("%s:%s", u.Hostname(), u.Port()), - TargetScheme: u.Scheme, - TargetPath: u.Path, - Middleware: m, - InternalRequestPathPrefix: providerStatesSetupPath, - CustomTLSConfig: request.CustomTLSConfig, - } - - // Starts the message wrapper API with hooks back to the state handlers - // This maps the 'description' field of a message pact, to a function handler - // that will implement the message producer. This function must return an object and optionally - // and error. The object will be marshalled to JSON for comparison. 
- port, err := proxy.HTTPReverseProxy(opts) - - // Backwards compatibility, setup old provider states URL if given - // Otherwise point to proxy - setupURL := request.ProviderStatesSetupURL - if request.ProviderStatesSetupURL == "" && len(request.StateHandlers) > 0 { - setupURL = fmt.Sprintf("http://localhost:%d%s", port, providerStatesSetupPath) - } - - // Construct verifier request - verificationRequest := types.VerifyRequest{ - ProviderBaseURL: fmt.Sprintf("http://localhost:%d", port), - PactURLs: request.PactURLs, - BrokerURL: request.BrokerURL, - Tags: request.Tags, - BrokerUsername: request.BrokerUsername, - BrokerPassword: request.BrokerPassword, - BrokerToken: request.BrokerToken, - PublishVerificationResults: request.PublishVerificationResults, - ProviderVersion: request.ProviderVersion, - Provider: request.Provider, - ProviderStatesSetupURL: setupURL, - CustomProviderHeaders: request.CustomProviderHeaders, - ConsumerVersionSelectors: request.ConsumerVersionSelectors, - EnablePending: request.EnablePending, - ProviderTags: request.ProviderTags, - Verbose: request.Verbose, - FailIfNoPactsFound: request.FailIfNoPactsFound, - IncludeWIPPactsSince: request.IncludeWIPPactsSince, - } - - if request.Provider == "" { - verificationRequest.Provider = p.Provider - } - - portErr := waitForPort(port, "tcp", "localhost", p.ClientTimeout, - fmt.Sprintf(`Timed out waiting for http verification proxy on port %d - check for errors`, port)) - - if portErr != nil { - log.Fatal("Error:", err) - return res, portErr - } - - log.Println("[DEBUG] pact provider verification") - - return p.pactClient.VerifyProvider(verificationRequest) -} - -// VerifyProvider accepts an instance of `*testing.T` -// running the provider verification with granular test reporting and -// automatic failure reporting for nice, simple tests. -func (p *Pact) VerifyProvider(t *testing.T, request types.VerifyRequest) ([]types.ProviderVerifierResponse, error) { - res, err := p.VerifyProviderRaw(request) - - if len(res) == 0 { - var message = "no pacts found to verify" - if err != nil { - message = "error verifying the provider: see returned error for detail" - } - - if len(request.Tags) > 0 { - message = fmt.Sprintf("%s. Check the tags provided (%s) for your broker (%s) are correct", message, strings.Join(request.Tags, ","), request.BrokerURL) - } - - if request.FailIfNoPactsFound { - t.Errorf(message) - } else { - t.Logf(message) - } - } - - runTestCases(t, res) - - return res, err -} - -var installer = install.NewInstaller() - -var checkCliCompatibility = func() { - log.Println("[DEBUG] checking CLI compatibility") - err := installer.CheckInstallation() - - if err != nil { - log.Fatal("[ERROR] CLI tools are out of date, please upgrade before continuing") - } -} - -// BeforeEachMiddleware is invoked before any other, only on the __setup -// request (to avoid duplication) -func BeforeEachMiddleware(BeforeEach types.Hook) proxy.Middleware { - return func(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == providerStatesSetupPath { - - log.Println("[DEBUG] executing before hook") - err := BeforeEach() - - if err != nil { - log.Println("[ERROR] error executing before hook:", err) - w.WriteHeader(http.StatusInternalServerError) - } - } - next.ServeHTTP(w, r) - }) - } -} - -// AfterEachMiddleware is invoked after any other, and is the last -// function to be called prior to returning to the test suite. 
It is -// therefore not invoked on __setup -func AfterEachMiddleware(AfterEach types.Hook) proxy.Middleware { - return func(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - next.ServeHTTP(w, r) - - if r.URL.Path != providerStatesSetupPath { - log.Println("[DEBUG] executing after hook") - err := AfterEach() - - if err != nil { - log.Println("[ERROR] error executing after hook:", err) - w.WriteHeader(http.StatusInternalServerError) - } - } - }) - } -} - -// stateHandlerMiddleware responds to the various states that are -// given during provider verification -// -// statehandler accepts a state object from the verifier and executes -// any state handlers associated with the provider. -// It will not execute further middleware if it is the designted "state" request -func stateHandlerMiddleware(stateHandlers types.StateHandlers) proxy.Middleware { - return func(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == providerStatesSetupPath { - var s *types.ProviderState - decoder := json.NewDecoder(r.Body) - decoder.Decode(&s) - - // Setup any provider state - for _, state := range s.States { - sf, stateFound := stateHandlers[state] - - if !stateFound { - log.Printf("[WARN] state handler not found for state: %v", state) - } else { - // Execute state handler - if err := sf(); err != nil { - log.Printf("[ERROR] state handler for '%v' errored: %v", state, err) - w.WriteHeader(http.StatusInternalServerError) - return - } - } - } - - w.WriteHeader(http.StatusOK) - return - } - - log.Println("[DEBUG] skipping state handler for request", r.RequestURI) - - // Pass through to application - next.ServeHTTP(w, r) - }) - } -} - -var messageVerificationHandler = func(messageHandlers MessageHandlers, stateHandlers StateHandlers) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json; charset=utf-8") - - // Extract message - var message Message - body, err := ioutil.ReadAll(r.Body) - r.Body.Close() - - if err != nil { - w.WriteHeader(http.StatusBadRequest) - return - } - - err = json.Unmarshal(body, &message) - - if err != nil { - w.WriteHeader(http.StatusBadRequest) - return - } - - // Setup any provider state - for _, state := range message.States { - sf, stateFound := stateHandlers[state.Name] - - if !stateFound { - log.Printf("[WARN] state handler not found for state: %v", state.Name) - } else { - // Execute state handler - if err = sf(state); err != nil { - log.Printf("[WARN] state handler for '%v' return error: %v", state.Name, err) - w.WriteHeader(http.StatusInternalServerError) - return - } - } - } - - // Lookup key in function mapping - f, messageFound := messageHandlers[message.Description] - - if !messageFound { - log.Printf("[ERROR] message handler not found for message description: %v", message.Description) - w.WriteHeader(http.StatusNotFound) - return - } - - // Execute function handler - res, handlerErr := f(message) - - if handlerErr != nil { - w.WriteHeader(http.StatusServiceUnavailable) - return - } - - wrappedResponse := map[string]interface{}{ - "contents": res, - } - - // Write the body back - resBody, errM := json.Marshal(wrappedResponse) - if errM != nil { - w.WriteHeader(http.StatusServiceUnavailable) - log.Println("[ERROR] error marshalling objcet:", errM) - return - } - - w.WriteHeader(http.StatusOK) - w.Write(resBody) - } -} - -func generateTestCaseName(res 
types.ProviderVerifierResponse) string { - if len(res.Examples) > 1 { - return fmt.Sprintf("Pact between %s and %s %s", res.Examples[0].Pact.ConsumerName, res.Examples[0].Pact.ProviderName, res.Examples[0].Pact.ShortDescription) - } - return "Running pact test" -} - -// VerifyMessageProvider accepts an instance of `*testing.T` -// running provider message verification with granular test reporting and -// automatic failure reporting for nice, simple tests. -// -// A Message Producer is analogous to Consumer in the HTTP Interaction model. -// It is the initiator of an interaction, and expects something on the other end -// of the interaction to respond - just in this case, not immediately. -func (p *Pact) VerifyMessageProvider(t *testing.T, request VerifyMessageRequest) (res []types.ProviderVerifierResponse, err error) { - res, err = p.VerifyMessageProviderRaw(request) - - runTestCases(t, res) - - return -} - -func runTestCases(t *testing.T, res []types.ProviderVerifierResponse) { - for _, test := range res { - t.Run(generateTestCaseName(test), func(pactTest *testing.T) { - for _, notice := range test.Summary.Notices { - if notice.When == "before_verification" { - t.Logf("notice: %s", notice.Text) - } - } - for _, example := range test.Examples { - testCase := example.Description - if example.Status == "pending" { - testCase = fmt.Sprintf("Pending %s", example.Description) - } - - t.Run(testCase, func(st *testing.T) { - st.Log(example.FullDescription) - - if example.Status != "passed" { - if example.Status == "pending" { - st.Skip(example.Exception.Message) - } else { - st.Errorf("%s\n%s\n", example.FullDescription, example.Exception.Message) - } - } - }) - } - for _, notice := range test.Summary.Notices { - if notice.When == "after_verification" { - t.Logf("notice: %s", notice.Text) - } - } - }) - } -} - -// VerifyMessageProviderRaw runs provider message verification. -// -// A Message Producer is analogous to Consumer in the HTTP Interaction model. -// It is the initiator of an interaction, and expects something on the other end -// of the interaction to respond - just in this case, not immediately. -func (p *Pact) VerifyMessageProviderRaw(request VerifyMessageRequest) ([]types.ProviderVerifierResponse, error) { - p.Setup(false) - response := make([]types.ProviderVerifierResponse, 0) - - // Starts the message wrapper API with hooks back to the message handlers - // This maps the 'description' field of a message pact, to a function handler - // that will implement the message producer. This function must return an object and optionally - // and error. The object will be marshalled to JSON for comparison. 
- mux := http.NewServeMux() - - port, err := utils.GetFreePort() - if err != nil { - return response, fmt.Errorf("unable to allocate a port for verification: %v", err) - } - - // Construct verifier request - verificationRequest := types.VerifyRequest{ - ProviderBaseURL: fmt.Sprintf("http://localhost:%d", port), - PactURLs: request.PactURLs, - BrokerURL: request.BrokerURL, - Tags: request.Tags, - BrokerUsername: request.BrokerUsername, - BrokerPassword: request.BrokerPassword, - BrokerToken: request.BrokerToken, - PublishVerificationResults: request.PublishVerificationResults, - ProviderVersion: request.ProviderVersion, - ProviderTags: request.ProviderTags, - Provider: p.Provider, - } - - mux.HandleFunc("/", messageVerificationHandler(request.MessageHandlers, request.StateHandlers)) - - ln, err := net.Listen("tcp", fmt.Sprintf(":%d", port)) - if err != nil { - log.Fatal(err) - } - defer ln.Close() - - log.Printf("[DEBUG] API handler starting: port %d (%s)", port, ln.Addr()) - go http.Serve(ln, mux) - - portErr := waitForPort(port, "tcp", "localhost", p.ClientTimeout, - fmt.Sprintf(`Timed out waiting for pact proxy on port %d - check for errors`, port)) - - if portErr != nil { - log.Fatal("Error:", err) - return response, portErr - } - - log.Println("[DEBUG] pact provider verification") - return p.pactClient.VerifyProvider(verificationRequest) -} - -// VerifyMessageConsumerRaw creates a new Pact _message_ interaction to build a testable -// interaction. -// -// -// A Message Consumer is analogous to a Provider in the HTTP Interaction model. -// It is the receiver of an interaction, and needs to be able to handle whatever -// request was provided. -func (p *Pact) VerifyMessageConsumerRaw(message *Message, handler MessageConsumer) error { - log.Printf("[DEBUG] verify message") - p.Setup(false) - - // Reify the message back to its "example/generated" form - reified, err := p.pactClient.ReifyMessage(&types.PactReificationRequest{ - Message: message.Content, - }) - - if err != nil { - return fmt.Errorf("unable to convert consumer test to a valid JSON representation: %v", err) - } - - t := reflect.TypeOf(message.Type) - if t != nil && t.Name() != "interface" { - log.Println("[DEBUG] narrowing type to", t.Name()) - err = json.Unmarshal(reified.ResponseRaw, &message.Type) - - if err != nil { - return fmt.Errorf("unable to narrow type to %v: %v", t.Name(), err) - } - } - - // Yield message, and send through handler function - generatedMessage := - Message{ - Content: message.Type, - States: message.States, - Description: message.Description, - Metadata: message.Metadata, - } - - err = handler(generatedMessage) - if err != nil { - return err - } - - // If no errors, update Message Pact - return p.pactClient.UpdateMessagePact(types.PactMessageRequest{ - Message: message, - Consumer: p.Consumer, - Provider: p.Provider, - PactDir: p.PactDir, - }) -} - -// VerifyMessageConsumer is a test convience function for VerifyMessageConsumerRaw, -// accepting an instance of `*testing.T` -func (p *Pact) VerifyMessageConsumer(t *testing.T, message *Message, handler MessageConsumer) error { - err := p.VerifyMessageConsumerRaw(message, handler) - - if err != nil { - t.Errorf("VerifyMessageConsumer failed: %v", err) - } - - return err -} - -const providerStatesSetupPath = "/__setup" diff --git a/vendor/github.com/pact-foundation/pact-go/dsl/publish.go b/vendor/github.com/pact-foundation/pact-go/dsl/publish.go deleted file mode 100644 index 7847b02..0000000 --- a/vendor/github.com/pact-foundation/pact-go/dsl/publish.go 
+++ /dev/null @@ -1,70 +0,0 @@ -package dsl - -import ( - "log" - "os" - - "github.com/hashicorp/logutils" - "github.com/pact-foundation/pact-go/types" -) - -// PactFile is a simple representation of a Pact file to be able to -// parse Consumer/Provider from the file. -type PactFile struct { - // The API Consumer name - Consumer PactName `json:"consumer"` - - // The API Provider name - Provider PactName `json:"provider"` -} - -// PactName represents the name fields in the PactFile. -type PactName struct { - Name string `json:"name"` -} - -// Publisher is the API to send Pact files to a Pact Broker. -type Publisher struct { - pactClient Client - - // Log levels. - LogLevel string - - // Used to detect if logging has been configured. - logFilter *logutils.LevelFilter -} - -// Publish sends the Pacts to a broker, optionally tagging them -func (p *Publisher) Publish(request types.PublishRequest) error { - p.setupLogging() - log.Println("[DEBUG] pact publisher: publish pact") - - if p.pactClient == nil { - c := NewClient() - p.pactClient = c - } - - err := request.Validate() - - if err != nil { - return err - } - - return p.pactClient.PublishPacts(request) -} - -// Configure logging -func (p *Publisher) setupLogging() { - if p.logFilter == nil { - if p.LogLevel == "" { - p.LogLevel = "INFO" - } - p.logFilter = &logutils.LevelFilter{ - Levels: []logutils.LogLevel{"TRACE", "DEBUG", "INFO", "WARN", "ERROR"}, - MinLevel: logutils.LogLevel(p.LogLevel), - Writer: os.Stderr, - } - log.SetOutput(p.logFilter) - } - log.Println("[DEBUG] pact setup logging") -} diff --git a/vendor/github.com/pact-foundation/pact-go/dsl/request.go b/vendor/github.com/pact-foundation/pact-go/dsl/request.go deleted file mode 100644 index 7776ea7..0000000 --- a/vendor/github.com/pact-foundation/pact-go/dsl/request.go +++ /dev/null @@ -1,10 +0,0 @@ -package dsl - -// Request is the default implementation of the Request interface. -type Request struct { - Method string `json:"method"` - Path Matcher `json:"path"` - Query MapMatcher `json:"query,omitempty"` - Headers MapMatcher `json:"headers,omitempty"` - Body interface{} `json:"body,omitempty"` -} diff --git a/vendor/github.com/pact-foundation/pact-go/dsl/response.go b/vendor/github.com/pact-foundation/pact-go/dsl/response.go deleted file mode 100644 index b303630..0000000 --- a/vendor/github.com/pact-foundation/pact-go/dsl/response.go +++ /dev/null @@ -1,8 +0,0 @@ -package dsl - -// Response is the default implementation of the Response interface. -type Response struct { - Status int `json:"status"` - Headers MapMatcher `json:"headers,omitempty"` - Body interface{} `json:"body,omitempty"` -} diff --git a/vendor/github.com/pact-foundation/pact-go/dsl/service_mock.go b/vendor/github.com/pact-foundation/pact-go/dsl/service_mock.go deleted file mode 100644 index 978264c..0000000 --- a/vendor/github.com/pact-foundation/pact-go/dsl/service_mock.go +++ /dev/null @@ -1,65 +0,0 @@ -package dsl - -import ( - "os/exec" - - "github.com/pact-foundation/pact-go/client" -) - -// ServiceMock is the mock implementation of the Service interface. -type ServiceMock struct { - Cmd string - processes map[int]*exec.Cmd - Args []string - ServiceStopResult bool - ServiceStopError error - ServiceList map[int]*exec.Cmd - ServiceStartCmd *exec.Cmd - ServiceStartCount int - ServicePort int - ServiceStopCount int - ServicesSetupCalled bool - - // ExecFunc sets the function to run when starting commands - ExecFunc func() *exec.Cmd -} - -// Setup the Management services. 
-func (s *ServiceMock) Setup() { - s.ServicesSetupCalled = true -} - -// Stop a Service and returns the exit status. -func (s *ServiceMock) Stop(pid int) (bool, error) { - s.ServiceStopCount++ - return s.ServiceStopResult, s.ServiceStopError -} - -// List all Service PIDs. -func (s *ServiceMock) List() map[int]*exec.Cmd { - return s.ServiceList -} - -// Start a Service and log its output. -func (s *ServiceMock) Start() *exec.Cmd { - - s.ServiceStartCount++ - cmd := s.ExecFunc() - cmd.Start() - if s.processes == nil { - s.processes = make(map[int]*exec.Cmd) - } - s.processes[cmd.Process.Pid] = cmd - - return cmd -} - -// Command implements to Command operation -func (s *ServiceMock) Command() *exec.Cmd { - return s.ExecFunc() -} - -// NewService creates a new MockService with default settings. -func (s *ServiceMock) NewService(args []string) client.Service { - return s -} diff --git a/vendor/github.com/pact-foundation/pact-go/dsl/verify_mesage_request.go b/vendor/github.com/pact-foundation/pact-go/dsl/verify_mesage_request.go deleted file mode 100644 index a72b4ac..0000000 --- a/vendor/github.com/pact-foundation/pact-go/dsl/verify_mesage_request.go +++ /dev/null @@ -1,83 +0,0 @@ -package dsl - -import ( - "fmt" -) - -// VerifyMessageRequest contains the verification logic -// to send to the Pact Message verifier -type VerifyMessageRequest struct { - // Local/HTTP paths to Pact files. - PactURLs []string - - // Pact Broker URL for broker-based verification - BrokerURL string - - // Tags to find in Broker for matrix-based testing - Tags []string - - // Username when authenticating to a Pact Broker. - BrokerUsername string - - // Password when authenticating to a Pact Broker. - BrokerPassword string - - // BrokerToken is required when authenticating using the Bearer token mechanism - BrokerToken string - - // PublishVerificationResults to the Pact Broker. - PublishVerificationResults bool - - // ProviderVersion is the semantical version of the Provider API. - ProviderVersion string - - // ProviderTags is the set of tags to apply to the provider application version when results are published to the broker - ProviderTags []string - - // MessageHandlers contains a mapped list of message handlers for a provider - // that will be rable to produce the correct message format for a given - // consumer interaction - MessageHandlers MessageHandlers - - // StateHandlers contain a mapped list of message states to functions - // that are used to setup a given provider state prior to the message - // verification step. - StateHandlers StateHandlers - - // Arguments to the VerificationProvider - // Deprecated: This will be deleted after the native library replaces Ruby deps. - Args []string -} - -// Validate checks that the minimum fields are provided. -// Deprecated: This map be deleted after the native library replaces Ruby deps, -// and should not be used outside of this library. -func (v *VerifyMessageRequest) Validate() error { - v.Args = []string{} - - if len(v.PactURLs) != 0 { - v.Args = append(v.Args, v.PactURLs...) 
- } else { - return fmt.Errorf("Pact URLs is mandatory") - } - - v.Args = append(v.Args, "--format", "json") - - if v.BrokerUsername != "" { - v.Args = append(v.Args, "--broker-username", v.BrokerUsername) - } - - if v.BrokerPassword != "" { - v.Args = append(v.Args, "--broker-password", v.BrokerPassword) - } - - if v.ProviderVersion != "" { - v.Args = append(v.Args, "--provider_app_version", v.ProviderVersion) - } - - if v.PublishVerificationResults { - v.Args = append(v.Args, "--publish_verification_results", "true") - } - - return nil -} diff --git a/vendor/github.com/pact-foundation/pact-go/install/installer.go b/vendor/github.com/pact-foundation/pact-go/install/installer.go deleted file mode 100644 index e7ac26b..0000000 --- a/vendor/github.com/pact-foundation/pact-go/install/installer.go +++ /dev/null @@ -1,101 +0,0 @@ -// Package install contains functions necessary for installing and checking -// if the necessary underlying Ruby tools have been properly installed -package install - -import ( - "fmt" - "log" - "os/exec" - "strings" - - goversion "github.com/hashicorp/go-version" -) - -// Installer manages the underlying Ruby installation -type Installer struct { - commander commander -} - -const ( - mockServiceRange = ">= 3.5.0, < 4.0.0" - verifierRange = ">= 1.31.0, < 2.0.0" - brokerRange = ">= 1.22.3" -) - -var versionMap = map[string]string{ - "pact-mock-service": mockServiceRange, - "pact-provider-verifier": verifierRange, - "pact-broker": brokerRange, -} - -// NewInstaller creates a new initialised Installer -func NewInstaller() *Installer { - return &Installer{commander: realCommander{}} -} - -// CheckInstallation checks installation of all of the tools -func (i *Installer) CheckInstallation() error { - - for binary, versionRange := range versionMap { - log.Println("[INFO] checking", binary, "within range", versionRange) - - version, err := i.GetVersionForBinary(binary) - if err != nil { - return err - } - - if err = i.CheckVersion(binary, version); err != nil { - return err - } - } - - return nil -} - -// CheckVersion checks installation of a given binary using semver-compatible -// comparisons -func (i *Installer) CheckVersion(binary, version string) error { - log.Println("[DEBUG] checking version for binary", binary, "version", version) - v, err := goversion.NewVersion(version) - if err != nil { - log.Println("[DEBUG] err", err) - return err - } - - versionRange, ok := versionMap[binary] - if !ok { - return fmt.Errorf("unable to find version range for binary %s", binary) - } - - log.Println("[DEBUG] checking if version", v, "within semver range", versionRange) - constraints, err := goversion.NewConstraint(versionRange) - if constraints.Check(v) { - log.Println("[DEBUG]", v, "satisfies constraints", v, constraints) - return nil - } - - return fmt.Errorf("version %s of %s does not match constraint %s", version, binary, versionRange) -} - -// GetVersionForBinary gets the version of a given Ruby binary -func (i *Installer) GetVersionForBinary(binary string) (version string, err error) { - log.Println("[DEBUG] running binary", binary) - - content, err := i.commander.Output(binary, "version") - elements := strings.Split(strings.TrimSpace(string(content)), "\n") - version = strings.TrimSpace(elements[len(elements)-1]) - - return version, err -} - -// commander wraps the exec package, allowing us -// properly test the file system -type commander interface { - Output(command string, args ...string) ([]byte, error) -} - -type realCommander struct{} - -func (c realCommander) 
Output(command string, args ...string) ([]byte, error) { - return exec.Command(command, args...).CombinedOutput() -} diff --git a/vendor/github.com/pact-foundation/pact-go/proxy/http.go b/vendor/github.com/pact-foundation/pact-go/proxy/http.go deleted file mode 100644 index b1a2bbc..0000000 --- a/vendor/github.com/pact-foundation/pact-go/proxy/http.go +++ /dev/null @@ -1,192 +0,0 @@ -package proxy - -import ( - "crypto/tls" - "fmt" - "log" - "net" - "net/http" - "net/http/httputil" - "net/url" - "strings" - "time" - - "github.com/pact-foundation/pact-go/utils" -) - -// Middleware is a way to use composition to add functionality -// by intercepting the req/response cycle of the Reverse Proxy. -// Each handler must accept an http.Handler and also return an -// http.Handler, allowing a simple way to chain functionality together -type Middleware func(http.Handler) http.Handler - -// Options for the Reverse Proxy configuration -type Options struct { - - // TargetScheme is one of 'http' or 'https' - TargetScheme string - - // TargetAddress is the host:port component to proxy - TargetAddress string - - // TargetPath is the path on the target to proxy - TargetPath string - - // ProxyPort is the port to make available for proxying - // Defaults to a random port - ProxyPort int - - // Middleware to apply to the Proxy - Middleware []Middleware - - // Internal request prefix for proxy to not rewrite - InternalRequestPathPrefix string - - // Custom TLS Configuration for communicating with a Provider - // Useful when verifying self-signed services, MASSL etc. - CustomTLSConfig *tls.Config -} - -// loggingMiddleware logs requests to the proxy -func loggingMiddleware(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - log.Printf("[DEBUG] http reverse proxy received connection from %s on path %s\n", r.RemoteAddr, r.RequestURI) - next.ServeHTTP(w, r) - }) -} - -// chainHandlers takes a set of middleware and joins them together -// into a single Middleware, making it much simpler to compose middleware -// together -func chainHandlers(mw ...Middleware) Middleware { - return func(final http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - last := final - for i := len(mw) - 1; i >= 0; i-- { - last = mw[i](last) - } - last.ServeHTTP(w, r) - }) - } -} - -// HTTPReverseProxy provides a default setup for proxying -// internal components within the framework -func HTTPReverseProxy(options Options) (int, error) { - log.Println("[DEBUG] starting new proxy with opts", options) - port := options.ProxyPort - var err error - - url := &url.URL{ - Scheme: options.TargetScheme, - Host: options.TargetAddress, - Path: options.TargetPath, - } - - proxy := createProxy(url, options.InternalRequestPathPrefix) - proxy.Transport = customTransport{tlsConfig: options.CustomTLSConfig} - - if port == 0 { - port, err = utils.GetFreePort() - if err != nil { - log.Println("[ERROR] unable to start reverse proxy server:", err) - return 0, err - } - } - - wrapper := chainHandlers(append(options.Middleware, loggingMiddleware)...) 
- - log.Println("[DEBUG] starting reverse proxy on port", port) - go http.ListenAndServe(fmt.Sprintf(":%d", port), wrapper(proxy)) - - return port, nil -} - -// https://stackoverflow.com/questions/52986853/how-to-debug-httputil-newsinglehostreverseproxy -// Set the proxy.Transport field to an implementation that dumps the request before delegating to the default transport: - -type customTransport struct { - tlsConfig *tls.Config -} - -func (c customTransport) RoundTrip(r *http.Request) (*http.Response, error) { - b, err := httputil.DumpRequestOut(r, false) - if err != nil { - return nil, err - } - log.Println("[TRACE] proxy outgoing request\n", string(b)) - - transport := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - }).DialContext, - MaxIdleConns: 100, - IdleConnTimeout: 90 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - } - - if c.tlsConfig != nil { - log.Println("[DEBUG] applying custom TLS config") - transport.TLSClientConfig = c.tlsConfig - } - var DefaultTransport http.RoundTripper = transport - - res, err := DefaultTransport.RoundTrip(r) - if err != nil { - log.Println("[ERROR]", err) - return nil, err - } - b, err = httputil.DumpResponse(res, true) - log.Println("[TRACE] proxied server response\n", string(b)) - - return res, err -} - -// Adapted from https://github.com/golang/go/blob/master/src/net/http/httputil/reverseproxy.go -func createProxy(target *url.URL, ignorePrefix string) *httputil.ReverseProxy { - targetQuery := target.RawQuery - director := func(req *http.Request) { - if !strings.HasPrefix(req.URL.Path, ignorePrefix) { - log.Println("[DEBUG] setting proxy to target") - log.Println("[DEBUG] incoming request", req.URL) - req.URL.Scheme = target.Scheme - req.URL.Host = target.Host - req.Host = target.Host - - req.URL.Path = singleJoiningSlash(target.Path, req.URL.Path) - log.Println("[DEBUG] outgoing request to target", req.URL) - if targetQuery == "" || req.URL.RawQuery == "" { - req.URL.RawQuery = targetQuery + req.URL.RawQuery - } else { - req.URL.RawQuery = targetQuery + "&" + req.URL.RawQuery - } - if _, ok := req.Header["User-Agent"]; !ok { - req.Header.Set("User-Agent", "Pact Go") - } - } else { - log.Println("[DEBUG] setting proxy to internal server") - req.URL.Scheme = "http" - req.URL.Host = "localhost" - req.Host = "localhost" - } - } - return &httputil.ReverseProxy{Director: director} -} - -// From httputil package -// https://github.com/golang/go/blob/master/src/net/http/httputil/reverseproxy.go -func singleJoiningSlash(a, b string) string { - aslash := strings.HasSuffix(a, "/") - bslash := strings.HasPrefix(b, "/") - switch { - case aslash && bslash: - return a + b[1:] - case !aslash && !bslash: - return a + "/" + b - } - return a + b -} diff --git a/vendor/github.com/pact-foundation/pact-go/types/command_response.go b/vendor/github.com/pact-foundation/pact-go/types/command_response.go deleted file mode 100644 index aef5da2..0000000 --- a/vendor/github.com/pact-foundation/pact-go/types/command_response.go +++ /dev/null @@ -1,12 +0,0 @@ -// Package types contains a number of structs common to the library. -package types - -// CommandResponse contains the exit status and any message from running -// an external command / service. -type CommandResponse struct { - // System exit code from the command. Note that this will only even be 0 or 1. 
- ExitCode int - - // Error message (if any) from the command. - Message string -} diff --git a/vendor/github.com/pact-foundation/pact-go/types/consumer_version_selector.go b/vendor/github.com/pact-foundation/pact-go/types/consumer_version_selector.go deleted file mode 100644 index 2ecbecc..0000000 --- a/vendor/github.com/pact-foundation/pact-go/types/consumer_version_selector.go +++ /dev/null @@ -1,31 +0,0 @@ -package types - -import "fmt" - -// ConsumerVersionSelector are the way we specify which pacticipants and -// versions we want to use when configuring verifications -// See https://docs.pact.io/selectors for more -type ConsumerVersionSelector struct { - Pacticipant string `json:"pacticipant"` - Tag string `json:"tag"` - Version string `json:"version"` - Latest bool `json:"latest"` - All bool `json:"all"` -} - -// Validate the selector configuration -func (c *ConsumerVersionSelector) Validate() error { - if c.All && c.Pacticipant == "" { - return fmt.Errorf("must provide a Pacticpant") - } - - if c.Pacticipant != "" && c.Tag == "" { - return fmt.Errorf("must provide at least a Tag if Pacticpant specified") - } - - if c.All && c.Latest { - return fmt.Errorf("cannot select both All and Latest") - } - - return nil -} diff --git a/vendor/github.com/pact-foundation/pact-go/types/handler.go b/vendor/github.com/pact-foundation/pact-go/types/handler.go deleted file mode 100644 index 938165c..0000000 --- a/vendor/github.com/pact-foundation/pact-go/types/handler.go +++ /dev/null @@ -1,14 +0,0 @@ -package types - -// StateHandler is a provider function that sets up a given state before -// the provider interaction is validated -type StateHandler func() error - -// StateHandlers is a list of StateHandler's -type StateHandlers map[string]StateHandler - -// State specifies how the system should be configured when -// verified. e.g. "user A exists" -type State struct { - Name string `json:"name"` -} diff --git a/vendor/github.com/pact-foundation/pact-go/types/mock_server.go b/vendor/github.com/pact-foundation/pact-go/types/mock_server.go deleted file mode 100644 index 3b746c7..0000000 --- a/vendor/github.com/pact-foundation/pact-go/types/mock_server.go +++ /dev/null @@ -1,9 +0,0 @@ -package types - -// MockServer contains the RPC client interface to a Mock Server -type MockServer struct { - Pid int - Port int - Error error - Args []string -} diff --git a/vendor/github.com/pact-foundation/pact-go/types/pact_message_request.go b/vendor/github.com/pact-foundation/pact-go/types/pact_message_request.go deleted file mode 100644 index 231325e..0000000 --- a/vendor/github.com/pact-foundation/pact-go/types/pact_message_request.go +++ /dev/null @@ -1,49 +0,0 @@ -package types - -import "encoding/json" - -// PactMessageRequest contains the response from the Pact Message -// CLI execution. 
-type PactMessageRequest struct { - - // Message is the object to be marshalled to JSON - Message interface{} - - // Consumer is the name of the message consumer - Consumer string - - // Provider is the name of the message provider - Provider string - - // PactDir is the location of where pacts should be stored - PactDir string - - // Args are the arguments sent to to the message service - Args []string -} - -// Validate checks all things are well and constructs -// the CLI args to the message service -func (m *PactMessageRequest) Validate() error { - m.Args = []string{} - - body, err := json.Marshal(m.Message) - if err != nil { - return err - } - - m.Args = append(m.Args, []string{ - "update", - string(body), - "--consumer", - m.Consumer, - "--provider", - m.Provider, - "--pact-dir", - m.PactDir, - "--pact-specification-version", - "3", - }...) - - return nil -} diff --git a/vendor/github.com/pact-foundation/pact-go/types/pact_reification_request.go b/vendor/github.com/pact-foundation/pact-go/types/pact_reification_request.go deleted file mode 100644 index 15fc175..0000000 --- a/vendor/github.com/pact-foundation/pact-go/types/pact_reification_request.go +++ /dev/null @@ -1,32 +0,0 @@ -package types - -import "encoding/json" - -// PactReificationRequest contains the response from the Pact Message -// CLI execution. -type PactReificationRequest struct { - - // Message is the object to be marshalled to JSON - Message interface{} - - // Args are the arguments sent to to the message service - Args []string -} - -// Validate checks all things are well and constructs -// the CLI args to the message service -func (m *PactReificationRequest) Validate() error { - m.Args = []string{} - - body, err := json.Marshal(m.Message) - if err != nil { - return err - } - - m.Args = append(m.Args, []string{ - "reify", - string(body), - }...) - - return nil -} diff --git a/vendor/github.com/pact-foundation/pact-go/types/provider_state.go b/vendor/github.com/pact-foundation/pact-go/types/provider_state.go deleted file mode 100644 index 2d6551e..0000000 --- a/vendor/github.com/pact-foundation/pact-go/types/provider_state.go +++ /dev/null @@ -1,15 +0,0 @@ -package types - -// ProviderState Models a provider state coming over the Wire. -// This is generally provided as a request to an HTTP endpoint (e.g. PUT /state) -// to configure a state on a Provider. -type ProviderState struct { - Consumer string `json:"consumer"` - State string `json:"state"` - States []string `json:"states"` -} - -// ProviderStates is a mapping of consumers to all known states. This is usually -// a response from an HTTP endpoint (e.g. GET /states) to find all states a -// provider has. -type ProviderStates map[string][]string diff --git a/vendor/github.com/pact-foundation/pact-go/types/provider_verifier_response.go b/vendor/github.com/pact-foundation/pact-go/types/provider_verifier_response.go deleted file mode 100644 index 48212a8..0000000 --- a/vendor/github.com/pact-foundation/pact-go/types/provider_verifier_response.go +++ /dev/null @@ -1,41 +0,0 @@ -package types - -// ProviderVerifierResponse contains the output of the pact-provider-verifier -// command. 
-type ProviderVerifierResponse struct { - Version string `json:"version"` - Examples []struct { - ID string `json:"id"` - Description string `json:"description"` - FullDescription string `json:"full_description"` - Status string `json:"status"` - FilePath string `json:"file_path"` - LineNumber int `json:"line_number"` - RunTime float64 `json:"run_time"` - PendingMessage interface{} `json:"pending_message"` - Mismatches []string `json:"mismatches"` - Pact struct { - ConsumerName string `json:"consumer_name"` - ProviderName string `json:"provider_name"` - URL string `json:"url"` - ShortDescription string `json:"short_description"` - } `json:"pact"` - Exception struct { - Class string `json:"class"` - Message string `json:"message"` - Backtrace []string `json:"backtrace"` - } `json:"exception,omitempty"` - } `json:"examples"` - Summary struct { - Duration float64 `json:"duration"` - ExampleCount int `json:"example_count"` - FailureCount int `json:"failure_count"` - PendingCount int `json:"pending_count"` - ErrorsOutsideOfExamplesCount int `json:"errors_outside_of_examples_count"` - Notices []struct { - Text string `json:"text"` - When string `json:"when"` - } `json:"notices"` - } `json:"summary"` - SummaryLine string `json:"summary_line"` -} diff --git a/vendor/github.com/pact-foundation/pact-go/types/publish_request.go b/vendor/github.com/pact-foundation/pact-go/types/publish_request.go deleted file mode 100644 index b5f41b1..0000000 --- a/vendor/github.com/pact-foundation/pact-go/types/publish_request.go +++ /dev/null @@ -1,90 +0,0 @@ -package types - -import ( - "errors" - "fmt" -) - -// PublishRequest contains the details required to Publish Pacts to a broker. -type PublishRequest struct { - // Array of local Pact files or directories containing them. Required. - PactURLs []string - - // URL to fetch the provider states for the given provider API. Optional. - PactBroker string - - // Username for Pact Broker basic authentication. Optional - BrokerUsername string - - // Password for Pact Broker basic authentication. Optional - BrokerPassword string - - // BrokerToken is required when authenticating using the Bearer token mechanism - BrokerToken string - - // ConsumerVersion is the semantical version of the consumer API. - ConsumerVersion string - - // Tags help you organise your Pacts for different testing purposes. - // e.g. "production", "master" and "development" are some common examples. - Tags []string - - // Verbose increases verbosity of output - // Deprecated - Verbose bool - - // Arguments to the VerificationProvider - // Deprecated: This will be deleted after the native library replaces Ruby deps. - Args []string -} - -// Validate checks that the minimum fields are provided. -// Deprecated: This map be deleted after the native library replaces Ruby deps, -// and should not be used outside of this library. -func (p *PublishRequest) Validate() error { - p.Args = []string{} - - if len(p.PactURLs) != 0 { - p.Args = append(p.Args, p.PactURLs...) 
- } else { - return fmt.Errorf("'PactURLs' is mandatory") - } - - if p.BrokerUsername != "" { - p.Args = append(p.Args, "--broker-username", p.BrokerUsername) - } - - if p.BrokerPassword != "" { - p.Args = append(p.Args, "--broker-password", p.BrokerPassword) - } - - if p.PactBroker != "" && ((p.BrokerUsername == "" && p.BrokerPassword != "") || (p.BrokerUsername != "" && p.BrokerPassword == "")) { - return errors.New("both 'BrokerUsername' and 'BrokerPassword' must be supplied if one given") - } - - if p.PactBroker == "" { - return fmt.Errorf("'PactBroker' is mandatory") - } - p.Args = append(p.Args, "--broker-base-url", p.PactBroker) - - if p.BrokerToken != "" { - p.Args = append(p.Args, "--broker-token", p.BrokerToken) - } - - if p.ConsumerVersion == "" { - return fmt.Errorf("'ConsumerVersion' is mandatory") - } - p.Args = append(p.Args, "--consumer-app-version", p.ConsumerVersion) - - if len(p.Tags) > 0 { - for _, t := range p.Tags { - p.Args = append(p.Args, "--tag", t) - } - } - - if p.Verbose { - p.Args = append(p.Args, "--verbose") - } - - return nil -} diff --git a/vendor/github.com/pact-foundation/pact-go/types/reification_response.go b/vendor/github.com/pact-foundation/pact-go/types/reification_response.go deleted file mode 100644 index 4efb8b3..0000000 --- a/vendor/github.com/pact-foundation/pact-go/types/reification_response.go +++ /dev/null @@ -1,10 +0,0 @@ -package types - -// ReificationResponse contains the output of the reification request -type ReificationResponse struct { - // Interface wrapped object - Response interface{} - - // Raw response from reification - ResponseRaw []byte -} diff --git a/vendor/github.com/pact-foundation/pact-go/types/verify_request.go b/vendor/github.com/pact-foundation/pact-go/types/verify_request.go deleted file mode 100644 index 32845ce..0000000 --- a/vendor/github.com/pact-foundation/pact-go/types/verify_request.go +++ /dev/null @@ -1,238 +0,0 @@ -package types - -import ( - "crypto/tls" - "encoding/json" - "errors" - "fmt" - "log" - "time" - - "github.com/pact-foundation/pact-go/proxy" -) - -// Hook functions are used to tap into the lifecycle of a Consumer or Provider test -type Hook func() error - -// VerifyRequest contains the verification params. -type VerifyRequest struct { - // URL to hit during provider verification. - ProviderBaseURL string - - // Local/HTTP paths to Pact files. - PactURLs []string - - // Pact Broker URL for broker-based verification - BrokerURL string - - // Selectors are the way we specify which pacticipants and - // versions we want to use when configuring verifications - // See https://docs.pact.io/selectors for more - ConsumerVersionSelectors []ConsumerVersionSelector - - // Retrieve the latest pacts with this consumer version tag - Tags []string - - // Tags to apply to the provider application version - ProviderTags []string - - // ProviderStatesSetupURL is the endpoint to post current provider state - // to on the Provider API. - // Deprecated: For backward compatibility ProviderStatesSetupURL is - // still supported. Use StateHandlers instead. - ProviderStatesSetupURL string - - // Provider is the name of the Providing service. - Provider string - - // Username when authenticating to a Pact Broker. - BrokerUsername string - - // Password when authenticating to a Pact Broker. 
- BrokerPassword string - - // BrokerToken is required when authenticating using the Bearer token mechanism - BrokerToken string - - // FailIfNoPactsFound configures the framework to return an error - // if no pacts were found when looking up from a broker - FailIfNoPactsFound bool - - // PublishVerificationResults to the Pact Broker. - PublishVerificationResults bool - - // ProviderVersion is the semantical version of the Provider API. - ProviderVersion string - - // CustomProviderHeaders are headers to add during pact verification `requests`. - // eg 'Authorization: Basic cGFjdDpwYWN0'. - // - // NOTE: Use this feature very carefully, as anything in here is not captured - // in the contract (e.g. time-bound tokens) - // - // NOTE: This should be used very carefully and deliberately, as anything you do here - // runs the risk of changing the contract and breaking the real system. - CustomProviderHeaders []string - - // StateHandlers contain a mapped list of message states to functions - // that are used to setup a given provider state prior to the message - // verification step. - StateHandlers StateHandlers - - // BeforeEach allows you to configure your provider prior to the individual test execution - // e.g. setup temporary tokens, prepare data - BeforeEach Hook - - // AfterEach allows you to configure your provider prior to the test execution - // e.g. reset the database state - AfterEach Hook - - // RequestFilter is a piece of middleware that will intercept requests/responses - // from the provider in order to modify it. This is useful in situations where - // you need to override a value due to time sensitivity - such as a OAuth Bearer - // token. - // NOTE: This should be used very carefully and deliberately, as anything you do here - // runs the risk of changing the contract and breaking the real system. - RequestFilter proxy.Middleware - - // Custom TLS Configuration to use when making the requests to/from - // the Provider API. Useful for setting custom certificates, MASSL etc. - CustomTLSConfig *tls.Config - - // Allow pending pacts to be included in verification (see pact.io/pending) - EnablePending bool - - // Pull in new WIP pacts from _any_ tag (see pact.io/wip) - IncludeWIPPactsSince *time.Time - - // Specify an output directory to log all of the verification request/responses - // seen by the verification process. Useful to debug issues with your contract - // and API - PactLogDir string - - // Specify the log verbosity of the CLI verifier process spawned through verification - // Useful for debugging issues with the framework itself - PactLogLevel string - - // Verbose increases verbosity of output - // Deprecated - Verbose bool - - // Arguments to the VerificationProvider - // Deprecated: This will be deleted after the native library replaces Ruby deps. - Args []string -} - -// Validate checks that the minimum fields are provided. -// Deprecated: This map be deleted after the native library replaces Ruby deps, -// and should not be used outside of this library. -func (v *VerifyRequest) Validate() error { - v.Args = []string{} - var err error - - if len(v.PactURLs) != 0 { - v.Args = append(v.Args, v.PactURLs...) 
- } - - if len(v.PactURLs) == 0 && v.BrokerURL == "" { - return fmt.Errorf("One of 'PactURLs' or 'BrokerURL' must be specified") - } - - if len(v.ConsumerVersionSelectors) != 0 { - for _, selector := range v.ConsumerVersionSelectors { - if err = selector.Validate(); err != nil { - return fmt.Errorf("invalid consumer version selector specified: %v", err) - } - body, err := json.Marshal(selector) - if err != nil { - return fmt.Errorf("invalid consumer version selector specified: %v", err) - } - - v.Args = append(v.Args, "--consumer-version-selector", string(body)) - } - } - - if len(v.CustomProviderHeaders) != 0 { - for _, header := range v.CustomProviderHeaders { - v.Args = append(v.Args, "--custom-provider-header", header) - } - } - - v.Args = append(v.Args, "--format", "json") - - if v.ProviderBaseURL != "" { - v.Args = append(v.Args, "--provider-base-url", v.ProviderBaseURL) - } else { - return fmt.Errorf("Provider base URL is mandatory") - } - - if v.ProviderStatesSetupURL != "" { - v.Args = append(v.Args, "--provider-states-setup-url", v.ProviderStatesSetupURL) - } - - if v.BrokerUsername != "" { - v.Args = append(v.Args, "--broker-username", v.BrokerUsername) - } - - if v.BrokerPassword != "" { - v.Args = append(v.Args, "--broker-password", v.BrokerPassword) - } - - if v.BrokerURL != "" && ((v.BrokerUsername == "" && v.BrokerPassword != "") || (v.BrokerUsername != "" && v.BrokerPassword == "")) { - return errors.New("both 'BrokerUsername' and 'BrokerPassword' must be supplied if one given") - } - - if v.BrokerURL != "" { - v.Args = append(v.Args, "--pact-broker-base-url", v.BrokerURL) - } - - if v.BrokerToken != "" { - v.Args = append(v.Args, "--broker-token", v.BrokerToken) - } - - if v.BrokerURL != "" && v.ProviderVersion == "" { - return errors.New("both 'ProviderVersion' must be supplied if 'BrokerURL' given") - } - - if v.ProviderVersion != "" { - v.Args = append(v.Args, "--provider_app_version", v.ProviderVersion) - } - - if v.Provider != "" { - v.Args = append(v.Args, "--provider", v.Provider) - } - - if v.PublishVerificationResults { - v.Args = append(v.Args, "--publish_verification_results", "true") - } - - if v.Verbose { - log.Println("[DEBUG] verifier: ignoring deprecated Verbose flag") - } - - for _, tag := range v.Tags { - v.Args = append(v.Args, "--consumer-version-tag", tag) - } - - for _, tag := range v.ProviderTags { - v.Args = append(v.Args, "--provider-version-tag", tag) - } - - if v.EnablePending { - v.Args = append(v.Args, "--enable-pending") - } - - if v.IncludeWIPPactsSince != nil { - v.Args = append(v.Args, "--include-wip-pacts-since", v.IncludeWIPPactsSince.Format(time.RFC3339)) - } - - if v.PactLogDir != "" { - v.Args = append(v.Args, "--log-dir", v.PactLogDir) - } - - if v.PactLogLevel != "" { - v.Args = append(v.Args, "--log-level", v.PactLogLevel) - } - - return nil -} diff --git a/vendor/github.com/pact-foundation/pact-go/utils/port.go b/vendor/github.com/pact-foundation/pact-go/utils/port.go deleted file mode 100644 index 605fcfa..0000000 --- a/vendor/github.com/pact-foundation/pact-go/utils/port.go +++ /dev/null @@ -1,87 +0,0 @@ -// Package utils contains a number of helper / utility functions. -package utils - -import ( - "errors" - "fmt" - "net" - "strconv" - "strings" -) - -// GetFreePort Gets an available port by asking the kernal for a random port -// ready and available for use. 
-func GetFreePort() (int, error) { - addr, err := net.ResolveTCPAddr("tcp", "localhost:0") - if err != nil { - return 0, err - } - - l, err := net.ListenTCP("tcp", addr) - if err != nil { - return 0, err - } - defer l.Close() - return l.Addr().(*net.TCPAddr).Port, nil -} - -// FindPortInRange Iterate through CSV or Range of ports to find open port -// Valid inputs are "8081", "8081,8085", "8081-8085". Do not combine -// list and range -func FindPortInRange(s string) (int, error) { - // Take care of csv and single value - if !strings.Contains(s, "-") { - ports := strings.Split(strings.TrimSpace(s), ",") - for _, p := range ports { - i, err := strconv.Atoi(p) - if err != nil { - return 0, err - } - err = checkPort(i) - if err != nil { - continue - } - return i, nil - } - return 0, errors.New("all passed ports are unusable") - } - // Now take care of ranges - ports := strings.Split(strings.TrimSpace(s), "-") - if len(ports) != 2 { - return 0, errors.New("invalid range passed") - } - lower, err := strconv.Atoi(ports[0]) - if err != nil { - return 0, err - } - upper, err := strconv.Atoi(ports[1]) - if err != nil { - return 0, err - } - if upper < lower { - return 0, errors.New("invalid range passed") - } - for i := lower; i <= upper; i++ { - err = checkPort(i) - if err != nil { - continue - } - return i, nil - } - return 0, errors.New("all passed ports are unusable") -} - -func checkPort(p int) error { - s := fmt.Sprintf("localhost:%d", p) - addr, err := net.ResolveTCPAddr("tcp", s) - if err != nil { - return err - } - - l, err := net.ListenTCP("tcp", addr) - if err != nil { - return err - } - defer l.Close() - return nil -} diff --git a/vendor/github.com/pmezard/go-difflib/LICENSE b/vendor/github.com/pmezard/go-difflib/LICENSE deleted file mode 100644 index c67dad6..0000000 --- a/vendor/github.com/pmezard/go-difflib/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2013, Patrick Mezard -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. - The names of its contributors may not be used to endorse or promote -products derived from this software without specific prior written -permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS -IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED -TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED -TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go deleted file mode 100644 index 003e99f..0000000 --- a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go +++ /dev/null @@ -1,772 +0,0 @@ -// Package difflib is a partial port of Python difflib module. -// -// It provides tools to compare sequences of strings and generate textual diffs. -// -// The following class and functions have been ported: -// -// - SequenceMatcher -// -// - unified_diff -// -// - context_diff -// -// Getting unified diffs was the main goal of the port. Keep in mind this code -// is mostly suitable to output text differences in a human friendly way, there -// are no guarantees generated diffs are consumable by patch(1). -package difflib - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strings" -) - -func min(a, b int) int { - if a < b { - return a - } - return b -} - -func max(a, b int) int { - if a > b { - return a - } - return b -} - -func calculateRatio(matches, length int) float64 { - if length > 0 { - return 2.0 * float64(matches) / float64(length) - } - return 1.0 -} - -type Match struct { - A int - B int - Size int -} - -type OpCode struct { - Tag byte - I1 int - I2 int - J1 int - J2 int -} - -// SequenceMatcher compares sequence of strings. The basic -// algorithm predates, and is a little fancier than, an algorithm -// published in the late 1980's by Ratcliff and Obershelp under the -// hyperbolic name "gestalt pattern matching". The basic idea is to find -// the longest contiguous matching subsequence that contains no "junk" -// elements (R-O doesn't address junk). The same idea is then applied -// recursively to the pieces of the sequences to the left and to the right -// of the matching subsequence. This does not yield minimal edit -// sequences, but does tend to yield matches that "look right" to people. -// -// SequenceMatcher tries to compute a "human-friendly diff" between two -// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the -// longest *contiguous* & junk-free matching subsequence. That's what -// catches peoples' eyes. The Windows(tm) windiff has another interesting -// notion, pairing up elements that appear uniquely in each sequence. -// That, and the method here, appear to yield more intuitive difference -// reports than does diff. This method appears to be the least vulnerable -// to synching up on blocks of "junk lines", though (like blank lines in -// ordinary text files, or maybe "
<P>
" lines in HTML files). That may be -// because this is the only method of the 3 that has a *concept* of -// "junk" . -// -// Timing: Basic R-O is cubic time worst case and quadratic time expected -// case. SequenceMatcher is quadratic time for the worst case and has -// expected-case behavior dependent in a complicated way on how many -// elements the sequences have in common; best case time is linear. -type SequenceMatcher struct { - a []string - b []string - b2j map[string][]int - IsJunk func(string) bool - autoJunk bool - bJunk map[string]struct{} - matchingBlocks []Match - fullBCount map[string]int - bPopular map[string]struct{} - opCodes []OpCode -} - -func NewMatcher(a, b []string) *SequenceMatcher { - m := SequenceMatcher{autoJunk: true} - m.SetSeqs(a, b) - return &m -} - -func NewMatcherWithJunk(a, b []string, autoJunk bool, - isJunk func(string) bool) *SequenceMatcher { - - m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk} - m.SetSeqs(a, b) - return &m -} - -// Set two sequences to be compared. -func (m *SequenceMatcher) SetSeqs(a, b []string) { - m.SetSeq1(a) - m.SetSeq2(b) -} - -// Set the first sequence to be compared. The second sequence to be compared is -// not changed. -// -// SequenceMatcher computes and caches detailed information about the second -// sequence, so if you want to compare one sequence S against many sequences, -// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other -// sequences. -// -// See also SetSeqs() and SetSeq2(). -func (m *SequenceMatcher) SetSeq1(a []string) { - if &a == &m.a { - return - } - m.a = a - m.matchingBlocks = nil - m.opCodes = nil -} - -// Set the second sequence to be compared. The first sequence to be compared is -// not changed. -func (m *SequenceMatcher) SetSeq2(b []string) { - if &b == &m.b { - return - } - m.b = b - m.matchingBlocks = nil - m.opCodes = nil - m.fullBCount = nil - m.chainB() -} - -func (m *SequenceMatcher) chainB() { - // Populate line -> index mapping - b2j := map[string][]int{} - for i, s := range m.b { - indices := b2j[s] - indices = append(indices, i) - b2j[s] = indices - } - - // Purge junk elements - m.bJunk = map[string]struct{}{} - if m.IsJunk != nil { - junk := m.bJunk - for s, _ := range b2j { - if m.IsJunk(s) { - junk[s] = struct{}{} - } - } - for s, _ := range junk { - delete(b2j, s) - } - } - - // Purge remaining popular elements - popular := map[string]struct{}{} - n := len(m.b) - if m.autoJunk && n >= 200 { - ntest := n/100 + 1 - for s, indices := range b2j { - if len(indices) > ntest { - popular[s] = struct{}{} - } - } - for s, _ := range popular { - delete(b2j, s) - } - } - m.bPopular = popular - m.b2j = b2j -} - -func (m *SequenceMatcher) isBJunk(s string) bool { - _, ok := m.bJunk[s] - return ok -} - -// Find longest matching block in a[alo:ahi] and b[blo:bhi]. -// -// If IsJunk is not defined: -// -// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where -// alo <= i <= i+k <= ahi -// blo <= j <= j+k <= bhi -// and for all (i',j',k') meeting those conditions, -// k >= k' -// i <= i' -// and if i == i', j <= j' -// -// In other words, of all maximal matching blocks, return one that -// starts earliest in a, and of all those maximal matching blocks that -// start earliest in a, return the one that starts earliest in b. -// -// If IsJunk is defined, first the longest matching block is -// determined as above, but with the additional restriction that no -// junk element appears in the block. 
Then that block is extended as -// far as possible by matching (only) junk elements on both sides. So -// the resulting block never matches on junk except as identical junk -// happens to be adjacent to an "interesting" match. -// -// If no blocks match, return (alo, blo, 0). -func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match { - // CAUTION: stripping common prefix or suffix would be incorrect. - // E.g., - // ab - // acab - // Longest matching block is "ab", but if common prefix is - // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so - // strip, so ends up claiming that ab is changed to acab by - // inserting "ca" in the middle. That's minimal but unintuitive: - // "it's obvious" that someone inserted "ac" at the front. - // Windiff ends up at the same place as diff, but by pairing up - // the unique 'b's and then matching the first two 'a's. - besti, bestj, bestsize := alo, blo, 0 - - // find longest junk-free match - // during an iteration of the loop, j2len[j] = length of longest - // junk-free match ending with a[i-1] and b[j] - j2len := map[int]int{} - for i := alo; i != ahi; i++ { - // look at all instances of a[i] in b; note that because - // b2j has no junk keys, the loop is skipped if a[i] is junk - newj2len := map[int]int{} - for _, j := range m.b2j[m.a[i]] { - // a[i] matches b[j] - if j < blo { - continue - } - if j >= bhi { - break - } - k := j2len[j-1] + 1 - newj2len[j] = k - if k > bestsize { - besti, bestj, bestsize = i-k+1, j-k+1, k - } - } - j2len = newj2len - } - - // Extend the best by non-junk elements on each end. In particular, - // "popular" non-junk elements aren't in b2j, which greatly speeds - // the inner loop above, but also means "the best" match so far - // doesn't contain any junk *or* popular non-junk elements. - for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) && - m.a[besti-1] == m.b[bestj-1] { - besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 - } - for besti+bestsize < ahi && bestj+bestsize < bhi && - !m.isBJunk(m.b[bestj+bestsize]) && - m.a[besti+bestsize] == m.b[bestj+bestsize] { - bestsize += 1 - } - - // Now that we have a wholly interesting match (albeit possibly - // empty!), we may as well suck up the matching junk on each - // side of it too. Can't think of a good reason not to, and it - // saves post-processing the (possibly considerable) expense of - // figuring out what to do with it. In the case of an empty - // interesting match, this is clearly the right thing to do, - // because no other kind of match is possible in the regions. - for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) && - m.a[besti-1] == m.b[bestj-1] { - besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 - } - for besti+bestsize < ahi && bestj+bestsize < bhi && - m.isBJunk(m.b[bestj+bestsize]) && - m.a[besti+bestsize] == m.b[bestj+bestsize] { - bestsize += 1 - } - - return Match{A: besti, B: bestj, Size: bestsize} -} - -// Return list of triples describing matching subsequences. -// -// Each triple is of the form (i, j, n), and means that -// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in -// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are -// adjacent triples in the list, and the second is not the last triple in the -// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe -// adjacent equal blocks. -// -// The last triple is a dummy, (len(a), len(b), 0), and is the only -// triple with n==0. 
-func (m *SequenceMatcher) GetMatchingBlocks() []Match { - if m.matchingBlocks != nil { - return m.matchingBlocks - } - - var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match - matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match { - match := m.findLongestMatch(alo, ahi, blo, bhi) - i, j, k := match.A, match.B, match.Size - if match.Size > 0 { - if alo < i && blo < j { - matched = matchBlocks(alo, i, blo, j, matched) - } - matched = append(matched, match) - if i+k < ahi && j+k < bhi { - matched = matchBlocks(i+k, ahi, j+k, bhi, matched) - } - } - return matched - } - matched := matchBlocks(0, len(m.a), 0, len(m.b), nil) - - // It's possible that we have adjacent equal blocks in the - // matching_blocks list now. - nonAdjacent := []Match{} - i1, j1, k1 := 0, 0, 0 - for _, b := range matched { - // Is this block adjacent to i1, j1, k1? - i2, j2, k2 := b.A, b.B, b.Size - if i1+k1 == i2 && j1+k1 == j2 { - // Yes, so collapse them -- this just increases the length of - // the first block by the length of the second, and the first - // block so lengthened remains the block to compare against. - k1 += k2 - } else { - // Not adjacent. Remember the first block (k1==0 means it's - // the dummy we started with), and make the second block the - // new block to compare against. - if k1 > 0 { - nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) - } - i1, j1, k1 = i2, j2, k2 - } - } - if k1 > 0 { - nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) - } - - nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0}) - m.matchingBlocks = nonAdjacent - return m.matchingBlocks -} - -// Return list of 5-tuples describing how to turn a into b. -// -// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple -// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the -// tuple preceding it, and likewise for j1 == the previous j2. -// -// The tags are characters, with these meanings: -// -// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2] -// -// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case. -// -// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case. -// -// 'e' (equal): a[i1:i2] == b[j1:j2] -func (m *SequenceMatcher) GetOpCodes() []OpCode { - if m.opCodes != nil { - return m.opCodes - } - i, j := 0, 0 - matching := m.GetMatchingBlocks() - opCodes := make([]OpCode, 0, len(matching)) - for _, m := range matching { - // invariant: we've pumped out correct diffs to change - // a[:i] into b[:j], and the next matching block is - // a[ai:ai+size] == b[bj:bj+size]. So we need to pump - // out a diff to change a[i:ai] into b[j:bj], pump out - // the matching block, and move (i,j) beyond the match - ai, bj, size := m.A, m.B, m.Size - tag := byte(0) - if i < ai && j < bj { - tag = 'r' - } else if i < ai { - tag = 'd' - } else if j < bj { - tag = 'i' - } - if tag > 0 { - opCodes = append(opCodes, OpCode{tag, i, ai, j, bj}) - } - i, j = ai+size, bj+size - // the list of matching blocks is terminated by a - // sentinel with size 0 - if size > 0 { - opCodes = append(opCodes, OpCode{'e', ai, i, bj, j}) - } - } - m.opCodes = opCodes - return m.opCodes -} - -// Isolate change clusters by eliminating ranges with no changes. -// -// Return a generator of groups with up to n lines of context. -// Each group is in the same format as returned by GetOpCodes(). 
-func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { - if n < 0 { - n = 3 - } - codes := m.GetOpCodes() - if len(codes) == 0 { - codes = []OpCode{OpCode{'e', 0, 1, 0, 1}} - } - // Fixup leading and trailing groups if they show no changes. - if codes[0].Tag == 'e' { - c := codes[0] - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2} - } - if codes[len(codes)-1].Tag == 'e' { - c := codes[len(codes)-1] - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)} - } - nn := n + n - groups := [][]OpCode{} - group := []OpCode{} - for _, c := range codes { - i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2 - // End the current group and start a new one whenever - // there is a large range with no changes. - if c.Tag == 'e' && i2-i1 > nn { - group = append(group, OpCode{c.Tag, i1, min(i2, i1+n), - j1, min(j2, j1+n)}) - groups = append(groups, group) - group = []OpCode{} - i1, j1 = max(i1, i2-n), max(j1, j2-n) - } - group = append(group, OpCode{c.Tag, i1, i2, j1, j2}) - } - if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') { - groups = append(groups, group) - } - return groups -} - -// Return a measure of the sequences' similarity (float in [0,1]). -// -// Where T is the total number of elements in both sequences, and -// M is the number of matches, this is 2.0*M / T. -// Note that this is 1 if the sequences are identical, and 0 if -// they have nothing in common. -// -// .Ratio() is expensive to compute if you haven't already computed -// .GetMatchingBlocks() or .GetOpCodes(), in which case you may -// want to try .QuickRatio() or .RealQuickRation() first to get an -// upper bound. -func (m *SequenceMatcher) Ratio() float64 { - matches := 0 - for _, m := range m.GetMatchingBlocks() { - matches += m.Size - } - return calculateRatio(matches, len(m.a)+len(m.b)) -} - -// Return an upper bound on ratio() relatively quickly. -// -// This isn't defined beyond that it is an upper bound on .Ratio(), and -// is faster to compute. -func (m *SequenceMatcher) QuickRatio() float64 { - // viewing a and b as multisets, set matches to the cardinality - // of their intersection; this counts the number of matches - // without regard to order, so is clearly an upper bound - if m.fullBCount == nil { - m.fullBCount = map[string]int{} - for _, s := range m.b { - m.fullBCount[s] = m.fullBCount[s] + 1 - } - } - - // avail[x] is the number of times x appears in 'b' less the - // number of times we've seen it in 'a' so far ... kinda - avail := map[string]int{} - matches := 0 - for _, s := range m.a { - n, ok := avail[s] - if !ok { - n = m.fullBCount[s] - } - avail[s] = n - 1 - if n > 0 { - matches += 1 - } - } - return calculateRatio(matches, len(m.a)+len(m.b)) -} - -// Return an upper bound on ratio() very quickly. -// -// This isn't defined beyond that it is an upper bound on .Ratio(), and -// is faster to compute than either .Ratio() or .QuickRatio(). 
-func (m *SequenceMatcher) RealQuickRatio() float64 { - la, lb := len(m.a), len(m.b) - return calculateRatio(min(la, lb), la+lb) -} - -// Convert range to the "ed" format -func formatRangeUnified(start, stop int) string { - // Per the diff spec at http://www.unix.org/single_unix_specification/ - beginning := start + 1 // lines start numbering with one - length := stop - start - if length == 1 { - return fmt.Sprintf("%d", beginning) - } - if length == 0 { - beginning -= 1 // empty ranges begin at line just before the range - } - return fmt.Sprintf("%d,%d", beginning, length) -} - -// Unified diff parameters -type UnifiedDiff struct { - A []string // First sequence lines - FromFile string // First file name - FromDate string // First file time - B []string // Second sequence lines - ToFile string // Second file name - ToDate string // Second file time - Eol string // Headers end of line, defaults to LF - Context int // Number of context lines -} - -// Compare two sequences of lines; generate the delta as a unified diff. -// -// Unified diffs are a compact way of showing line changes and a few -// lines of context. The number of context lines is set by 'n' which -// defaults to three. -// -// By default, the diff control lines (those with ---, +++, or @@) are -// created with a trailing newline. This is helpful so that inputs -// created from file.readlines() result in diffs that are suitable for -// file.writelines() since both the inputs and outputs have trailing -// newlines. -// -// For inputs that do not have trailing newlines, set the lineterm -// argument to "" so that the output will be uniformly newline free. -// -// The unidiff format normally has a header for filenames and modification -// times. Any or all of these may be specified using strings for -// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. -// The modification times are normally expressed in the ISO 8601 format. 
[Remainder of the deleted vendored difflib source (WriteUnifiedDiff, WriteContextDiff, SplitLines and related helpers) elided.]
diff --git a/vendor/github.com/stretchr/testify/LICENSE b/vendor/github.com/stretchr/testify/LICENSE
deleted file mode 100644
index 4b0421c..0000000
--- a/vendor/github.com/stretchr/testify/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
[Deleted vendored MIT license text (21 lines) elided.]
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go
deleted file mode 100644
index dc20039..0000000
--- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go
+++ /dev/null
@@ -1,274 +0,0 @@
[Deleted vendored testify comparison assertions (Greater, GreaterOrEqual, Less, LessOrEqual and helpers; 274 lines) elided.]
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go
deleted file mode 100644
index 49370eb..0000000
--- a/vendor/github.com/stretchr/testify/assert/assertion_format.go
+++ /dev/null
@@ -1,644 +0,0 @@
[Deleted vendored, code-generated formatted assertion wrappers (Conditionf, Containsf, Equalf, ... Zerof; 644 lines) elided.]
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl
deleted file mode 100644
index d2bb0b8..0000000
--- a/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl
+++ /dev/null
@@ -1,5 +0,0 @@
-{{.CommentFormat}}
-func {{.DocInfo.Name}}f(t TestingT, {{.ParamsFormat}}) bool {
-	if h, ok := t.(tHelper); ok { h.Helper() }
-	return {{.DocInfo.Name}}(t, {{.ForwardedParamsFormat}})
-}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
deleted file mode 100644
index 9db8894..0000000
--- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go
+++ /dev/null
@@ -1,1276 +0,0 @@
[Deleted vendored, code-generated Assertions forwarder methods elided; the final entries of this 1,276-line file continue below.]
-// NoDirExists checks whether a directory does not exist in the given path.
-// It fails if the path points to an existing _directory_ only.
-func (a *Assertions) NoDirExists(path string, msgAndArgs ...interface{}) bool {
-	if h, ok := a.t.(tHelper); ok {
-		h.Helper()
-	}
-	return NoDirExists(a.t, path, msgAndArgs...)
-} - -// NoDirExistsf checks whether a directory does not exist in the given path. -// It fails if the path points to an existing _directory_ only. -func (a *Assertions) NoDirExistsf(path string, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NoDirExistsf(a.t, path, msg, args...) -} - -// NoError asserts that a function returned no error (i.e. `nil`). -// -// actualObj, err := SomeFunction() -// if a.NoError(err) { -// assert.Equal(t, expectedObj, actualObj) -// } -func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NoError(a.t, err, msgAndArgs...) -} - -// NoErrorf asserts that a function returned no error (i.e. `nil`). -// -// actualObj, err := SomeFunction() -// if a.NoErrorf(err, "error message %s", "formatted") { -// assert.Equal(t, expectedObj, actualObj) -// } -func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NoErrorf(a.t, err, msg, args...) -} - -// NoFileExists checks whether a file does not exist in a given path. It fails -// if the path points to an existing _file_ only. -func (a *Assertions) NoFileExists(path string, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NoFileExists(a.t, path, msgAndArgs...) -} - -// NoFileExistsf checks whether a file does not exist in a given path. It fails -// if the path points to an existing _file_ only. -func (a *Assertions) NoFileExistsf(path string, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NoFileExistsf(a.t, path, msg, args...) -} - -// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the -// specified substring or element. -// -// a.NotContains("Hello World", "Earth") -// a.NotContains(["Hello", "World"], "Earth") -// a.NotContains({"Hello": "World"}, "Earth") -func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotContains(a.t, s, contains, msgAndArgs...) -} - -// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the -// specified substring or element. -// -// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted") -// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted") -// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted") -func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotContainsf(a.t, s, contains, msg, args...) -} - -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// if a.NotEmpty(obj) { -// assert.Equal(t, "two", obj[1]) -// } -func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotEmpty(a.t, object, msgAndArgs...) -} - -// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. 
-// -// if a.NotEmptyf(obj, "error message %s", "formatted") { -// assert.Equal(t, "two", obj[1]) -// } -func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotEmptyf(a.t, object, msg, args...) -} - -// NotEqual asserts that the specified values are NOT equal. -// -// a.NotEqual(obj1, obj2) -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). -func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotEqual(a.t, expected, actual, msgAndArgs...) -} - -// NotEqualValues asserts that two objects are not equal even when converted to the same type -// -// a.NotEqualValues(obj1, obj2) -func (a *Assertions) NotEqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotEqualValues(a.t, expected, actual, msgAndArgs...) -} - -// NotEqualValuesf asserts that two objects are not equal even when converted to the same type -// -// a.NotEqualValuesf(obj1, obj2, "error message %s", "formatted") -func (a *Assertions) NotEqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotEqualValuesf(a.t, expected, actual, msg, args...) -} - -// NotEqualf asserts that the specified values are NOT equal. -// -// a.NotEqualf(obj1, obj2, "error message %s", "formatted") -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). -func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotEqualf(a.t, expected, actual, msg, args...) -} - -// NotNil asserts that the specified object is not nil. -// -// a.NotNil(err) -func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotNil(a.t, object, msgAndArgs...) -} - -// NotNilf asserts that the specified object is not nil. -// -// a.NotNilf(err, "error message %s", "formatted") -func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotNilf(a.t, object, msg, args...) -} - -// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. -// -// a.NotPanics(func(){ RemainCalm() }) -func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotPanics(a.t, f, msgAndArgs...) -} - -// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic. -// -// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted") -func (a *Assertions) NotPanicsf(f PanicTestFunc, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotPanicsf(a.t, f, msg, args...) -} - -// NotRegexp asserts that a specified regexp does not match a string. 
-// -// a.NotRegexp(regexp.MustCompile("starts"), "it's starting") -// a.NotRegexp("^start", "it's not starting") -func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotRegexp(a.t, rx, str, msgAndArgs...) -} - -// NotRegexpf asserts that a specified regexp does not match a string. -// -// a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted") -// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted") -func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotRegexpf(a.t, rx, str, msg, args...) -} - -// NotSame asserts that two pointers do not reference the same object. -// -// a.NotSame(ptr1, ptr2) -// -// Both arguments must be pointer variables. Pointer variable sameness is -// determined based on the equality of both type and value. -func (a *Assertions) NotSame(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotSame(a.t, expected, actual, msgAndArgs...) -} - -// NotSamef asserts that two pointers do not reference the same object. -// -// a.NotSamef(ptr1, ptr2, "error message %s", "formatted") -// -// Both arguments must be pointer variables. Pointer variable sameness is -// determined based on the equality of both type and value. -func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotSamef(a.t, expected, actual, msg, args...) -} - -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). -// -// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") -func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotSubset(a.t, list, subset, msgAndArgs...) -} - -// NotSubsetf asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). -// -// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") -func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotSubsetf(a.t, list, subset, msg, args...) -} - -// NotZero asserts that i is not the zero value for its type. -func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotZero(a.t, i, msgAndArgs...) -} - -// NotZerof asserts that i is not the zero value for its type. -func (a *Assertions) NotZerof(i interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return NotZerof(a.t, i, msg, args...) -} - -// Panics asserts that the code inside the specified PanicTestFunc panics. -// -// a.Panics(func(){ GoCrazy() }) -func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Panics(a.t, f, msgAndArgs...) 
-} - -// PanicsWithError asserts that the code inside the specified PanicTestFunc -// panics, and that the recovered panic value is an error that satisfies the -// EqualError comparison. -// -// a.PanicsWithError("crazy error", func(){ GoCrazy() }) -func (a *Assertions) PanicsWithError(errString string, f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return PanicsWithError(a.t, errString, f, msgAndArgs...) -} - -// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc -// panics, and that the recovered panic value is an error that satisfies the -// EqualError comparison. -// -// a.PanicsWithErrorf("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") -func (a *Assertions) PanicsWithErrorf(errString string, f PanicTestFunc, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return PanicsWithErrorf(a.t, errString, f, msg, args...) -} - -// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that -// the recovered panic value equals the expected panic value. -// -// a.PanicsWithValue("crazy error", func(){ GoCrazy() }) -func (a *Assertions) PanicsWithValue(expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return PanicsWithValue(a.t, expected, f, msgAndArgs...) -} - -// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that -// the recovered panic value equals the expected panic value. -// -// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted") -func (a *Assertions) PanicsWithValuef(expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return PanicsWithValuef(a.t, expected, f, msg, args...) -} - -// Panicsf asserts that the code inside the specified PanicTestFunc panics. -// -// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted") -func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Panicsf(a.t, f, msg, args...) -} - -// Regexp asserts that a specified regexp matches a string. -// -// a.Regexp(regexp.MustCompile("start"), "it's starting") -// a.Regexp("start...$", "it's not starting") -func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Regexp(a.t, rx, str, msgAndArgs...) -} - -// Regexpf asserts that a specified regexp matches a string. -// -// a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted") -// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted") -func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Regexpf(a.t, rx, str, msg, args...) -} - -// Same asserts that two pointers reference the same object. -// -// a.Same(ptr1, ptr2) -// -// Both arguments must be pointer variables. Pointer variable sameness is -// determined based on the equality of both type and value. -func (a *Assertions) Same(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Same(a.t, expected, actual, msgAndArgs...) -} - -// Samef asserts that two pointers reference the same object. 
-// -// a.Samef(ptr1, ptr2, "error message %s", "formatted") -// -// Both arguments must be pointer variables. Pointer variable sameness is -// determined based on the equality of both type and value. -func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Samef(a.t, expected, actual, msg, args...) -} - -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). -// -// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") -func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Subset(a.t, list, subset, msgAndArgs...) -} - -// Subsetf asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). -// -// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") -func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Subsetf(a.t, list, subset, msg, args...) -} - -// True asserts that the specified value is true. -// -// a.True(myBool) -func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return True(a.t, value, msgAndArgs...) -} - -// Truef asserts that the specified value is true. -// -// a.Truef(myBool, "error message %s", "formatted") -func (a *Assertions) Truef(value bool, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Truef(a.t, value, msg, args...) -} - -// WithinDuration asserts that the two times are within duration delta of each other. -// -// a.WithinDuration(time.Now(), time.Now(), 10*time.Second) -func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return WithinDuration(a.t, expected, actual, delta, msgAndArgs...) -} - -// WithinDurationf asserts that the two times are within duration delta of each other. -// -// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted") -func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return WithinDurationf(a.t, expected, actual, delta, msg, args...) -} - -// YAMLEq asserts that two YAML strings are equivalent. -func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return YAMLEq(a.t, expected, actual, msgAndArgs...) -} - -// YAMLEqf asserts that two YAML strings are equivalent. -func (a *Assertions) YAMLEqf(expected string, actual string, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return YAMLEqf(a.t, expected, actual, msg, args...) -} - -// Zero asserts that i is the zero value for its type. -func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Zero(a.t, i, msgAndArgs...) -} - -// Zerof asserts that i is the zero value for its type. 
-func (a *Assertions) Zerof(i interface{}, msg string, args ...interface{}) bool { - if h, ok := a.t.(tHelper); ok { - h.Helper() - } - return Zerof(a.t, i, msg, args...) -} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl deleted file mode 100644 index 188bb9e..0000000 --- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl +++ /dev/null @@ -1,5 +0,0 @@ -{{.CommentWithoutT "a"}} -func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) bool { - if h, ok := a.t.(tHelper); ok { h.Helper() } - return {{.DocInfo.Name}}(a.t, {{.ForwardedParams}}) -} diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go deleted file mode 100644 index 914a10d..0000000 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ /dev/null @@ -1,1695 +0,0 @@ -package assert - -import ( - "bufio" - "bytes" - "encoding/json" - "errors" - "fmt" - "math" - "os" - "reflect" - "regexp" - "runtime" - "runtime/debug" - "strings" - "time" - "unicode" - "unicode/utf8" - - "github.com/davecgh/go-spew/spew" - "github.com/pmezard/go-difflib/difflib" - yaml "gopkg.in/yaml.v3" -) - -//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl" - -// TestingT is an interface wrapper around *testing.T -type TestingT interface { - Errorf(format string, args ...interface{}) -} - -// ComparisonAssertionFunc is a common function prototype when comparing two values. Can be useful -// for table driven tests. -type ComparisonAssertionFunc func(TestingT, interface{}, interface{}, ...interface{}) bool - -// ValueAssertionFunc is a common function prototype when validating a single value. Can be useful -// for table driven tests. -type ValueAssertionFunc func(TestingT, interface{}, ...interface{}) bool - -// BoolAssertionFunc is a common function prototype when validating a bool value. Can be useful -// for table driven tests. -type BoolAssertionFunc func(TestingT, bool, ...interface{}) bool - -// ErrorAssertionFunc is a common function prototype when validating an error value. Can be useful -// for table driven tests. -type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool - -// Comparison is a custom function that returns true on success and false on failure -type Comparison func() (success bool) - -/* - Helper functions -*/ - -// ObjectsAreEqual determines if two objects are considered equal. -// -// This function does no assertion of any kind. -func ObjectsAreEqual(expected, actual interface{}) bool { - if expected == nil || actual == nil { - return expected == actual - } - - exp, ok := expected.([]byte) - if !ok { - return reflect.DeepEqual(expected, actual) - } - - act, ok := actual.([]byte) - if !ok { - return false - } - if exp == nil || act == nil { - return exp == nil && act == nil - } - return bytes.Equal(exp, act) -} - -// ObjectsAreEqualValues gets whether two objects are equal, or if their -// values are equal. 
-func ObjectsAreEqualValues(expected, actual interface{}) bool { - if ObjectsAreEqual(expected, actual) { - return true - } - - actualType := reflect.TypeOf(actual) - if actualType == nil { - return false - } - expectedValue := reflect.ValueOf(expected) - if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) { - // Attempt comparison after type conversion - return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual) - } - - return false -} - -/* CallerInfo is necessary because the assert functions use the testing object -internally, causing it to print the file:line of the assert method, rather than where -the problem actually occurred in calling code.*/ - -// CallerInfo returns an array of strings containing the file and line number -// of each stack frame leading from the current test to the assert call that -// failed. -func CallerInfo() []string { - - var pc uintptr - var ok bool - var file string - var line int - var name string - - callers := []string{} - for i := 0; ; i++ { - pc, file, line, ok = runtime.Caller(i) - if !ok { - // The breaks below failed to terminate the loop, and we ran off the - // end of the call stack. - break - } - - // This is a huge edge case, but it will panic if this is the case, see #180 - if file == "" { - break - } - - f := runtime.FuncForPC(pc) - if f == nil { - break - } - name = f.Name() - - // testing.tRunner is the standard library function that calls - // tests. Subtests are called directly by tRunner, without going through - // the Test/Benchmark/Example function that contains the t.Run calls, so - // with subtests we should break when we hit tRunner, without adding it - // to the list of callers. - if name == "testing.tRunner" { - break - } - - parts := strings.Split(file, "/") - file = parts[len(parts)-1] - if len(parts) > 1 { - dir := parts[len(parts)-2] - if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" { - callers = append(callers, fmt.Sprintf("%s:%d", file, line)) - } - } - - // Drop the package - segments := strings.Split(name, ".") - name = segments[len(segments)-1] - if isTest(name, "Test") || - isTest(name, "Benchmark") || - isTest(name, "Example") { - break - } - } - - return callers -} - -// Stolen from the `go test` tool. -// isTest tells whether name looks like a test (or benchmark, according to prefix). -// It is a Test (say) if there is a character after Test that is not a lower-case letter. -// We don't want TesticularCancer. -func isTest(name, prefix string) bool { - if !strings.HasPrefix(name, prefix) { - return false - } - if len(name) == len(prefix) { // "Test" is ok - return true - } - rune, _ := utf8.DecodeRuneInString(name[len(prefix):]) - return !unicode.IsLower(rune) -} - -func messageFromMsgAndArgs(msgAndArgs ...interface{}) string { - if len(msgAndArgs) == 0 || msgAndArgs == nil { - return "" - } - if len(msgAndArgs) == 1 { - msg := msgAndArgs[0] - if msgAsStr, ok := msg.(string); ok { - return msgAsStr - } - return fmt.Sprintf("%+v", msg) - } - if len(msgAndArgs) > 1 { - return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...) - } - return "" -} - -// Aligns the provided message so that all lines after the first line start at the same location as the first line. -// Assumes that the first line starts at the correct location (after carriage return, tab, label, spacer and tab). -// The longestLabelLen parameter specifies the length of the longest label in the output (required becaues this is the -// basis on which the alignment occurs). 
-func indentMessageLines(message string, longestLabelLen int) string { - outBuf := new(bytes.Buffer) - - for i, scanner := 0, bufio.NewScanner(strings.NewReader(message)); scanner.Scan(); i++ { - // no need to align first line because it starts at the correct location (after the label) - if i != 0 { - // append alignLen+1 spaces to align with "{{longestLabel}}:" before adding tab - outBuf.WriteString("\n\t" + strings.Repeat(" ", longestLabelLen+1) + "\t") - } - outBuf.WriteString(scanner.Text()) - } - - return outBuf.String() -} - -type failNower interface { - FailNow() -} - -// FailNow fails test -func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - Fail(t, failureMessage, msgAndArgs...) - - // We cannot extend TestingT with FailNow() and - // maintain backwards compatibility, so we fallback - // to panicking when FailNow is not available in - // TestingT. - // See issue #263 - - if t, ok := t.(failNower); ok { - t.FailNow() - } else { - panic("test failed and t is missing `FailNow()`") - } - return false -} - -// Fail reports a failure through -func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - content := []labeledContent{ - {"Error Trace", strings.Join(CallerInfo(), "\n\t\t\t")}, - {"Error", failureMessage}, - } - - // Add test name if the Go version supports it - if n, ok := t.(interface { - Name() string - }); ok { - content = append(content, labeledContent{"Test", n.Name()}) - } - - message := messageFromMsgAndArgs(msgAndArgs...) - if len(message) > 0 { - content = append(content, labeledContent{"Messages", message}) - } - - t.Errorf("\n%s", ""+labeledOutput(content...)) - - return false -} - -type labeledContent struct { - label string - content string -} - -// labeledOutput returns a string consisting of the provided labeledContent. Each labeled output is appended in the following manner: -// -// \t{{label}}:{{align_spaces}}\t{{content}}\n -// -// The initial carriage return is required to undo/erase any padding added by testing.T.Errorf. The "\t{{label}}:" is for the label. -// If a label is shorter than the longest label provided, padding spaces are added to make all the labels match in length. Once this -// alignment is achieved, "\t{{content}}\n" is added for the output. -// -// If the content of the labeledOutput contains line breaks, the subsequent lines are aligned so that they start at the same location as the first line. -func labeledOutput(content ...labeledContent) string { - longestLabel := 0 - for _, v := range content { - if len(v.label) > longestLabel { - longestLabel = len(v.label) - } - } - var output string - for _, v := range content { - output += "\t" + v.label + ":" + strings.Repeat(" ", longestLabel-len(v.label)) + "\t" + indentMessageLines(v.content, longestLabel) + "\n" - } - return output -} - -// Implements asserts that an object is implemented by the specified interface. -// -// assert.Implements(t, (*MyInterface)(nil), new(MyObject)) -func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - interfaceType := reflect.TypeOf(interfaceObject).Elem() - - if object == nil { - return Fail(t, fmt.Sprintf("Cannot check if nil implements %v", interfaceType), msgAndArgs...) 
- } - if !reflect.TypeOf(object).Implements(interfaceType) { - return Fail(t, fmt.Sprintf("%T must implement %v", object, interfaceType), msgAndArgs...) - } - - return true -} - -// IsType asserts that the specified objects are of the same type. -func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) { - return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...) - } - - return true -} - -// Equal asserts that two objects are equal. -// -// assert.Equal(t, 123, 123) -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). Function equality -// cannot be determined and will always fail. -func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if err := validateEqualArgs(expected, actual); err != nil { - return Fail(t, fmt.Sprintf("Invalid operation: %#v == %#v (%s)", - expected, actual, err), msgAndArgs...) - } - - if !ObjectsAreEqual(expected, actual) { - diff := diff(expected, actual) - expected, actual = formatUnequalValues(expected, actual) - return Fail(t, fmt.Sprintf("Not equal: \n"+ - "expected: %s\n"+ - "actual : %s%s", expected, actual, diff), msgAndArgs...) - } - - return true - -} - -// validateEqualArgs checks whether provided arguments can be safely used in the -// Equal/NotEqual functions. -func validateEqualArgs(expected, actual interface{}) error { - if expected == nil && actual == nil { - return nil - } - - if isFunction(expected) || isFunction(actual) { - return errors.New("cannot take func type as argument") - } - return nil -} - -// Same asserts that two pointers reference the same object. -// -// assert.Same(t, ptr1, ptr2) -// -// Both arguments must be pointer variables. Pointer variable sameness is -// determined based on the equality of both type and value. -func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if !samePointers(expected, actual) { - return Fail(t, fmt.Sprintf("Not same: \n"+ - "expected: %p %#v\n"+ - "actual : %p %#v", expected, expected, actual, actual), msgAndArgs...) - } - - return true -} - -// NotSame asserts that two pointers do not reference the same object. -// -// assert.NotSame(t, ptr1, ptr2) -// -// Both arguments must be pointer variables. Pointer variable sameness is -// determined based on the equality of both type and value. -func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if samePointers(expected, actual) { - return Fail(t, fmt.Sprintf( - "Expected and actual point to the same object: %p %#v", - expected, expected), msgAndArgs...) 
- } - return true -} - -// samePointers compares two generic interface objects and returns whether -// they point to the same object -func samePointers(first, second interface{}) bool { - firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second) - if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr { - return false - } - - firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second) - if firstType != secondType { - return false - } - - // compare pointer addresses - return first == second -} - -// formatUnequalValues takes two values of arbitrary types and returns string -// representations appropriate to be presented to the user. -// -// If the values are not of like type, the returned strings will be prefixed -// with the type name, and the value will be enclosed in parenthesis similar -// to a type conversion in the Go grammar. -func formatUnequalValues(expected, actual interface{}) (e string, a string) { - if reflect.TypeOf(expected) != reflect.TypeOf(actual) { - return fmt.Sprintf("%T(%s)", expected, truncatingFormat(expected)), - fmt.Sprintf("%T(%s)", actual, truncatingFormat(actual)) - } - switch expected.(type) { - case time.Duration: - return fmt.Sprintf("%v", expected), fmt.Sprintf("%v", actual) - } - return truncatingFormat(expected), truncatingFormat(actual) -} - -// truncatingFormat formats the data and truncates it if it's too long. -// -// This helps keep formatted error messages lines from exceeding the -// bufio.MaxScanTokenSize max line length that the go testing framework imposes. -func truncatingFormat(data interface{}) string { - value := fmt.Sprintf("%#v", data) - max := bufio.MaxScanTokenSize - 100 // Give us some space the type info too if needed. - if len(value) > max { - value = value[0:max] + "<... truncated>" - } - return value -} - -// EqualValues asserts that two objects are equal or convertable to the same types -// and equal. -// -// assert.EqualValues(t, uint32(123), int32(123)) -func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if !ObjectsAreEqualValues(expected, actual) { - diff := diff(expected, actual) - expected, actual = formatUnequalValues(expected, actual) - return Fail(t, fmt.Sprintf("Not equal: \n"+ - "expected: %s\n"+ - "actual : %s%s", expected, actual, diff), msgAndArgs...) - } - - return true - -} - -// Exactly asserts that two objects are equal in value and type. -// -// assert.Exactly(t, int32(123), int64(123)) -func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - aType := reflect.TypeOf(expected) - bType := reflect.TypeOf(actual) - - if aType != bType { - return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...) - } - - return Equal(t, expected, actual, msgAndArgs...) - -} - -// NotNil asserts that the specified object is not nil. -// -// assert.NotNil(t, err) -func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - if !isNil(object) { - return true - } - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Fail(t, "Expected value not to be nil.", msgAndArgs...) -} - -// containsKind checks if a specified kind in the slice of kinds. 
-func containsKind(kinds []reflect.Kind, kind reflect.Kind) bool { - for i := 0; i < len(kinds); i++ { - if kind == kinds[i] { - return true - } - } - - return false -} - -// isNil checks if a specified object is nil or not, without Failing. -func isNil(object interface{}) bool { - if object == nil { - return true - } - - value := reflect.ValueOf(object) - kind := value.Kind() - isNilableKind := containsKind( - []reflect.Kind{ - reflect.Chan, reflect.Func, - reflect.Interface, reflect.Map, - reflect.Ptr, reflect.Slice}, - kind) - - if isNilableKind && value.IsNil() { - return true - } - - return false -} - -// Nil asserts that the specified object is nil. -// -// assert.Nil(t, err) -func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - if isNil(object) { - return true - } - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...) -} - -// isEmpty gets whether the specified object is considered empty or not. -func isEmpty(object interface{}) bool { - - // get nil case out of the way - if object == nil { - return true - } - - objValue := reflect.ValueOf(object) - - switch objValue.Kind() { - // collection types are empty when they have no element - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: - return objValue.Len() == 0 - // pointers are empty if nil or if the value they point to is empty - case reflect.Ptr: - if objValue.IsNil() { - return true - } - deref := objValue.Elem().Interface() - return isEmpty(deref) - // for all other types, compare against the zero value - default: - zero := reflect.Zero(objValue.Type()) - return reflect.DeepEqual(object, zero.Interface()) - } -} - -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// assert.Empty(t, obj) -func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - pass := isEmpty(object) - if !pass { - if h, ok := t.(tHelper); ok { - h.Helper() - } - Fail(t, fmt.Sprintf("Should be empty, but was %v", object), msgAndArgs...) - } - - return pass - -} - -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// if assert.NotEmpty(t, obj) { -// assert.Equal(t, "two", obj[1]) -// } -func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { - pass := !isEmpty(object) - if !pass { - if h, ok := t.(tHelper); ok { - h.Helper() - } - Fail(t, fmt.Sprintf("Should NOT be empty, but was %v", object), msgAndArgs...) - } - - return pass - -} - -// getLen try to get length of object. -// return (false, 0) if impossible. -func getLen(x interface{}) (ok bool, length int) { - v := reflect.ValueOf(x) - defer func() { - if e := recover(); e != nil { - ok = false - } - }() - return true, v.Len() -} - -// Len asserts that the specified object has specific length. -// Len also fails if the object has a type that len() not accept. -// -// assert.Len(t, mySlice, 3) -func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - ok, l := getLen(object) - if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...) - } - - if l != length { - return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...) - } - return true -} - -// True asserts that the specified value is true. 
-// -// assert.True(t, myBool) -func True(t TestingT, value bool, msgAndArgs ...interface{}) bool { - if !value { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Fail(t, "Should be true", msgAndArgs...) - } - - return true - -} - -// False asserts that the specified value is false. -// -// assert.False(t, myBool) -func False(t TestingT, value bool, msgAndArgs ...interface{}) bool { - if value { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Fail(t, "Should be false", msgAndArgs...) - } - - return true - -} - -// NotEqual asserts that the specified values are NOT equal. -// -// assert.NotEqual(t, obj1, obj2) -// -// Pointer variable equality is determined based on the equality of the -// referenced values (as opposed to the memory addresses). -func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if err := validateEqualArgs(expected, actual); err != nil { - return Fail(t, fmt.Sprintf("Invalid operation: %#v != %#v (%s)", - expected, actual, err), msgAndArgs...) - } - - if ObjectsAreEqual(expected, actual) { - return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...) - } - - return true - -} - -// NotEqualValues asserts that two objects are not equal even when converted to the same type -// -// assert.NotEqualValues(t, obj1, obj2) -func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if ObjectsAreEqualValues(expected, actual) { - return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...) - } - - return true -} - -// containsElement try loop over the list check if the list includes the element. -// return (false, false) if impossible. -// return (true, false) if element was not found. -// return (true, true) if element was found. -func includeElement(list interface{}, element interface{}) (ok, found bool) { - - listValue := reflect.ValueOf(list) - listKind := reflect.TypeOf(list).Kind() - defer func() { - if e := recover(); e != nil { - ok = false - found = false - } - }() - - if listKind == reflect.String { - elementValue := reflect.ValueOf(element) - return true, strings.Contains(listValue.String(), elementValue.String()) - } - - if listKind == reflect.Map { - mapKeys := listValue.MapKeys() - for i := 0; i < len(mapKeys); i++ { - if ObjectsAreEqual(mapKeys[i].Interface(), element) { - return true, true - } - } - return true, false - } - - for i := 0; i < listValue.Len(); i++ { - if ObjectsAreEqual(listValue.Index(i).Interface(), element) { - return true, true - } - } - return true, false - -} - -// Contains asserts that the specified string, list(array, slice...) or map contains the -// specified substring or element. -// -// assert.Contains(t, "Hello World", "World") -// assert.Contains(t, ["Hello", "World"], "World") -// assert.Contains(t, {"Hello": "World"}, "Hello") -func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - ok, found := includeElement(s, contains) - if !ok { - return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", s), msgAndArgs...) - } - if !found { - return Fail(t, fmt.Sprintf("%#v does not contain %#v", s, contains), msgAndArgs...) - } - - return true - -} - -// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the -// specified substring or element. 
-// -// assert.NotContains(t, "Hello World", "Earth") -// assert.NotContains(t, ["Hello", "World"], "Earth") -// assert.NotContains(t, {"Hello": "World"}, "Earth") -func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - ok, found := includeElement(s, contains) - if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...) - } - if found { - return Fail(t, fmt.Sprintf("\"%s\" should not contain \"%s\"", s, contains), msgAndArgs...) - } - - return true - -} - -// Subset asserts that the specified list(array, slice...) contains all -// elements given in the specified subset(array, slice...). -// -// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") -func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if subset == nil { - return true // we consider nil to be equal to the nil set - } - - subsetValue := reflect.ValueOf(subset) - defer func() { - if e := recover(); e != nil { - ok = false - } - }() - - listKind := reflect.TypeOf(list).Kind() - subsetKind := reflect.TypeOf(subset).Kind() - - if listKind != reflect.Array && listKind != reflect.Slice { - return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) - } - - if subsetKind != reflect.Array && subsetKind != reflect.Slice { - return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) - } - - for i := 0; i < subsetValue.Len(); i++ { - element := subsetValue.Index(i).Interface() - ok, found := includeElement(list, element) - if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) - } - if !found { - return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, element), msgAndArgs...) - } - } - - return true -} - -// NotSubset asserts that the specified list(array, slice...) contains not all -// elements given in the specified subset(array, slice...). -// -// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") -func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if subset == nil { - return Fail(t, fmt.Sprintf("nil is the empty set which is a subset of every set"), msgAndArgs...) - } - - subsetValue := reflect.ValueOf(subset) - defer func() { - if e := recover(); e != nil { - ok = false - } - }() - - listKind := reflect.TypeOf(list).Kind() - subsetKind := reflect.TypeOf(subset).Kind() - - if listKind != reflect.Array && listKind != reflect.Slice { - return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...) - } - - if subsetKind != reflect.Array && subsetKind != reflect.Slice { - return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) - } - - for i := 0; i < subsetValue.Len(); i++ { - element := subsetValue.Index(i).Interface() - ok, found := includeElement(list, element) - if !ok { - return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) - } - if !found { - return true - } - } - - return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...) -} - -// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified -// listB(array, slice...) ignoring the order of the elements. 
If there are duplicate elements, -// the number of appearances of each of them in both lists should match. -// -// assert.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2]) -func ElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface{}) (ok bool) { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if isEmpty(listA) && isEmpty(listB) { - return true - } - - if !isList(t, listA, msgAndArgs...) || !isList(t, listB, msgAndArgs...) { - return false - } - - extraA, extraB := diffLists(listA, listB) - - if len(extraA) == 0 && len(extraB) == 0 { - return true - } - - return Fail(t, formatListDiff(listA, listB, extraA, extraB), msgAndArgs...) -} - -// isList checks that the provided value is array or slice. -func isList(t TestingT, list interface{}, msgAndArgs ...interface{}) (ok bool) { - kind := reflect.TypeOf(list).Kind() - if kind != reflect.Array && kind != reflect.Slice { - return Fail(t, fmt.Sprintf("%q has an unsupported type %s, expecting array or slice", list, kind), - msgAndArgs...) - } - return true -} - -// diffLists diffs two arrays/slices and returns slices of elements that are only in A and only in B. -// If some element is present multiple times, each instance is counted separately (e.g. if something is 2x in A and -// 5x in B, it will be 0x in extraA and 3x in extraB). The order of items in both lists is ignored. -func diffLists(listA, listB interface{}) (extraA, extraB []interface{}) { - aValue := reflect.ValueOf(listA) - bValue := reflect.ValueOf(listB) - - aLen := aValue.Len() - bLen := bValue.Len() - - // Mark indexes in bValue that we already used - visited := make([]bool, bLen) - for i := 0; i < aLen; i++ { - element := aValue.Index(i).Interface() - found := false - for j := 0; j < bLen; j++ { - if visited[j] { - continue - } - if ObjectsAreEqual(bValue.Index(j).Interface(), element) { - visited[j] = true - found = true - break - } - } - if !found { - extraA = append(extraA, element) - } - } - - for j := 0; j < bLen; j++ { - if visited[j] { - continue - } - extraB = append(extraB, bValue.Index(j).Interface()) - } - - return -} - -func formatListDiff(listA, listB interface{}, extraA, extraB []interface{}) string { - var msg bytes.Buffer - - msg.WriteString("elements differ") - if len(extraA) > 0 { - msg.WriteString("\n\nextra elements in list A:\n") - msg.WriteString(spewConfig.Sdump(extraA)) - } - if len(extraB) > 0 { - msg.WriteString("\n\nextra elements in list B:\n") - msg.WriteString(spewConfig.Sdump(extraB)) - } - msg.WriteString("\n\nlistA:\n") - msg.WriteString(spewConfig.Sdump(listA)) - msg.WriteString("\n\nlistB:\n") - msg.WriteString(spewConfig.Sdump(listB)) - - return msg.String() -} - -// Condition uses a Comparison to assert a complex condition. -func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - result := comp() - if !result { - Fail(t, "Condition failed!", msgAndArgs...) - } - return result -} - -// PanicTestFunc defines a func that should be passed to the assert.Panics and assert.NotPanics -// methods, and represents a simple func that takes no arguments, and returns nothing. -type PanicTestFunc func() - -// didPanic returns true if the function passed to it panics. Otherwise, it returns false. 
-func didPanic(f PanicTestFunc) (bool, interface{}, string) { - - didPanic := false - var message interface{} - var stack string - func() { - - defer func() { - if message = recover(); message != nil { - didPanic = true - stack = string(debug.Stack()) - } - }() - - // call the target function - f() - - }() - - return didPanic, message, stack - -} - -// Panics asserts that the code inside the specified PanicTestFunc panics. -// -// assert.Panics(t, func(){ GoCrazy() }) -func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if funcDidPanic, panicValue, _ := didPanic(f); !funcDidPanic { - return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...) - } - - return true -} - -// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that -// the recovered panic value equals the expected panic value. -// -// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() }) -func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - funcDidPanic, panicValue, panickedStack := didPanic(f) - if !funcDidPanic { - return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...) - } - if panicValue != expected { - return Fail(t, fmt.Sprintf("func %#v should panic with value:\t%#v\n\tPanic value:\t%#v\n\tPanic stack:\t%s", f, expected, panicValue, panickedStack), msgAndArgs...) - } - - return true -} - -// PanicsWithError asserts that the code inside the specified PanicTestFunc -// panics, and that the recovered panic value is an error that satisfies the -// EqualError comparison. -// -// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() }) -func PanicsWithError(t TestingT, errString string, f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - funcDidPanic, panicValue, panickedStack := didPanic(f) - if !funcDidPanic { - return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...) - } - panicErr, ok := panicValue.(error) - if !ok || panicErr.Error() != errString { - return Fail(t, fmt.Sprintf("func %#v should panic with error message:\t%#v\n\tPanic value:\t%#v\n\tPanic stack:\t%s", f, errString, panicValue, panickedStack), msgAndArgs...) - } - - return true -} - -// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. -// -// assert.NotPanics(t, func(){ RemainCalm() }) -func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - if funcDidPanic, panicValue, panickedStack := didPanic(f); funcDidPanic { - return Fail(t, fmt.Sprintf("func %#v should not panic\n\tPanic value:\t%v\n\tPanic stack:\t%s", f, panicValue, panickedStack), msgAndArgs...) - } - - return true -} - -// WithinDuration asserts that the two times are within duration delta of each other. -// -// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second) -func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - dt := expected.Sub(actual) - if dt < -delta || dt > delta { - return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) 
- } - - return true -} - -func toFloat(x interface{}) (float64, bool) { - var xf float64 - xok := true - - switch xn := x.(type) { - case uint: - xf = float64(xn) - case uint8: - xf = float64(xn) - case uint16: - xf = float64(xn) - case uint32: - xf = float64(xn) - case uint64: - xf = float64(xn) - case int: - xf = float64(xn) - case int8: - xf = float64(xn) - case int16: - xf = float64(xn) - case int32: - xf = float64(xn) - case int64: - xf = float64(xn) - case float32: - xf = float64(xn) - case float64: - xf = xn - case time.Duration: - xf = float64(xn) - default: - xok = false - } - - return xf, xok -} - -// InDelta asserts that the two numerals are within delta of each other. -// -// assert.InDelta(t, math.Pi, 22/7.0, 0.01) -func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - af, aok := toFloat(expected) - bf, bok := toFloat(actual) - - if !aok || !bok { - return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...) - } - - if math.IsNaN(af) { - return Fail(t, fmt.Sprintf("Expected must not be NaN"), msgAndArgs...) - } - - if math.IsNaN(bf) { - return Fail(t, fmt.Sprintf("Expected %v with delta %v, but was NaN", expected, delta), msgAndArgs...) - } - - dt := af - bf - if dt < -delta || dt > delta { - return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...) - } - - return true -} - -// InDeltaSlice is the same as InDelta, except it compares two slices. -func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if expected == nil || actual == nil || - reflect.TypeOf(actual).Kind() != reflect.Slice || - reflect.TypeOf(expected).Kind() != reflect.Slice { - return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) - } - - actualSlice := reflect.ValueOf(actual) - expectedSlice := reflect.ValueOf(expected) - - for i := 0; i < actualSlice.Len(); i++ { - result := InDelta(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), delta, msgAndArgs...) - if !result { - return result - } - } - - return true -} - -// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys. -func InDeltaMapValues(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if expected == nil || actual == nil || - reflect.TypeOf(actual).Kind() != reflect.Map || - reflect.TypeOf(expected).Kind() != reflect.Map { - return Fail(t, "Arguments must be maps", msgAndArgs...) - } - - expectedMap := reflect.ValueOf(expected) - actualMap := reflect.ValueOf(actual) - - if expectedMap.Len() != actualMap.Len() { - return Fail(t, "Arguments must have the same number of keys", msgAndArgs...) - } - - for _, k := range expectedMap.MapKeys() { - ev := expectedMap.MapIndex(k) - av := actualMap.MapIndex(k) - - if !ev.IsValid() { - return Fail(t, fmt.Sprintf("missing key %q in expected map", k), msgAndArgs...) - } - - if !av.IsValid() { - return Fail(t, fmt.Sprintf("missing key %q in actual map", k), msgAndArgs...) 
- } - - if !InDelta( - t, - ev.Interface(), - av.Interface(), - delta, - msgAndArgs..., - ) { - return false - } - } - - return true -} - -func calcRelativeError(expected, actual interface{}) (float64, error) { - af, aok := toFloat(expected) - if !aok { - return 0, fmt.Errorf("expected value %q cannot be converted to float", expected) - } - if math.IsNaN(af) { - return 0, errors.New("expected value must not be NaN") - } - if af == 0 { - return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error") - } - bf, bok := toFloat(actual) - if !bok { - return 0, fmt.Errorf("actual value %q cannot be converted to float", actual) - } - if math.IsNaN(bf) { - return 0, errors.New("actual value must not be NaN") - } - - return math.Abs(af-bf) / math.Abs(af), nil -} - -// InEpsilon asserts that expected and actual have a relative error less than epsilon -func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if math.IsNaN(epsilon) { - return Fail(t, "epsilon must not be NaN") - } - actualEpsilon, err := calcRelativeError(expected, actual) - if err != nil { - return Fail(t, err.Error(), msgAndArgs...) - } - if actualEpsilon > epsilon { - return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+ - " < %#v (actual)", epsilon, actualEpsilon), msgAndArgs...) - } - - return true -} - -// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. -func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if expected == nil || actual == nil || - reflect.TypeOf(actual).Kind() != reflect.Slice || - reflect.TypeOf(expected).Kind() != reflect.Slice { - return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...) - } - - actualSlice := reflect.ValueOf(actual) - expectedSlice := reflect.ValueOf(expected) - - for i := 0; i < actualSlice.Len(); i++ { - result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon) - if !result { - return result - } - } - - return true -} - -/* - Errors -*/ - -// NoError asserts that a function returned no error (i.e. `nil`). -// -// actualObj, err := SomeFunction() -// if assert.NoError(t, err) { -// assert.Equal(t, expectedObj, actualObj) -// } -func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool { - if err != nil { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Fail(t, fmt.Sprintf("Received unexpected error:\n%+v", err), msgAndArgs...) - } - - return true -} - -// Error asserts that a function returned an error (i.e. not `nil`). -// -// actualObj, err := SomeFunction() -// if assert.Error(t, err) { -// assert.Equal(t, expectedError, err) -// } -func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { - if err == nil { - if h, ok := t.(tHelper); ok { - h.Helper() - } - return Fail(t, "An error is expected but got nil.", msgAndArgs...) - } - - return true -} - -// EqualError asserts that a function returned an error (i.e. not `nil`) -// and that it is equal to the provided error. -// -// actualObj, err := SomeFunction() -// assert.EqualError(t, err, expectedErrorString) -func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if !Error(t, theError, msgAndArgs...) 
{ - return false - } - expected := errString - actual := theError.Error() - // don't need to use deep equals here, we know they are both strings - if expected != actual { - return Fail(t, fmt.Sprintf("Error message not equal:\n"+ - "expected: %q\n"+ - "actual : %q", expected, actual), msgAndArgs...) - } - return true -} - -// matchRegexp return true if a specified regexp matches a string. -func matchRegexp(rx interface{}, str interface{}) bool { - - var r *regexp.Regexp - if rr, ok := rx.(*regexp.Regexp); ok { - r = rr - } else { - r = regexp.MustCompile(fmt.Sprint(rx)) - } - - return (r.FindStringIndex(fmt.Sprint(str)) != nil) - -} - -// Regexp asserts that a specified regexp matches a string. -// -// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") -// assert.Regexp(t, "start...$", "it's not starting") -func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - match := matchRegexp(rx, str) - - if !match { - Fail(t, fmt.Sprintf("Expect \"%v\" to match \"%v\"", str, rx), msgAndArgs...) - } - - return match -} - -// NotRegexp asserts that a specified regexp does not match a string. -// -// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") -// assert.NotRegexp(t, "^start", "it's not starting") -func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - match := matchRegexp(rx, str) - - if match { - Fail(t, fmt.Sprintf("Expect \"%v\" to NOT match \"%v\"", str, rx), msgAndArgs...) - } - - return !match - -} - -// Zero asserts that i is the zero value for its type. -func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if i != nil && !reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) { - return Fail(t, fmt.Sprintf("Should be zero, but was %v", i), msgAndArgs...) - } - return true -} - -// NotZero asserts that i is not the zero value for its type. -func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - if i == nil || reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) { - return Fail(t, fmt.Sprintf("Should not be zero, but was %v", i), msgAndArgs...) - } - return true -} - -// FileExists checks whether a file exists in the given path. It also fails if -// the path points to a directory or there is an error when trying to check the file. -func FileExists(t TestingT, path string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - info, err := os.Lstat(path) - if err != nil { - if os.IsNotExist(err) { - return Fail(t, fmt.Sprintf("unable to find file %q", path), msgAndArgs...) - } - return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...) - } - if info.IsDir() { - return Fail(t, fmt.Sprintf("%q is a directory", path), msgAndArgs...) - } - return true -} - -// NoFileExists checks whether a file does not exist in a given path. It fails -// if the path points to an existing _file_ only. -func NoFileExists(t TestingT, path string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - info, err := os.Lstat(path) - if err != nil { - return true - } - if info.IsDir() { - return true - } - return Fail(t, fmt.Sprintf("file %q exists", path), msgAndArgs...) -} - -// DirExists checks whether a directory exists in the given path. 
It also fails -// if the path is a file rather a directory or there is an error checking whether it exists. -func DirExists(t TestingT, path string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - info, err := os.Lstat(path) - if err != nil { - if os.IsNotExist(err) { - return Fail(t, fmt.Sprintf("unable to find file %q", path), msgAndArgs...) - } - return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...) - } - if !info.IsDir() { - return Fail(t, fmt.Sprintf("%q is a file", path), msgAndArgs...) - } - return true -} - -// NoDirExists checks whether a directory does not exist in the given path. -// It fails if the path points to an existing _directory_ only. -func NoDirExists(t TestingT, path string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - info, err := os.Lstat(path) - if err != nil { - if os.IsNotExist(err) { - return true - } - return true - } - if !info.IsDir() { - return true - } - return Fail(t, fmt.Sprintf("directory %q exists", path), msgAndArgs...) -} - -// JSONEq asserts that two JSON strings are equivalent. -// -// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) -func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - var expectedJSONAsInterface, actualJSONAsInterface interface{} - - if err := json.Unmarshal([]byte(expected), &expectedJSONAsInterface); err != nil { - return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...) - } - - if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil { - return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...) - } - - return Equal(t, expectedJSONAsInterface, actualJSONAsInterface, msgAndArgs...) -} - -// YAMLEq asserts that two YAML strings are equivalent. -func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - var expectedYAMLAsInterface, actualYAMLAsInterface interface{} - - if err := yaml.Unmarshal([]byte(expected), &expectedYAMLAsInterface); err != nil { - return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid yaml.\nYAML parsing error: '%s'", expected, err.Error()), msgAndArgs...) - } - - if err := yaml.Unmarshal([]byte(actual), &actualYAMLAsInterface); err != nil { - return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid yaml.\nYAML error: '%s'", actual, err.Error()), msgAndArgs...) - } - - return Equal(t, expectedYAMLAsInterface, actualYAMLAsInterface, msgAndArgs...) -} - -func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) { - t := reflect.TypeOf(v) - k := t.Kind() - - if k == reflect.Ptr { - t = t.Elem() - k = t.Kind() - } - return t, k -} - -// diff returns a diff of both values as long as both are of the same type and -// are a struct, map, slice, array or string. Otherwise it returns an empty string. 
-func diff(expected interface{}, actual interface{}) string { - if expected == nil || actual == nil { - return "" - } - - et, ek := typeAndKind(expected) - at, _ := typeAndKind(actual) - - if et != at { - return "" - } - - if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array && ek != reflect.String { - return "" - } - - var e, a string - if et != reflect.TypeOf("") { - e = spewConfig.Sdump(expected) - a = spewConfig.Sdump(actual) - } else { - e = reflect.ValueOf(expected).String() - a = reflect.ValueOf(actual).String() - } - - diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ - A: difflib.SplitLines(e), - B: difflib.SplitLines(a), - FromFile: "Expected", - FromDate: "", - ToFile: "Actual", - ToDate: "", - Context: 1, - }) - - return "\n\nDiff:\n" + diff -} - -func isFunction(arg interface{}) bool { - if arg == nil { - return false - } - return reflect.TypeOf(arg).Kind() == reflect.Func -} - -var spewConfig = spew.ConfigState{ - Indent: " ", - DisablePointerAddresses: true, - DisableCapacities: true, - SortKeys: true, - DisableMethods: true, -} - -type tHelper interface { - Helper() -} - -// Eventually asserts that given condition will be met in waitFor time, -// periodically checking target function each tick. -// -// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond) -func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - ch := make(chan bool, 1) - - timer := time.NewTimer(waitFor) - defer timer.Stop() - - ticker := time.NewTicker(tick) - defer ticker.Stop() - - for tick := ticker.C; ; { - select { - case <-timer.C: - return Fail(t, "Condition never satisfied", msgAndArgs...) - case <-tick: - tick = nil - go func() { ch <- condition() }() - case v := <-ch: - if v { - return true - } - tick = ticker.C - } - } -} - -// Never asserts that the given condition doesn't satisfy in waitFor time, -// periodically checking the target function each tick. -// -// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond) -func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - - ch := make(chan bool, 1) - - timer := time.NewTimer(waitFor) - defer timer.Stop() - - ticker := time.NewTicker(tick) - defer ticker.Stop() - - for tick := ticker.C; ; { - select { - case <-timer.C: - return true - case <-tick: - tick = nil - go func() { ch <- condition() }() - case v := <-ch: - if v { - return Fail(t, "Condition satisfied", msgAndArgs...) - } - tick = ticker.C - } - } -} diff --git a/vendor/github.com/stretchr/testify/assert/doc.go b/vendor/github.com/stretchr/testify/assert/doc.go deleted file mode 100644 index c9dccc4..0000000 --- a/vendor/github.com/stretchr/testify/assert/doc.go +++ /dev/null @@ -1,45 +0,0 @@ -// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system. 
-// -// Example Usage -// -// The following is a complete example using assert in a standard test function: -// import ( -// "testing" -// "github.com/stretchr/testify/assert" -// ) -// -// func TestSomething(t *testing.T) { -// -// var a string = "Hello" -// var b string = "Hello" -// -// assert.Equal(t, a, b, "The two words should be the same.") -// -// } -// -// if you assert many times, use the format below: -// -// import ( -// "testing" -// "github.com/stretchr/testify/assert" -// ) -// -// func TestSomething(t *testing.T) { -// assert := assert.New(t) -// -// var a string = "Hello" -// var b string = "Hello" -// -// assert.Equal(a, b, "The two words should be the same.") -// } -// -// Assertions -// -// Assertions allow you to easily write test code, and are global funcs in the `assert` package. -// All assertion functions take, as the first argument, the `*testing.T` object provided by the -// testing framework. This allows the assertion funcs to write the failings and other details to -// the correct place. -// -// Every assertion function also takes an optional string message as the final argument, -// allowing custom error messages to be appended to the message the assertion method outputs. -package assert diff --git a/vendor/github.com/stretchr/testify/assert/errors.go b/vendor/github.com/stretchr/testify/assert/errors.go deleted file mode 100644 index ac9dc9d..0000000 --- a/vendor/github.com/stretchr/testify/assert/errors.go +++ /dev/null @@ -1,10 +0,0 @@ -package assert - -import ( - "errors" -) - -// AnError is an error instance useful for testing. If the code does not care -// about error specifics, and only needs to return the error for example, this -// error should be used to make the test code more readable. -var AnError = errors.New("assert.AnError general error for testing") diff --git a/vendor/github.com/stretchr/testify/assert/forward_assertions.go b/vendor/github.com/stretchr/testify/assert/forward_assertions.go deleted file mode 100644 index df189d2..0000000 --- a/vendor/github.com/stretchr/testify/assert/forward_assertions.go +++ /dev/null @@ -1,16 +0,0 @@ -package assert - -// Assertions provides assertion methods around the -// TestingT interface. -type Assertions struct { - t TestingT -} - -// New makes a new Assertions object for the specified TestingT. -func New(t TestingT) *Assertions { - return &Assertions{ - t: t, - } -} - -//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_forward.go.tmpl -include-format-funcs" diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go deleted file mode 100644 index 4ed341d..0000000 --- a/vendor/github.com/stretchr/testify/assert/http_assertions.go +++ /dev/null @@ -1,162 +0,0 @@ -package assert - -import ( - "fmt" - "net/http" - "net/http/httptest" - "net/url" - "strings" -) - -// httpCode is a helper that returns HTTP code of the response. It returns -1 and -// an error if building a new request fails. -func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) { - w := httptest.NewRecorder() - req, err := http.NewRequest(method, url, nil) - if err != nil { - return -1, err - } - req.URL.RawQuery = values.Encode() - handler(w, req) - return w.Code, nil -} - -// HTTPSuccess asserts that a specified handler returns a success status code. 
-// -// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - code, err := httpCode(handler, method, url, values) - if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) - } - - isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent - if !isSuccessCode { - Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code)) - } - - return isSuccessCode -} - -// HTTPRedirect asserts that a specified handler returns a redirect status code. -// -// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - code, err := httpCode(handler, method, url, values) - if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) - } - - isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect - if !isRedirectCode { - Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code)) - } - - return isRedirectCode -} - -// HTTPError asserts that a specified handler returns an error status code. -// -// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - code, err := httpCode(handler, method, url, values) - if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) - } - - isErrorCode := code >= http.StatusBadRequest - if !isErrorCode { - Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code)) - } - - return isErrorCode -} - -// HTTPStatusCode asserts that a specified handler returns a specified status code. -// -// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501) -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - code, err := httpCode(handler, method, url, values) - if err != nil { - Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) - } - - successful := code == statuscode - if !successful { - Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code)) - } - - return successful -} - -// HTTPBody is a helper that returns HTTP body of the response. It returns -// empty string if building a new request fails. 
-func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string { - w := httptest.NewRecorder() - req, err := http.NewRequest(method, url+"?"+values.Encode(), nil) - if err != nil { - return "" - } - handler(w, req) - return w.Body.String() -} - -// HTTPBodyContains asserts that a specified handler returns a -// body that contains a string. -// -// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - body := HTTPBody(handler, method, url, values) - - contains := strings.Contains(body, fmt.Sprint(str)) - if !contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) - } - - return contains -} - -// HTTPBodyNotContains asserts that a specified handler returns a -// body that does not contain a string. -// -// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky") -// -// Returns whether the assertion was successful (true) or not (false). -func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool { - if h, ok := t.(tHelper); ok { - h.Helper() - } - body := HTTPBody(handler, method, url, values) - - contains := strings.Contains(body, fmt.Sprint(str)) - if contains { - Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) - } - - return !contains -} diff --git a/vendor/gopkg.in/yaml.v3/.travis.yml b/vendor/gopkg.in/yaml.v3/.travis.yml deleted file mode 100644 index 04d4dae..0000000 --- a/vendor/gopkg.in/yaml.v3/.travis.yml +++ /dev/null @@ -1,16 +0,0 @@ -language: go - -go: - - "1.4.x" - - "1.5.x" - - "1.6.x" - - "1.7.x" - - "1.8.x" - - "1.9.x" - - "1.10.x" - - "1.11.x" - - "1.12.x" - - "1.13.x" - - "tip" - -go_import_path: gopkg.in/yaml.v3 diff --git a/vendor/gopkg.in/yaml.v3/LICENSE b/vendor/gopkg.in/yaml.v3/LICENSE deleted file mode 100644 index 2683e4b..0000000 --- a/vendor/gopkg.in/yaml.v3/LICENSE +++ /dev/null @@ -1,50 +0,0 @@ - -This project is covered by two different licenses: MIT and Apache. - -#### MIT License #### - -The following files were ported to Go from C files of libyaml, and thus -are still covered by their original MIT license, with the additional -copyright staring in 2011 when the project was ported over: - - apic.go emitterc.go parserc.go readerc.go scannerc.go - writerc.go yamlh.go yamlprivateh.go - -Copyright (c) 2006-2010 Kirill Simonov -Copyright (c) 2006-2011 Kirill Simonov - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - -### Apache License ### - -All the remaining project files are covered by the Apache license: - -Copyright (c) 2011-2019 Canonical Ltd - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/gopkg.in/yaml.v3/NOTICE b/vendor/gopkg.in/yaml.v3/NOTICE deleted file mode 100644 index 866d74a..0000000 --- a/vendor/gopkg.in/yaml.v3/NOTICE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2011-2016 Canonical Ltd. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/gopkg.in/yaml.v3/README.md b/vendor/gopkg.in/yaml.v3/README.md deleted file mode 100644 index 08eb1ba..0000000 --- a/vendor/gopkg.in/yaml.v3/README.md +++ /dev/null @@ -1,150 +0,0 @@ -# YAML support for the Go language - -Introduction ------------- - -The yaml package enables Go programs to comfortably encode and decode YAML -values. It was developed within [Canonical](https://www.canonical.com) as -part of the [juju](https://juju.ubuntu.com) project, and is based on a -pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) -C library to parse and generate YAML data quickly and reliably. - -Compatibility -------------- - -The yaml package supports most of YAML 1.2, but preserves some behavior -from 1.1 for backwards compatibility. - -Specifically, as of v3 of the yaml package: - - - YAML 1.1 bools (_yes/no, on/off_) are supported as long as they are being - decoded into a typed bool value. Otherwise they behave as a string. Booleans - in YAML 1.2 are _true/false_ only. - - Octals encode and decode as _0777_ per YAML 1.1, rather than _0o777_ - as specified in YAML 1.2, because most parsers still use the old format. - Octals in the _0o777_ format are supported though, so new files work. - - Does not support base-60 floats. These are gone from YAML 1.2, and were - actually never supported by this package as it's clearly a poor choice. - -and offers backwards -compatibility with YAML 1.1 in some cases. -1.2, including support for -anchors, tags, map merging, etc. 
Multi-document unmarshalling is not yet -implemented, and base-60 floats from YAML 1.1 are purposefully not -supported since they're a poor design and are gone in YAML 1.2. - -Installation and usage ----------------------- - -The import path for the package is *gopkg.in/yaml.v3*. - -To install it, run: - - go get gopkg.in/yaml.v3 - -API documentation ------------------ - -If opened in a browser, the import path itself leads to the API documentation: - - - [https://gopkg.in/yaml.v3](https://gopkg.in/yaml.v3) - -API stability -------------- - -The package API for yaml v3 will remain stable as described in [gopkg.in](https://gopkg.in). - - -License -------- - -The yaml package is licensed under the MIT and Apache License 2.0 licenses. -Please see the LICENSE file for details. - - -Example -------- - -```Go -package main - -import ( - "fmt" - "log" - - "gopkg.in/yaml.v3" -) - -var data = ` -a: Easy! -b: - c: 2 - d: [3, 4] -` - -// Note: struct fields must be public in order for unmarshal to -// correctly populate the data. -type T struct { - A string - B struct { - RenamedC int `yaml:"c"` - D []int `yaml:",flow"` - } -} - -func main() { - t := T{} - - err := yaml.Unmarshal([]byte(data), &t) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- t:\n%v\n\n", t) - - d, err := yaml.Marshal(&t) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- t dump:\n%s\n\n", string(d)) - - m := make(map[interface{}]interface{}) - - err = yaml.Unmarshal([]byte(data), &m) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- m:\n%v\n\n", m) - - d, err = yaml.Marshal(&m) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- m dump:\n%s\n\n", string(d)) -} -``` - -This example will generate the following output: - -``` ---- t: -{Easy! {2 [3 4]}} - ---- t dump: -a: Easy! -b: - c: 2 - d: [3, 4] - - ---- m: -map[a:Easy! b:map[c:2 d:[3 4]]] - ---- m dump: -a: Easy! -b: - c: 2 - d: - - 3 - - 4 -``` - diff --git a/vendor/gopkg.in/yaml.v3/apic.go b/vendor/gopkg.in/yaml.v3/apic.go deleted file mode 100644 index 65846e6..0000000 --- a/vendor/gopkg.in/yaml.v3/apic.go +++ /dev/null @@ -1,746 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// Copyright (c) 2006-2010 Kirill Simonov -// -// Permission is hereby granted, free of charge, to any person obtaining a copy of -// this software and associated documentation files (the "Software"), to deal in -// the Software without restriction, including without limitation the rights to -// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -// of the Software, and to permit persons to whom the Software is furnished to do -// so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. 
- -package yaml - -import ( - "io" -) - -func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { - //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) - - // Check if we can move the queue at the beginning of the buffer. - if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { - if parser.tokens_head != len(parser.tokens) { - copy(parser.tokens, parser.tokens[parser.tokens_head:]) - } - parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] - parser.tokens_head = 0 - } - parser.tokens = append(parser.tokens, *token) - if pos < 0 { - return - } - copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) - parser.tokens[parser.tokens_head+pos] = *token -} - -// Create a new parser object. -func yaml_parser_initialize(parser *yaml_parser_t) bool { - *parser = yaml_parser_t{ - raw_buffer: make([]byte, 0, input_raw_buffer_size), - buffer: make([]byte, 0, input_buffer_size), - } - return true -} - -// Destroy a parser object. -func yaml_parser_delete(parser *yaml_parser_t) { - *parser = yaml_parser_t{} -} - -// String read handler. -func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { - if parser.input_pos == len(parser.input) { - return 0, io.EOF - } - n = copy(buffer, parser.input[parser.input_pos:]) - parser.input_pos += n - return n, nil -} - -// Reader read handler. -func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { - return parser.input_reader.Read(buffer) -} - -// Set a string input. -func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { - if parser.read_handler != nil { - panic("must set the input source only once") - } - parser.read_handler = yaml_string_read_handler - parser.input = input - parser.input_pos = 0 -} - -// Set a file input. -func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { - if parser.read_handler != nil { - panic("must set the input source only once") - } - parser.read_handler = yaml_reader_read_handler - parser.input_reader = r -} - -// Set the source encoding. -func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { - if parser.encoding != yaml_ANY_ENCODING { - panic("must set the encoding only once") - } - parser.encoding = encoding -} - -// Create a new emitter object. -func yaml_emitter_initialize(emitter *yaml_emitter_t) { - *emitter = yaml_emitter_t{ - buffer: make([]byte, output_buffer_size), - raw_buffer: make([]byte, 0, output_raw_buffer_size), - states: make([]yaml_emitter_state_t, 0, initial_stack_size), - events: make([]yaml_event_t, 0, initial_queue_size), - } -} - -// Destroy an emitter object. -func yaml_emitter_delete(emitter *yaml_emitter_t) { - *emitter = yaml_emitter_t{} -} - -// String write handler. -func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - *emitter.output_buffer = append(*emitter.output_buffer, buffer...) - return nil -} - -// yaml_writer_write_handler uses emitter.output_writer to write the -// emitted text. -func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - _, err := emitter.output_writer.Write(buffer) - return err -} - -// Set a string output. 
-func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { - if emitter.write_handler != nil { - panic("must set the output target only once") - } - emitter.write_handler = yaml_string_write_handler - emitter.output_buffer = output_buffer -} - -// Set a file output. -func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { - if emitter.write_handler != nil { - panic("must set the output target only once") - } - emitter.write_handler = yaml_writer_write_handler - emitter.output_writer = w -} - -// Set the output encoding. -func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { - if emitter.encoding != yaml_ANY_ENCODING { - panic("must set the output encoding only once") - } - emitter.encoding = encoding -} - -// Set the canonical output style. -func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { - emitter.canonical = canonical -} - -// Set the indentation increment. -func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { - if indent < 2 || indent > 9 { - indent = 2 - } - emitter.best_indent = indent -} - -// Set the preferred line width. -func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { - if width < 0 { - width = -1 - } - emitter.best_width = width -} - -// Set if unescaped non-ASCII characters are allowed. -func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { - emitter.unicode = unicode -} - -// Set the preferred line break character. -func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { - emitter.line_break = line_break -} - -///* -// * Destroy a token object. -// */ -// -//YAML_DECLARE(void) -//yaml_token_delete(yaml_token_t *token) -//{ -// assert(token); // Non-NULL token object expected. -// -// switch (token.type) -// { -// case YAML_TAG_DIRECTIVE_TOKEN: -// yaml_free(token.data.tag_directive.handle); -// yaml_free(token.data.tag_directive.prefix); -// break; -// -// case YAML_ALIAS_TOKEN: -// yaml_free(token.data.alias.value); -// break; -// -// case YAML_ANCHOR_TOKEN: -// yaml_free(token.data.anchor.value); -// break; -// -// case YAML_TAG_TOKEN: -// yaml_free(token.data.tag.handle); -// yaml_free(token.data.tag.suffix); -// break; -// -// case YAML_SCALAR_TOKEN: -// yaml_free(token.data.scalar.value); -// break; -// -// default: -// break; -// } -// -// memset(token, 0, sizeof(yaml_token_t)); -//} -// -///* -// * Check if a string is a valid UTF-8 sequence. -// * -// * Check 'reader.c' for more details on UTF-8 encoding. -// */ -// -//static int -//yaml_check_utf8(yaml_char_t *start, size_t length) -//{ -// yaml_char_t *end = start+length; -// yaml_char_t *pointer = start; -// -// while (pointer < end) { -// unsigned char octet; -// unsigned int width; -// unsigned int value; -// size_t k; -// -// octet = pointer[0]; -// width = (octet & 0x80) == 0x00 ? 1 : -// (octet & 0xE0) == 0xC0 ? 2 : -// (octet & 0xF0) == 0xE0 ? 3 : -// (octet & 0xF8) == 0xF0 ? 4 : 0; -// value = (octet & 0x80) == 0x00 ? octet & 0x7F : -// (octet & 0xE0) == 0xC0 ? octet & 0x1F : -// (octet & 0xF0) == 0xE0 ? octet & 0x0F : -// (octet & 0xF8) == 0xF0 ? 
octet & 0x07 : 0; -// if (!width) return 0; -// if (pointer+width > end) return 0; -// for (k = 1; k < width; k ++) { -// octet = pointer[k]; -// if ((octet & 0xC0) != 0x80) return 0; -// value = (value << 6) + (octet & 0x3F); -// } -// if (!((width == 1) || -// (width == 2 && value >= 0x80) || -// (width == 3 && value >= 0x800) || -// (width == 4 && value >= 0x10000))) return 0; -// -// pointer += width; -// } -// -// return 1; -//} -// - -// Create STREAM-START. -func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { - *event = yaml_event_t{ - typ: yaml_STREAM_START_EVENT, - encoding: encoding, - } -} - -// Create STREAM-END. -func yaml_stream_end_event_initialize(event *yaml_event_t) { - *event = yaml_event_t{ - typ: yaml_STREAM_END_EVENT, - } -} - -// Create DOCUMENT-START. -func yaml_document_start_event_initialize( - event *yaml_event_t, - version_directive *yaml_version_directive_t, - tag_directives []yaml_tag_directive_t, - implicit bool, -) { - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - version_directive: version_directive, - tag_directives: tag_directives, - implicit: implicit, - } -} - -// Create DOCUMENT-END. -func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { - *event = yaml_event_t{ - typ: yaml_DOCUMENT_END_EVENT, - implicit: implicit, - } -} - -// Create ALIAS. -func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) bool { - *event = yaml_event_t{ - typ: yaml_ALIAS_EVENT, - anchor: anchor, - } - return true -} - -// Create SCALAR. -func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - anchor: anchor, - tag: tag, - value: value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(style), - } - return true -} - -// Create SEQUENCE-START. -func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } - return true -} - -// Create SEQUENCE-END. -func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - } - return true -} - -// Create MAPPING-START. -func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } -} - -// Create MAPPING-END. -func yaml_mapping_end_event_initialize(event *yaml_event_t) { - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - } -} - -// Destroy an event object. -func yaml_event_delete(event *yaml_event_t) { - *event = yaml_event_t{} -} - -///* -// * Create a document object. 
-// */ -// -//YAML_DECLARE(int) -//yaml_document_initialize(document *yaml_document_t, -// version_directive *yaml_version_directive_t, -// tag_directives_start *yaml_tag_directive_t, -// tag_directives_end *yaml_tag_directive_t, -// start_implicit int, end_implicit int) -//{ -// struct { -// error yaml_error_type_t -// } context -// struct { -// start *yaml_node_t -// end *yaml_node_t -// top *yaml_node_t -// } nodes = { NULL, NULL, NULL } -// version_directive_copy *yaml_version_directive_t = NULL -// struct { -// start *yaml_tag_directive_t -// end *yaml_tag_directive_t -// top *yaml_tag_directive_t -// } tag_directives_copy = { NULL, NULL, NULL } -// value yaml_tag_directive_t = { NULL, NULL } -// mark yaml_mark_t = { 0, 0, 0 } -// -// assert(document) // Non-NULL document object is expected. -// assert((tag_directives_start && tag_directives_end) || -// (tag_directives_start == tag_directives_end)) -// // Valid tag directives are expected. -// -// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error -// -// if (version_directive) { -// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) -// if (!version_directive_copy) goto error -// version_directive_copy.major = version_directive.major -// version_directive_copy.minor = version_directive.minor -// } -// -// if (tag_directives_start != tag_directives_end) { -// tag_directive *yaml_tag_directive_t -// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) -// goto error -// for (tag_directive = tag_directives_start -// tag_directive != tag_directives_end; tag_directive ++) { -// assert(tag_directive.handle) -// assert(tag_directive.prefix) -// if (!yaml_check_utf8(tag_directive.handle, -// strlen((char *)tag_directive.handle))) -// goto error -// if (!yaml_check_utf8(tag_directive.prefix, -// strlen((char *)tag_directive.prefix))) -// goto error -// value.handle = yaml_strdup(tag_directive.handle) -// value.prefix = yaml_strdup(tag_directive.prefix) -// if (!value.handle || !value.prefix) goto error -// if (!PUSH(&context, tag_directives_copy, value)) -// goto error -// value.handle = NULL -// value.prefix = NULL -// } -// } -// -// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, -// tag_directives_copy.start, tag_directives_copy.top, -// start_implicit, end_implicit, mark, mark) -// -// return 1 -// -//error: -// STACK_DEL(&context, nodes) -// yaml_free(version_directive_copy) -// while (!STACK_EMPTY(&context, tag_directives_copy)) { -// value yaml_tag_directive_t = POP(&context, tag_directives_copy) -// yaml_free(value.handle) -// yaml_free(value.prefix) -// } -// STACK_DEL(&context, tag_directives_copy) -// yaml_free(value.handle) -// yaml_free(value.prefix) -// -// return 0 -//} -// -///* -// * Destroy a document object. -// */ -// -//YAML_DECLARE(void) -//yaml_document_delete(document *yaml_document_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// tag_directive *yaml_tag_directive_t -// -// context.error = YAML_NO_ERROR // Eliminate a compiler warning. -// -// assert(document) // Non-NULL document object is expected. 
-// -// while (!STACK_EMPTY(&context, document.nodes)) { -// node yaml_node_t = POP(&context, document.nodes) -// yaml_free(node.tag) -// switch (node.type) { -// case YAML_SCALAR_NODE: -// yaml_free(node.data.scalar.value) -// break -// case YAML_SEQUENCE_NODE: -// STACK_DEL(&context, node.data.sequence.items) -// break -// case YAML_MAPPING_NODE: -// STACK_DEL(&context, node.data.mapping.pairs) -// break -// default: -// assert(0) // Should not happen. -// } -// } -// STACK_DEL(&context, document.nodes) -// -// yaml_free(document.version_directive) -// for (tag_directive = document.tag_directives.start -// tag_directive != document.tag_directives.end -// tag_directive++) { -// yaml_free(tag_directive.handle) -// yaml_free(tag_directive.prefix) -// } -// yaml_free(document.tag_directives.start) -// -// memset(document, 0, sizeof(yaml_document_t)) -//} -// -///** -// * Get a document node. -// */ -// -//YAML_DECLARE(yaml_node_t *) -//yaml_document_get_node(document *yaml_document_t, index int) -//{ -// assert(document) // Non-NULL document object is expected. -// -// if (index > 0 && document.nodes.start + index <= document.nodes.top) { -// return document.nodes.start + index - 1 -// } -// return NULL -//} -// -///** -// * Get the root object. -// */ -// -//YAML_DECLARE(yaml_node_t *) -//yaml_document_get_root_node(document *yaml_document_t) -//{ -// assert(document) // Non-NULL document object is expected. -// -// if (document.nodes.top != document.nodes.start) { -// return document.nodes.start -// } -// return NULL -//} -// -///* -// * Add a scalar node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_scalar(document *yaml_document_t, -// tag *yaml_char_t, value *yaml_char_t, length int, -// style yaml_scalar_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// value_copy *yaml_char_t = NULL -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. -// assert(value) // Non-NULL value is expected. -// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (length < 0) { -// length = strlen((char *)value) -// } -// -// if (!yaml_check_utf8(value, length)) goto error -// value_copy = yaml_malloc(length+1) -// if (!value_copy) goto error -// memcpy(value_copy, value, length) -// value_copy[length] = '\0' -// -// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// yaml_free(tag_copy) -// yaml_free(value_copy) -// -// return 0 -//} -// -///* -// * Add a sequence node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_sequence(document *yaml_document_t, -// tag *yaml_char_t, style yaml_sequence_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// struct { -// start *yaml_node_item_t -// end *yaml_node_item_t -// top *yaml_node_item_t -// } items = { NULL, NULL, NULL } -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. 
-// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error -// -// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, -// style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// STACK_DEL(&context, items) -// yaml_free(tag_copy) -// -// return 0 -//} -// -///* -// * Add a mapping node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_mapping(document *yaml_document_t, -// tag *yaml_char_t, style yaml_mapping_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// struct { -// start *yaml_node_pair_t -// end *yaml_node_pair_t -// top *yaml_node_pair_t -// } pairs = { NULL, NULL, NULL } -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. -// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error -// -// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, -// style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// STACK_DEL(&context, pairs) -// yaml_free(tag_copy) -// -// return 0 -//} -// -///* -// * Append an item to a sequence node. -// */ -// -//YAML_DECLARE(int) -//yaml_document_append_sequence_item(document *yaml_document_t, -// sequence int, item int) -//{ -// struct { -// error yaml_error_type_t -// } context -// -// assert(document) // Non-NULL document is required. -// assert(sequence > 0 -// && document.nodes.start + sequence <= document.nodes.top) -// // Valid sequence id is required. -// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) -// // A sequence node is required. -// assert(item > 0 && document.nodes.start + item <= document.nodes.top) -// // Valid item id is required. -// -// if (!PUSH(&context, -// document.nodes.start[sequence-1].data.sequence.items, item)) -// return 0 -// -// return 1 -//} -// -///* -// * Append a pair of a key and a value to a mapping node. -// */ -// -//YAML_DECLARE(int) -//yaml_document_append_mapping_pair(document *yaml_document_t, -// mapping int, key int, value int) -//{ -// struct { -// error yaml_error_type_t -// } context -// -// pair yaml_node_pair_t -// -// assert(document) // Non-NULL document is required. -// assert(mapping > 0 -// && document.nodes.start + mapping <= document.nodes.top) -// // Valid mapping id is required. -// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) -// // A mapping node is required. -// assert(key > 0 && document.nodes.start + key <= document.nodes.top) -// // Valid key id is required. -// assert(value > 0 && document.nodes.start + value <= document.nodes.top) -// // Valid value id is required. 
-// -// pair.key = key -// pair.value = value -// -// if (!PUSH(&context, -// document.nodes.start[mapping-1].data.mapping.pairs, pair)) -// return 0 -// -// return 1 -//} -// -// diff --git a/vendor/gopkg.in/yaml.v3/decode.go b/vendor/gopkg.in/yaml.v3/decode.go deleted file mode 100644 index be63169..0000000 --- a/vendor/gopkg.in/yaml.v3/decode.go +++ /dev/null @@ -1,931 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package yaml - -import ( - "encoding" - "encoding/base64" - "fmt" - "io" - "math" - "reflect" - "strconv" - "time" -) - -// ---------------------------------------------------------------------------- -// Parser, produces a node tree out of a libyaml event stream. - -type parser struct { - parser yaml_parser_t - event yaml_event_t - doc *Node - anchors map[string]*Node - doneInit bool -} - -func newParser(b []byte) *parser { - p := parser{} - if !yaml_parser_initialize(&p.parser) { - panic("failed to initialize YAML emitter") - } - if len(b) == 0 { - b = []byte{'\n'} - } - yaml_parser_set_input_string(&p.parser, b) - return &p -} - -func newParserFromReader(r io.Reader) *parser { - p := parser{} - if !yaml_parser_initialize(&p.parser) { - panic("failed to initialize YAML emitter") - } - yaml_parser_set_input_reader(&p.parser, r) - return &p -} - -func (p *parser) init() { - if p.doneInit { - return - } - p.anchors = make(map[string]*Node) - p.expect(yaml_STREAM_START_EVENT) - p.doneInit = true -} - -func (p *parser) destroy() { - if p.event.typ != yaml_NO_EVENT { - yaml_event_delete(&p.event) - } - yaml_parser_delete(&p.parser) -} - -// expect consumes an event from the event stream and -// checks that it's of the expected type. -func (p *parser) expect(e yaml_event_type_t) { - if p.event.typ == yaml_NO_EVENT { - if !yaml_parser_parse(&p.parser, &p.event) { - p.fail() - } - } - if p.event.typ == yaml_STREAM_END_EVENT { - failf("attempted to go past the end of stream; corrupted value?") - } - if p.event.typ != e { - p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) - p.fail() - } - yaml_event_delete(&p.event) - p.event.typ = yaml_NO_EVENT -} - -// peek peeks at the next event in the event stream, -// puts the results into p.event and returns the event type. 
-func (p *parser) peek() yaml_event_type_t { - if p.event.typ != yaml_NO_EVENT { - return p.event.typ - } - if !yaml_parser_parse(&p.parser, &p.event) { - p.fail() - } - return p.event.typ -} - -func (p *parser) fail() { - var where string - var line int - if p.parser.problem_mark.line != 0 { - line = p.parser.problem_mark.line - // Scanner errors don't iterate line before returning error - if p.parser.error == yaml_SCANNER_ERROR { - line++ - } - } else if p.parser.context_mark.line != 0 { - line = p.parser.context_mark.line - } - if line != 0 { - where = "line " + strconv.Itoa(line) + ": " - } - var msg string - if len(p.parser.problem) > 0 { - msg = p.parser.problem - } else { - msg = "unknown problem parsing YAML content" - } - failf("%s%s", where, msg) -} - -func (p *parser) anchor(n *Node, anchor []byte) { - if anchor != nil { - n.Anchor = string(anchor) - p.anchors[n.Anchor] = n - } -} - -func (p *parser) parse() *Node { - p.init() - switch p.peek() { - case yaml_SCALAR_EVENT: - return p.scalar() - case yaml_ALIAS_EVENT: - return p.alias() - case yaml_MAPPING_START_EVENT: - return p.mapping() - case yaml_SEQUENCE_START_EVENT: - return p.sequence() - case yaml_DOCUMENT_START_EVENT: - return p.document() - case yaml_STREAM_END_EVENT: - // Happens when attempting to decode an empty buffer. - return nil - case yaml_TAIL_COMMENT_EVENT: - panic("internal error: unexpected tail comment event (please report)") - default: - panic("internal error: attempted to parse unknown event (please report): " + p.event.typ.String()) - } -} - -func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node { - var style Style - if tag != "" && tag != "!" { - tag = shortTag(tag) - style = TaggedStyle - } else if defaultTag != "" { - tag = defaultTag - } else if kind == ScalarNode { - tag, _ = resolve("", value) - } - return &Node{ - Kind: kind, - Tag: tag, - Value: value, - Style: style, - Line: p.event.start_mark.line + 1, - Column: p.event.start_mark.column + 1, - HeadComment: string(p.event.head_comment), - LineComment: string(p.event.line_comment), - FootComment: string(p.event.foot_comment), - } -} - -func (p *parser) parseChild(parent *Node) *Node { - child := p.parse() - parent.Content = append(parent.Content, child) - return child -} - -func (p *parser) document() *Node { - n := p.node(DocumentNode, "", "", "") - p.doc = n - p.expect(yaml_DOCUMENT_START_EVENT) - p.parseChild(n) - if p.peek() == yaml_DOCUMENT_END_EVENT { - n.FootComment = string(p.event.foot_comment) - } - p.expect(yaml_DOCUMENT_END_EVENT) - return n -} - -func (p *parser) alias() *Node { - n := p.node(AliasNode, "", "", string(p.event.anchor)) - n.Alias = p.anchors[n.Value] - if n.Alias == nil { - failf("unknown anchor '%s' referenced", n.Value) - } - p.expect(yaml_ALIAS_EVENT) - return n -} - -func (p *parser) scalar() *Node { - var parsedStyle = p.event.scalar_style() - var nodeStyle Style - switch { - case parsedStyle&yaml_DOUBLE_QUOTED_SCALAR_STYLE != 0: - nodeStyle = DoubleQuotedStyle - case parsedStyle&yaml_SINGLE_QUOTED_SCALAR_STYLE != 0: - nodeStyle = SingleQuotedStyle - case parsedStyle&yaml_LITERAL_SCALAR_STYLE != 0: - nodeStyle = LiteralStyle - case parsedStyle&yaml_FOLDED_SCALAR_STYLE != 0: - nodeStyle = FoldedStyle - } - var nodeValue = string(p.event.value) - var nodeTag = string(p.event.tag) - var defaultTag string - if nodeStyle == 0 { - if nodeValue == "<<" { - defaultTag = mergeTag - } - } else { - defaultTag = strTag - } - n := p.node(ScalarNode, defaultTag, nodeTag, nodeValue) - n.Style |= nodeStyle - 
p.anchor(n, p.event.anchor) - p.expect(yaml_SCALAR_EVENT) - return n -} - -func (p *parser) sequence() *Node { - n := p.node(SequenceNode, seqTag, string(p.event.tag), "") - if p.event.sequence_style()&yaml_FLOW_SEQUENCE_STYLE != 0 { - n.Style |= FlowStyle - } - p.anchor(n, p.event.anchor) - p.expect(yaml_SEQUENCE_START_EVENT) - for p.peek() != yaml_SEQUENCE_END_EVENT { - p.parseChild(n) - } - n.LineComment = string(p.event.line_comment) - n.FootComment = string(p.event.foot_comment) - p.expect(yaml_SEQUENCE_END_EVENT) - return n -} - -func (p *parser) mapping() *Node { - n := p.node(MappingNode, mapTag, string(p.event.tag), "") - block := true - if p.event.mapping_style()&yaml_FLOW_MAPPING_STYLE != 0 { - block = false - n.Style |= FlowStyle - } - p.anchor(n, p.event.anchor) - p.expect(yaml_MAPPING_START_EVENT) - for p.peek() != yaml_MAPPING_END_EVENT { - k := p.parseChild(n) - if block && k.FootComment != "" { - // Must be a foot comment for the prior value when being dedented. - if len(n.Content) > 2 { - n.Content[len(n.Content)-3].FootComment = k.FootComment - k.FootComment = "" - } - } - v := p.parseChild(n) - if k.FootComment == "" && v.FootComment != "" { - k.FootComment = v.FootComment - v.FootComment = "" - } - if p.peek() == yaml_TAIL_COMMENT_EVENT { - if k.FootComment == "" { - k.FootComment = string(p.event.foot_comment) - } - p.expect(yaml_TAIL_COMMENT_EVENT) - } - } - n.LineComment = string(p.event.line_comment) - n.FootComment = string(p.event.foot_comment) - if n.Style&FlowStyle == 0 && n.FootComment != "" && len(n.Content) > 1 { - n.Content[len(n.Content)-2].FootComment = n.FootComment - n.FootComment = "" - } - p.expect(yaml_MAPPING_END_EVENT) - return n -} - -// ---------------------------------------------------------------------------- -// Decoder, unmarshals a node into a provided value. - -type decoder struct { - doc *Node - aliases map[*Node]bool - terrors []string - - stringMapType reflect.Type - generalMapType reflect.Type - - knownFields bool - uniqueKeys bool - decodeCount int - aliasCount int - aliasDepth int -} - -var ( - nodeType = reflect.TypeOf(Node{}) - durationType = reflect.TypeOf(time.Duration(0)) - stringMapType = reflect.TypeOf(map[string]interface{}{}) - generalMapType = reflect.TypeOf(map[interface{}]interface{}{}) - ifaceType = generalMapType.Elem() - timeType = reflect.TypeOf(time.Time{}) - ptrTimeType = reflect.TypeOf(&time.Time{}) -) - -func newDecoder() *decoder { - d := &decoder{ - stringMapType: stringMapType, - generalMapType: generalMapType, - uniqueKeys: true, - } - d.aliases = make(map[*Node]bool) - return d -} - -func (d *decoder) terror(n *Node, tag string, out reflect.Value) { - if n.Tag != "" { - tag = n.Tag - } - value := n.Value - if tag != seqTag && tag != mapTag { - if len(value) > 10 { - value = " `" + value[:7] + "...`" - } else { - value = " `" + value + "`" - } - } - d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.Line, shortTag(tag), value, out.Type())) -} - -func (d *decoder) callUnmarshaler(n *Node, u Unmarshaler) (good bool) { - err := u.UnmarshalYAML(n) - if e, ok := err.(*TypeError); ok { - d.terrors = append(d.terrors, e.Errors...) 
- return false - } - if err != nil { - fail(err) - } - return true -} - -func (d *decoder) callObsoleteUnmarshaler(n *Node, u obsoleteUnmarshaler) (good bool) { - terrlen := len(d.terrors) - err := u.UnmarshalYAML(func(v interface{}) (err error) { - defer handleErr(&err) - d.unmarshal(n, reflect.ValueOf(v)) - if len(d.terrors) > terrlen { - issues := d.terrors[terrlen:] - d.terrors = d.terrors[:terrlen] - return &TypeError{issues} - } - return nil - }) - if e, ok := err.(*TypeError); ok { - d.terrors = append(d.terrors, e.Errors...) - return false - } - if err != nil { - fail(err) - } - return true -} - -// d.prepare initializes and dereferences pointers and calls UnmarshalYAML -// if a value is found to implement it. -// It returns the initialized and dereferenced out value, whether -// unmarshalling was already done by UnmarshalYAML, and if so whether -// its types unmarshalled appropriately. -// -// If n holds a null value, prepare returns before doing anything. -func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { - if n.ShortTag() == nullTag { - return out, false, false - } - again := true - for again { - again = false - if out.Kind() == reflect.Ptr { - if out.IsNil() { - out.Set(reflect.New(out.Type().Elem())) - } - out = out.Elem() - again = true - } - if out.CanAddr() { - outi := out.Addr().Interface() - if u, ok := outi.(Unmarshaler); ok { - good = d.callUnmarshaler(n, u) - return out, true, good - } - if u, ok := outi.(obsoleteUnmarshaler); ok { - good = d.callObsoleteUnmarshaler(n, u) - return out, true, good - } - } - } - return out, false, false -} - -func (d *decoder) fieldByIndex(n *Node, v reflect.Value, index []int) (field reflect.Value) { - if n.ShortTag() == nullTag { - return reflect.Value{} - } - for _, num := range index { - for { - if v.Kind() == reflect.Ptr { - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - v = v.Elem() - continue - } - break - } - v = v.Field(num) - } - return v -} - -const ( - // 400,000 decode operations is ~500kb of dense object declarations, or - // ~5kb of dense object declarations with 10000% alias expansion - alias_ratio_range_low = 400000 - - // 4,000,000 decode operations is ~5MB of dense object declarations, or - // ~4.5MB of dense object declarations with 10% alias expansion - alias_ratio_range_high = 4000000 - - // alias_ratio_range is the range over which we scale allowed alias ratios - alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) -) - -func allowedAliasRatio(decodeCount int) float64 { - switch { - case decodeCount <= alias_ratio_range_low: - // allow 99% to come from alias expansion for small-to-medium documents - return 0.99 - case decodeCount >= alias_ratio_range_high: - // allow 10% to come from alias expansion for very large documents - return 0.10 - default: - // scale smoothly from 99% down to 10% over the range. - // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. - // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). 
- return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) - } -} - -func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) { - d.decodeCount++ - if d.aliasDepth > 0 { - d.aliasCount++ - } - if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { - failf("document contains excessive aliasing") - } - if out.Type() == nodeType { - out.Set(reflect.ValueOf(n).Elem()) - return true - } - switch n.Kind { - case DocumentNode: - return d.document(n, out) - case AliasNode: - return d.alias(n, out) - } - out, unmarshaled, good := d.prepare(n, out) - if unmarshaled { - return good - } - switch n.Kind { - case ScalarNode: - good = d.scalar(n, out) - case MappingNode: - good = d.mapping(n, out) - case SequenceNode: - good = d.sequence(n, out) - default: - panic("internal error: unknown node kind: " + strconv.Itoa(int(n.Kind))) - } - return good -} - -func (d *decoder) document(n *Node, out reflect.Value) (good bool) { - if len(n.Content) == 1 { - d.doc = n - d.unmarshal(n.Content[0], out) - return true - } - return false -} - -func (d *decoder) alias(n *Node, out reflect.Value) (good bool) { - if d.aliases[n] { - // TODO this could actually be allowed in some circumstances. - failf("anchor '%s' value contains itself", n.Value) - } - d.aliases[n] = true - d.aliasDepth++ - good = d.unmarshal(n.Alias, out) - d.aliasDepth-- - delete(d.aliases, n) - return good -} - -var zeroValue reflect.Value - -func resetMap(out reflect.Value) { - for _, k := range out.MapKeys() { - out.SetMapIndex(k, zeroValue) - } -} - -func (d *decoder) scalar(n *Node, out reflect.Value) bool { - var tag string - var resolved interface{} - if n.indicatedString() { - tag = strTag - resolved = n.Value - } else { - tag, resolved = resolve(n.Tag, n.Value) - if tag == binaryTag { - data, err := base64.StdEncoding.DecodeString(resolved.(string)) - if err != nil { - failf("!!binary value contains invalid base64 data") - } - resolved = string(data) - } - } - if resolved == nil { - if out.CanAddr() { - switch out.Kind() { - case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: - out.Set(reflect.Zero(out.Type())) - return true - } - } - return false - } - if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { - // We've resolved to exactly the type we want, so use that. - out.Set(resolvedv) - return true - } - // Perhaps we can use the value as a TextUnmarshaler to - // set its value. - if out.CanAddr() { - u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) - if ok { - var text []byte - if tag == binaryTag { - text = []byte(resolved.(string)) - } else { - // We let any value be unmarshaled into TextUnmarshaler. - // That might be more lax than we'd like, but the - // TextUnmarshaler itself should bowl out any dubious values. - text = []byte(n.Value) - } - err := u.UnmarshalText(text) - if err != nil { - fail(err) - } - return true - } - } - switch out.Kind() { - case reflect.String: - if tag == binaryTag { - out.SetString(resolved.(string)) - return true - } - out.SetString(n.Value) - return true - case reflect.Interface: - out.Set(reflect.ValueOf(resolved)) - return true - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - // This used to work in v2, but it's very unfriendly. 
- isDuration := out.Type() == durationType - - switch resolved := resolved.(type) { - case int: - if !isDuration && !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - return true - } - case int64: - if !isDuration && !out.OverflowInt(resolved) { - out.SetInt(resolved) - return true - } - case uint64: - if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - return true - } - case float64: - if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - return true - } - case string: - if out.Type() == durationType { - d, err := time.ParseDuration(resolved) - if err == nil { - out.SetInt(int64(d)) - return true - } - } - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - switch resolved := resolved.(type) { - case int: - if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - case int64: - if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - case uint64: - if !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - case float64: - if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - } - case reflect.Bool: - switch resolved := resolved.(type) { - case bool: - out.SetBool(resolved) - return true - case string: - // This offers some compatibility with the 1.1 spec (https://yaml.org/type/bool.html). - // It only works if explicitly attempting to unmarshal into a typed bool value. - switch resolved { - case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON": - out.SetBool(true) - return true - case "n", "N", "no", "No", "NO", "off", "Off", "OFF": - out.SetBool(false) - return true - } - } - case reflect.Float32, reflect.Float64: - switch resolved := resolved.(type) { - case int: - out.SetFloat(float64(resolved)) - return true - case int64: - out.SetFloat(float64(resolved)) - return true - case uint64: - out.SetFloat(float64(resolved)) - return true - case float64: - out.SetFloat(resolved) - return true - } - case reflect.Struct: - if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { - out.Set(resolvedv) - return true - } - case reflect.Ptr: - panic("yaml internal error: please report the issue") - } - d.terror(n, tag, out) - return false -} - -func settableValueOf(i interface{}) reflect.Value { - v := reflect.ValueOf(i) - sv := reflect.New(v.Type()).Elem() - sv.Set(v) - return sv -} - -func (d *decoder) sequence(n *Node, out reflect.Value) (good bool) { - l := len(n.Content) - - var iface reflect.Value - switch out.Kind() { - case reflect.Slice: - out.Set(reflect.MakeSlice(out.Type(), l, l)) - case reflect.Array: - if l != out.Len() { - failf("invalid array: want %d elements but got %d", out.Len(), l) - } - case reflect.Interface: - // No type hints. Will have to use a generic sequence. 
- iface = out - out = settableValueOf(make([]interface{}, l)) - default: - d.terror(n, seqTag, out) - return false - } - et := out.Type().Elem() - - j := 0 - for i := 0; i < l; i++ { - e := reflect.New(et).Elem() - if ok := d.unmarshal(n.Content[i], e); ok { - out.Index(j).Set(e) - j++ - } - } - if out.Kind() != reflect.Array { - out.Set(out.Slice(0, j)) - } - if iface.IsValid() { - iface.Set(out) - } - return true -} - -func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { - l := len(n.Content) - if d.uniqueKeys { - nerrs := len(d.terrors) - for i := 0; i < l; i += 2 { - ni := n.Content[i] - for j := i + 2; j < l; j += 2 { - nj := n.Content[j] - if ni.Kind == nj.Kind && ni.Value == nj.Value { - d.terrors = append(d.terrors, fmt.Sprintf("line %d: mapping key %#v already defined at line %d", nj.Line, nj.Value, ni.Line)) - } - } - } - if len(d.terrors) > nerrs { - return false - } - } - switch out.Kind() { - case reflect.Struct: - return d.mappingStruct(n, out) - case reflect.Map: - // okay - case reflect.Interface: - iface := out - if isStringMap(n) { - out = reflect.MakeMap(d.stringMapType) - } else { - out = reflect.MakeMap(d.generalMapType) - } - iface.Set(out) - default: - d.terror(n, mapTag, out) - return false - } - - outt := out.Type() - kt := outt.Key() - et := outt.Elem() - - stringMapType := d.stringMapType - generalMapType := d.generalMapType - if outt.Elem() == ifaceType { - if outt.Key().Kind() == reflect.String { - d.stringMapType = outt - } else if outt.Key() == ifaceType { - d.generalMapType = outt - } - } - - if out.IsNil() { - out.Set(reflect.MakeMap(outt)) - } - for i := 0; i < l; i += 2 { - if isMerge(n.Content[i]) { - d.merge(n.Content[i+1], out) - continue - } - k := reflect.New(kt).Elem() - if d.unmarshal(n.Content[i], k) { - kkind := k.Kind() - if kkind == reflect.Interface { - kkind = k.Elem().Kind() - } - if kkind == reflect.Map || kkind == reflect.Slice { - failf("invalid map key: %#v", k.Interface()) - } - e := reflect.New(et).Elem() - if d.unmarshal(n.Content[i+1], e) { - out.SetMapIndex(k, e) - } - } - } - d.stringMapType = stringMapType - d.generalMapType = generalMapType - return true -} - -func isStringMap(n *Node) bool { - if n.Kind != MappingNode { - return false - } - l := len(n.Content) - for i := 0; i < l; i += 2 { - if n.Content[i].ShortTag() != strTag { - return false - } - } - return true -} - -func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { - sinfo, err := getStructInfo(out.Type()) - if err != nil { - panic(err) - } - - var inlineMap reflect.Value - var elemType reflect.Type - if sinfo.InlineMap != -1 { - inlineMap = out.Field(sinfo.InlineMap) - inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) - elemType = inlineMap.Type().Elem() - } - - for _, index := range sinfo.InlineUnmarshalers { - field := d.fieldByIndex(n, out, index) - d.prepare(n, field) - } - - var doneFields []bool - if d.uniqueKeys { - doneFields = make([]bool, len(sinfo.FieldsList)) - } - name := settableValueOf("") - l := len(n.Content) - for i := 0; i < l; i += 2 { - ni := n.Content[i] - if isMerge(ni) { - d.merge(n.Content[i+1], out) - continue - } - if !d.unmarshal(ni, name) { - continue - } - if info, ok := sinfo.FieldsMap[name.String()]; ok { - if d.uniqueKeys { - if doneFields[info.Id] { - d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type())) - continue - } - doneFields[info.Id] = true - } - var field reflect.Value - if info.Inline == nil { - field = 
out.Field(info.Num) - } else { - field = d.fieldByIndex(n, out, info.Inline) - } - d.unmarshal(n.Content[i+1], field) - } else if sinfo.InlineMap != -1 { - if inlineMap.IsNil() { - inlineMap.Set(reflect.MakeMap(inlineMap.Type())) - } - value := reflect.New(elemType).Elem() - d.unmarshal(n.Content[i+1], value) - inlineMap.SetMapIndex(name, value) - } else if d.knownFields { - d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type())) - } - } - return true -} - -func failWantMap() { - failf("map merge requires map or sequence of maps as the value") -} - -func (d *decoder) merge(n *Node, out reflect.Value) { - switch n.Kind { - case MappingNode: - d.unmarshal(n, out) - case AliasNode: - if n.Alias != nil && n.Alias.Kind != MappingNode { - failWantMap() - } - d.unmarshal(n, out) - case SequenceNode: - // Step backwards as earlier nodes take precedence. - for i := len(n.Content) - 1; i >= 0; i-- { - ni := n.Content[i] - if ni.Kind == AliasNode { - if ni.Alias != nil && ni.Alias.Kind != MappingNode { - failWantMap() - } - } else if ni.Kind != MappingNode { - failWantMap() - } - d.unmarshal(ni, out) - } - default: - failWantMap() - } -} - -func isMerge(n *Node) bool { - return n.Kind == ScalarNode && n.Value == "<<" && (n.Tag == "" || n.Tag == "!" || shortTag(n.Tag) == mergeTag) -} diff --git a/vendor/gopkg.in/yaml.v3/emitterc.go b/vendor/gopkg.in/yaml.v3/emitterc.go deleted file mode 100644 index ab2a066..0000000 --- a/vendor/gopkg.in/yaml.v3/emitterc.go +++ /dev/null @@ -1,1992 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// Copyright (c) 2006-2010 Kirill Simonov -// -// Permission is hereby granted, free of charge, to any person obtaining a copy of -// this software and associated documentation files (the "Software"), to deal in -// the Software without restriction, including without limitation the rights to -// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -// of the Software, and to permit persons to whom the Software is furnished to do -// so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -package yaml - -import ( - "bytes" - "fmt" -) - -// Flush the buffer if needed. -func flush(emitter *yaml_emitter_t) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) { - return yaml_emitter_flush(emitter) - } - return true -} - -// Put a character to the output buffer. -func put(emitter *yaml_emitter_t, value byte) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - emitter.buffer[emitter.buffer_pos] = value - emitter.buffer_pos++ - emitter.column++ - return true -} - -// Put a line break to the output buffer. 
-func put_break(emitter *yaml_emitter_t) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - switch emitter.line_break { - case yaml_CR_BREAK: - emitter.buffer[emitter.buffer_pos] = '\r' - emitter.buffer_pos += 1 - case yaml_LN_BREAK: - emitter.buffer[emitter.buffer_pos] = '\n' - emitter.buffer_pos += 1 - case yaml_CRLN_BREAK: - emitter.buffer[emitter.buffer_pos+0] = '\r' - emitter.buffer[emitter.buffer_pos+1] = '\n' - emitter.buffer_pos += 2 - default: - panic("unknown line break setting") - } - if emitter.column == 0 { - emitter.space_above = true - } - emitter.column = 0 - emitter.line++ - // [Go] Do this here and below and drop from everywhere else (see commented lines). - emitter.indention = true - return true -} - -// Copy a character from a string into buffer. -func write(emitter *yaml_emitter_t, s []byte, i *int) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - p := emitter.buffer_pos - w := width(s[*i]) - switch w { - case 4: - emitter.buffer[p+3] = s[*i+3] - fallthrough - case 3: - emitter.buffer[p+2] = s[*i+2] - fallthrough - case 2: - emitter.buffer[p+1] = s[*i+1] - fallthrough - case 1: - emitter.buffer[p+0] = s[*i+0] - default: - panic("unknown character width") - } - emitter.column++ - emitter.buffer_pos += w - *i += w - return true -} - -// Write a whole string into buffer. -func write_all(emitter *yaml_emitter_t, s []byte) bool { - for i := 0; i < len(s); { - if !write(emitter, s, &i) { - return false - } - } - return true -} - -// Copy a line break character from a string into buffer. -func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { - if s[*i] == '\n' { - if !put_break(emitter) { - return false - } - *i++ - } else { - if !write(emitter, s, i) { - return false - } - if emitter.column == 0 { - emitter.space_above = true - } - emitter.column = 0 - emitter.line++ - // [Go] Do this here and above and drop from everywhere else (see commented lines). - emitter.indention = true - } - return true -} - -// Set an emitter error and return false. -func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_EMITTER_ERROR - emitter.problem = problem - return false -} - -// Emit an event. -func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.events = append(emitter.events, *event) - for !yaml_emitter_need_more_events(emitter) { - event := &emitter.events[emitter.events_head] - if !yaml_emitter_analyze_event(emitter, event) { - return false - } - if !yaml_emitter_state_machine(emitter, event) { - return false - } - yaml_event_delete(event) - emitter.events_head++ - } - return true -} - -// Check if we need to accumulate more events before emitting. 
-// -// We accumulate extra -// - 1 event for DOCUMENT-START -// - 2 events for SEQUENCE-START -// - 3 events for MAPPING-START -// -func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { - if emitter.events_head == len(emitter.events) { - return true - } - var accumulate int - switch emitter.events[emitter.events_head].typ { - case yaml_DOCUMENT_START_EVENT: - accumulate = 1 - break - case yaml_SEQUENCE_START_EVENT: - accumulate = 2 - break - case yaml_MAPPING_START_EVENT: - accumulate = 3 - break - default: - return false - } - if len(emitter.events)-emitter.events_head > accumulate { - return false - } - var level int - for i := emitter.events_head; i < len(emitter.events); i++ { - switch emitter.events[i].typ { - case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: - level++ - case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: - level-- - } - if level == 0 { - return false - } - } - return true -} - -// Append a directive to the directives stack. -func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { - for i := 0; i < len(emitter.tag_directives); i++ { - if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { - if allow_duplicates { - return true - } - return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") - } - } - - // [Go] Do we actually need to copy this given garbage collection - // and the lack of deallocating destructors? - tag_copy := yaml_tag_directive_t{ - handle: make([]byte, len(value.handle)), - prefix: make([]byte, len(value.prefix)), - } - copy(tag_copy.handle, value.handle) - copy(tag_copy.prefix, value.prefix) - emitter.tag_directives = append(emitter.tag_directives, tag_copy) - return true -} - -// Increase the indentation level. -func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { - emitter.indents = append(emitter.indents, emitter.indent) - if emitter.indent < 0 { - if flow { - emitter.indent = emitter.best_indent - } else { - emitter.indent = 0 - } - } else if !indentless { - emitter.indent += emitter.best_indent - // [Go] If inside a block sequence item, discount the space taken by the indicator. - if emitter.best_indent > 2 && emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE { - emitter.indent -= 2 - } - } - return true -} - -// State dispatcher. 
-func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { - switch emitter.state { - default: - case yaml_EMIT_STREAM_START_STATE: - return yaml_emitter_emit_stream_start(emitter, event) - - case yaml_EMIT_FIRST_DOCUMENT_START_STATE: - return yaml_emitter_emit_document_start(emitter, event, true) - - case yaml_EMIT_DOCUMENT_START_STATE: - return yaml_emitter_emit_document_start(emitter, event, false) - - case yaml_EMIT_DOCUMENT_CONTENT_STATE: - return yaml_emitter_emit_document_content(emitter, event) - - case yaml_EMIT_DOCUMENT_END_STATE: - return yaml_emitter_emit_document_end(emitter, event) - - case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, true, false) - - case yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, false, true) - - case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, false, false) - - case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, true, false) - - case yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, false, true) - - case yaml_EMIT_FLOW_MAPPING_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, false, false) - - case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: - return yaml_emitter_emit_flow_mapping_value(emitter, event, true) - - case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: - return yaml_emitter_emit_flow_mapping_value(emitter, event, false) - - case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_block_sequence_item(emitter, event, true) - - case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_block_sequence_item(emitter, event, false) - - case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_block_mapping_key(emitter, event, true) - - case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: - return yaml_emitter_emit_block_mapping_key(emitter, event, false) - - case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: - return yaml_emitter_emit_block_mapping_value(emitter, event, true) - - case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: - return yaml_emitter_emit_block_mapping_value(emitter, event, false) - - case yaml_EMIT_END_STATE: - return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") - } - panic("invalid emitter state") -} - -// Expect STREAM-START. 
-func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if event.typ != yaml_STREAM_START_EVENT { - return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") - } - if emitter.encoding == yaml_ANY_ENCODING { - emitter.encoding = event.encoding - if emitter.encoding == yaml_ANY_ENCODING { - emitter.encoding = yaml_UTF8_ENCODING - } - } - if emitter.best_indent < 2 || emitter.best_indent > 9 { - emitter.best_indent = 2 - } - if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { - emitter.best_width = 80 - } - if emitter.best_width < 0 { - emitter.best_width = 1<<31 - 1 - } - if emitter.line_break == yaml_ANY_BREAK { - emitter.line_break = yaml_LN_BREAK - } - - emitter.indent = -1 - emitter.line = 0 - emitter.column = 0 - emitter.whitespace = true - emitter.indention = true - emitter.space_above = true - emitter.foot_indent = -1 - - if emitter.encoding != yaml_UTF8_ENCODING { - if !yaml_emitter_write_bom(emitter) { - return false - } - } - emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE - return true -} - -// Expect DOCUMENT-START or STREAM-END. -func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - - if event.typ == yaml_DOCUMENT_START_EVENT { - - if event.version_directive != nil { - if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { - return false - } - } - - for i := 0; i < len(event.tag_directives); i++ { - tag_directive := &event.tag_directives[i] - if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { - return false - } - if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { - return false - } - } - - for i := 0; i < len(default_tag_directives); i++ { - tag_directive := &default_tag_directives[i] - if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { - return false - } - } - - implicit := event.implicit - if !first || emitter.canonical { - implicit = false - } - - if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if event.version_directive != nil { - implicit = false - if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if len(event.tag_directives) > 0 { - implicit = false - for i := 0; i < len(event.tag_directives); i++ { - tag_directive := &event.tag_directives[i] - if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { - return false - } - if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { - return false - } - if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - } - - if yaml_emitter_check_empty_document(emitter) { - implicit = false - } - if !implicit { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { - return false - } - if emitter.canonical || true { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - } - - if len(emitter.head_comment) > 0 { - if !yaml_emitter_process_head_comment(emitter) { - return false 
- } - if !put_break(emitter) { - return false - } - } - - emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE - return true - } - - if event.typ == yaml_STREAM_END_EVENT { - if emitter.open_ended { - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_flush(emitter) { - return false - } - emitter.state = yaml_EMIT_END_STATE - return true - } - - return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") -} - -// Expect the root node. -func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) - - if !yaml_emitter_process_head_comment(emitter) { - return false - } - if !yaml_emitter_emit_node(emitter, event, true, false, false, false) { - return false - } - if !yaml_emitter_process_line_comment(emitter) { - return false - } - if !yaml_emitter_process_foot_comment(emitter) { - return false - } - return true -} - -// Expect DOCUMENT-END. -func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if event.typ != yaml_DOCUMENT_END_EVENT { - return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") - } - // [Go] Force document foot separation. - emitter.foot_indent = 0 - if !yaml_emitter_process_foot_comment(emitter) { - return false - } - emitter.foot_indent = -1 - if !yaml_emitter_write_indent(emitter) { - return false - } - if !event.implicit { - // [Go] Allocate the slice elsewhere. - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_flush(emitter) { - return false - } - emitter.state = yaml_EMIT_DOCUMENT_START_STATE - emitter.tag_directives = emitter.tag_directives[:0] - return true -} - -// Expect a flow item node. 
-func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { - if first { - if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - emitter.flow_level++ - } - - if event.typ == yaml_SEQUENCE_END_EVENT { - if emitter.canonical && !first && !trail { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - if emitter.column == 0 || emitter.canonical && !first { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { - return false - } - if !yaml_emitter_process_line_comment(emitter) { - return false - } - if !yaml_emitter_process_foot_comment(emitter) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - - return true - } - - if !first && !trail { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - - if !yaml_emitter_process_head_comment(emitter) { - return false - } - if emitter.column == 0 { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { - emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE) - } else { - emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) - } - if !yaml_emitter_emit_node(emitter, event, false, true, false, false) { - return false - } - if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - if !yaml_emitter_process_line_comment(emitter) { - return false - } - if !yaml_emitter_process_foot_comment(emitter) { - return false - } - return true -} - -// Expect a flow key node. 
-func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { - if first { - if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - emitter.flow_level++ - } - - if event.typ == yaml_MAPPING_END_EVENT { - if (emitter.canonical || len(emitter.head_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0) && !first && !trail { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - if !yaml_emitter_process_head_comment(emitter) { - return false - } - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - if emitter.canonical && !first { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { - return false - } - if !yaml_emitter_process_line_comment(emitter) { - return false - } - if !yaml_emitter_process_foot_comment(emitter) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - - if !first && !trail { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - - if !yaml_emitter_process_head_comment(emitter) { - return false - } - - if emitter.column == 0 { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, true) - } - if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a flow value node. -func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { - if simple { - if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { - return false - } - } else { - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { - return false - } - } - if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE) - } else { - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) - } - if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { - return false - } - if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - if !yaml_emitter_process_line_comment(emitter) { - return false - } - if !yaml_emitter_process_foot_comment(emitter) { - return false - } - return true -} - -// Expect a block item node. 
-func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - // [Go] The original logic here would not indent the sequence when inside a mapping. - // In Go we always indent it, but take the sequence indicator out of the indentation. - indentless := emitter.best_indent == 2 && emitter.mapping_context && (emitter.column == 0 || !emitter.indention) - original := emitter.indent - if !yaml_emitter_increase_indent(emitter, false, indentless) { - return false - } - if emitter.indent > original+2 { - emitter.indent -= 2 - } - } - if event.typ == yaml_SEQUENCE_END_EVENT { - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - if !yaml_emitter_process_head_comment(emitter) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) - if !yaml_emitter_emit_node(emitter, event, false, true, false, false) { - return false - } - if !yaml_emitter_process_line_comment(emitter) { - return false - } - if !yaml_emitter_process_foot_comment(emitter) { - return false - } - return true -} - -// Expect a block key node. -func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_increase_indent(emitter, false, false) { - return false - } - } - if !yaml_emitter_process_head_comment(emitter) { - return false - } - if event.typ == yaml_MAPPING_END_EVENT { - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if yaml_emitter_check_simple_key(emitter) { - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, true) - } - if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a block value node. -func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { - if simple { - if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { - return false - } - } else { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { - return false - } - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) - if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { - return false - } - if !yaml_emitter_process_line_comment(emitter) { - return false - } - if !yaml_emitter_process_foot_comment(emitter) { - return false - } - return true -} - -// Expect a node. 
-func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, - root bool, sequence bool, mapping bool, simple_key bool) bool { - - emitter.root_context = root - emitter.sequence_context = sequence - emitter.mapping_context = mapping - emitter.simple_key_context = simple_key - - switch event.typ { - case yaml_ALIAS_EVENT: - return yaml_emitter_emit_alias(emitter, event) - case yaml_SCALAR_EVENT: - return yaml_emitter_emit_scalar(emitter, event) - case yaml_SEQUENCE_START_EVENT: - return yaml_emitter_emit_sequence_start(emitter, event) - case yaml_MAPPING_START_EVENT: - return yaml_emitter_emit_mapping_start(emitter, event) - default: - return yaml_emitter_set_emitter_error(emitter, - fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) - } -} - -// Expect ALIAS. -func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true -} - -// Expect SCALAR. -func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_select_scalar_style(emitter, event) { - return false - } - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - if !yaml_emitter_process_scalar(emitter) { - return false - } - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true -} - -// Expect SEQUENCE-START. -func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || - yaml_emitter_check_empty_sequence(emitter) { - emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE - } - return true -} - -// Expect MAPPING-START. -func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || - yaml_emitter_check_empty_mapping(emitter) { - emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE - } - return true -} - -// Check if the document content is an empty scalar. -func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { - return false // [Go] Huh? -} - -// Check if the next events represent an empty sequence. -func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && - emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT -} - -// Check if the next events represent an empty mapping. 
-func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && - emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT -} - -// Check if the next node can be expressed as a simple key. -func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { - length := 0 - switch emitter.events[emitter.events_head].typ { - case yaml_ALIAS_EVENT: - length += len(emitter.anchor_data.anchor) - case yaml_SCALAR_EVENT: - if emitter.scalar_data.multiline { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) + - len(emitter.scalar_data.value) - case yaml_SEQUENCE_START_EVENT: - if !yaml_emitter_check_empty_sequence(emitter) { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) - case yaml_MAPPING_START_EVENT: - if !yaml_emitter_check_empty_mapping(emitter) { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) - default: - return false - } - return length <= 128 -} - -// Determine an acceptable scalar style. -func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { - - no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 - if no_tag && !event.implicit && !event.quoted_implicit { - return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") - } - - style := event.scalar_style() - if style == yaml_ANY_SCALAR_STYLE { - style = yaml_PLAIN_SCALAR_STYLE - } - if emitter.canonical { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - if emitter.simple_key_context && emitter.scalar_data.multiline { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - - if style == yaml_PLAIN_SCALAR_STYLE { - if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || - emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - if no_tag && !event.implicit { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - } - if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { - if !emitter.scalar_data.single_quoted_allowed { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - } - if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { - if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - } - - if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { - emitter.tag_data.handle = []byte{'!'} - } - emitter.scalar_data.style = style - return true -} - -// Write an anchor. -func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { - if emitter.anchor_data.anchor == nil { - return true - } - c := []byte{'&'} - if emitter.anchor_data.alias { - c[0] = '*' - } - if !yaml_emitter_write_indicator(emitter, c, true, false, false) { - return false - } - return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) -} - -// Write a tag. 
-func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { - if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { - return true - } - if len(emitter.tag_data.handle) > 0 { - if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { - return false - } - if len(emitter.tag_data.suffix) > 0 { - if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { - return false - } - } - } else { - // [Go] Allocate these slices elsewhere. - if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { - return false - } - if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { - return false - } - } - return true -} - -// Write a scalar. -func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { - switch emitter.scalar_data.style { - case yaml_PLAIN_SCALAR_STYLE: - return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_SINGLE_QUOTED_SCALAR_STYLE: - return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_DOUBLE_QUOTED_SCALAR_STYLE: - return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_LITERAL_SCALAR_STYLE: - return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) - - case yaml_FOLDED_SCALAR_STYLE: - return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) - } - panic("unknown scalar style") -} - -// Write a head comment. -func yaml_emitter_process_head_comment(emitter *yaml_emitter_t) bool { - if len(emitter.tail_comment) > 0 { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_comment(emitter, emitter.tail_comment) { - return false - } - emitter.tail_comment = emitter.tail_comment[:0] - emitter.foot_indent = emitter.indent - if emitter.foot_indent < 0 { - emitter.foot_indent = 0 - } - } - - if len(emitter.head_comment) == 0 { - return true - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_comment(emitter, emitter.head_comment) { - return false - } - emitter.head_comment = emitter.head_comment[:0] - return true -} - -// Write an line comment. -func yaml_emitter_process_line_comment(emitter *yaml_emitter_t) bool { - if len(emitter.line_comment) == 0 { - return true - } - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - if !yaml_emitter_write_comment(emitter, emitter.line_comment) { - return false - } - emitter.line_comment = emitter.line_comment[:0] - return true -} - -// Write a foot comment. -func yaml_emitter_process_foot_comment(emitter *yaml_emitter_t) bool { - if len(emitter.foot_comment) == 0 { - return true - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_comment(emitter, emitter.foot_comment) { - return false - } - emitter.foot_comment = emitter.foot_comment[:0] - emitter.foot_indent = emitter.indent - if emitter.foot_indent < 0 { - emitter.foot_indent = 0 - } - return true -} - -// Check if a %YAML directive is valid. 
-func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { - if version_directive.major != 1 || version_directive.minor != 1 { - return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") - } - return true -} - -// Check if a %TAG directive is valid. -func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { - handle := tag_directive.handle - prefix := tag_directive.prefix - if len(handle) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") - } - if handle[0] != '!' { - return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") - } - if handle[len(handle)-1] != '!' { - return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") - } - for i := 1; i < len(handle)-1; i += width(handle[i]) { - if !is_alpha(handle, i) { - return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") - } - } - if len(prefix) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") - } - return true -} - -// Check if an anchor is valid. -func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { - if len(anchor) == 0 { - problem := "anchor value must not be empty" - if alias { - problem = "alias value must not be empty" - } - return yaml_emitter_set_emitter_error(emitter, problem) - } - for i := 0; i < len(anchor); i += width(anchor[i]) { - if !is_alpha(anchor, i) { - problem := "anchor value must contain alphanumerical characters only" - if alias { - problem = "alias value must contain alphanumerical characters only" - } - return yaml_emitter_set_emitter_error(emitter, problem) - } - } - emitter.anchor_data.anchor = anchor - emitter.anchor_data.alias = alias - return true -} - -// Check if a tag is valid. -func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { - if len(tag) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") - } - for i := 0; i < len(emitter.tag_directives); i++ { - tag_directive := &emitter.tag_directives[i] - if bytes.HasPrefix(tag, tag_directive.prefix) { - emitter.tag_data.handle = tag_directive.handle - emitter.tag_data.suffix = tag[len(tag_directive.prefix):] - return true - } - } - emitter.tag_data.suffix = tag - return true -} - -// Check if a scalar is valid. -func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { - var ( - block_indicators = false - flow_indicators = false - line_breaks = false - special_characters = false - tab_characters = false - - leading_space = false - leading_break = false - trailing_space = false - trailing_break = false - break_space = false - space_break = false - - preceded_by_whitespace = false - followed_by_whitespace = false - previous_space = false - previous_break = false - ) - - emitter.scalar_data.value = value - - if len(value) == 0 { - emitter.scalar_data.multiline = false - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = true - emitter.scalar_data.single_quoted_allowed = true - emitter.scalar_data.block_allowed = false - return true - } - - if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { - block_indicators = true - flow_indicators = true - } - - preceded_by_whitespace = true - for i, w := 0, 0; i < len(value); i += w { - w = width(value[i]) - followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) - - if i == 0 { - switch value[i] { - case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': - flow_indicators = true - block_indicators = true - case '?', ':': - flow_indicators = true - if followed_by_whitespace { - block_indicators = true - } - case '-': - if followed_by_whitespace { - flow_indicators = true - block_indicators = true - } - } - } else { - switch value[i] { - case ',', '?', '[', ']', '{', '}': - flow_indicators = true - case ':': - flow_indicators = true - if followed_by_whitespace { - block_indicators = true - } - case '#': - if preceded_by_whitespace { - flow_indicators = true - block_indicators = true - } - } - } - - if value[i] == '\t' { - tab_characters = true - } else if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { - special_characters = true - } - if is_space(value, i) { - if i == 0 { - leading_space = true - } - if i+width(value[i]) == len(value) { - trailing_space = true - } - if previous_break { - break_space = true - } - previous_space = true - previous_break = false - } else if is_break(value, i) { - line_breaks = true - if i == 0 { - leading_break = true - } - if i+width(value[i]) == len(value) { - trailing_break = true - } - if previous_space { - space_break = true - } - previous_space = false - previous_break = true - } else { - previous_space = false - previous_break = false - } - - // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. - preceded_by_whitespace = is_blankz(value, i) - } - - emitter.scalar_data.multiline = line_breaks - emitter.scalar_data.flow_plain_allowed = true - emitter.scalar_data.block_plain_allowed = true - emitter.scalar_data.single_quoted_allowed = true - emitter.scalar_data.block_allowed = true - - if leading_space || leading_break || trailing_space || trailing_break { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - } - if trailing_space { - emitter.scalar_data.block_allowed = false - } - if break_space { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - emitter.scalar_data.single_quoted_allowed = false - } - if space_break || tab_characters || special_characters { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - emitter.scalar_data.single_quoted_allowed = false - } - if space_break || special_characters { - emitter.scalar_data.block_allowed = false - } - if line_breaks { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - } - if flow_indicators { - emitter.scalar_data.flow_plain_allowed = false - } - if block_indicators { - emitter.scalar_data.block_plain_allowed = false - } - return true -} - -// Check if the event data is valid. 
-func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { - - emitter.anchor_data.anchor = nil - emitter.tag_data.handle = nil - emitter.tag_data.suffix = nil - emitter.scalar_data.value = nil - - if len(event.head_comment) > 0 { - emitter.head_comment = event.head_comment - } - if len(event.line_comment) > 0 { - emitter.line_comment = event.line_comment - } - if len(event.foot_comment) > 0 { - emitter.foot_comment = event.foot_comment - } - if len(event.tail_comment) > 0 { - emitter.tail_comment = event.tail_comment - } - - switch event.typ { - case yaml_ALIAS_EVENT: - if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { - return false - } - - case yaml_SCALAR_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - if !yaml_emitter_analyze_scalar(emitter, event.value) { - return false - } - - case yaml_SEQUENCE_START_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - - case yaml_MAPPING_START_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - } - return true -} - -// Write the BOM character. -func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { - if !flush(emitter) { - return false - } - pos := emitter.buffer_pos - emitter.buffer[pos+0] = '\xEF' - emitter.buffer[pos+1] = '\xBB' - emitter.buffer[pos+2] = '\xBF' - emitter.buffer_pos += 3 - return true -} - -func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { - indent := emitter.indent - if indent < 0 { - indent = 0 - } - if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { - if !put_break(emitter) { - return false - } - } - if emitter.foot_indent == indent { - if !put_break(emitter) { - return false - } - } - for emitter.column < indent { - if !put(emitter, ' ') { - return false - } - } - emitter.whitespace = true - //emitter.indention = true - emitter.space_above = false - emitter.foot_indent = -1 - return true -} - -func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { - if need_whitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - if !write_all(emitter, indicator) { - return false - } - emitter.whitespace = is_whitespace - emitter.indention = (emitter.indention && is_indention) - emitter.open_ended = false - return true -} - -func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { - if !write_all(emitter, value) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - if !write_all(emitter, value) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func 
yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { - if need_whitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - for i := 0; i < len(value); { - var must_write bool - switch value[i] { - case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': - must_write = true - default: - must_write = is_alpha(value, i) - } - if must_write { - if !write(emitter, value, &i) { - return false - } - } else { - w := width(value[i]) - for k := 0; k < w; k++ { - octet := value[i] - i++ - if !put(emitter, '%') { - return false - } - - c := octet >> 4 - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - - c = octet & 0x0f - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - } - } - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - if len(value) > 0 && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - - spaces := false - breaks := false - for i := 0; i < len(value); { - if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if is_break(value, i) { - if !breaks && value[i] == '\n' { - if !put_break(emitter) { - return false - } - } - if !write_break(emitter, value, &i) { - return false - } - //emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = false - breaks = false - } - } - - if len(value) > 0 { - emitter.whitespace = false - } - emitter.indention = false - if emitter.root_context { - emitter.open_ended = true - } - - return true -} - -func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - - if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { - return false - } - - spaces := false - breaks := false - for i := 0; i < len(value); { - if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if is_break(value, i) { - if !breaks && value[i] == '\n' { - if !put_break(emitter) { - return false - } - } - if !write_break(emitter, value, &i) { - return false - } - //emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if value[i] == '\'' { - if !put(emitter, '\'') { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = false - breaks = false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - spaces := false - if 
!yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { - return false - } - - for i := 0; i < len(value); { - if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || - is_bom(value, i) || is_break(value, i) || - value[i] == '"' || value[i] == '\\' { - - octet := value[i] - - var w int - var v rune - switch { - case octet&0x80 == 0x00: - w, v = 1, rune(octet&0x7F) - case octet&0xE0 == 0xC0: - w, v = 2, rune(octet&0x1F) - case octet&0xF0 == 0xE0: - w, v = 3, rune(octet&0x0F) - case octet&0xF8 == 0xF0: - w, v = 4, rune(octet&0x07) - } - for k := 1; k < w; k++ { - octet = value[i+k] - v = (v << 6) + (rune(octet) & 0x3F) - } - i += w - - if !put(emitter, '\\') { - return false - } - - var ok bool - switch v { - case 0x00: - ok = put(emitter, '0') - case 0x07: - ok = put(emitter, 'a') - case 0x08: - ok = put(emitter, 'b') - case 0x09: - ok = put(emitter, 't') - case 0x0A: - ok = put(emitter, 'n') - case 0x0b: - ok = put(emitter, 'v') - case 0x0c: - ok = put(emitter, 'f') - case 0x0d: - ok = put(emitter, 'r') - case 0x1b: - ok = put(emitter, 'e') - case 0x22: - ok = put(emitter, '"') - case 0x5c: - ok = put(emitter, '\\') - case 0x85: - ok = put(emitter, 'N') - case 0xA0: - ok = put(emitter, '_') - case 0x2028: - ok = put(emitter, 'L') - case 0x2029: - ok = put(emitter, 'P') - default: - if v <= 0xFF { - ok = put(emitter, 'x') - w = 2 - } else if v <= 0xFFFF { - ok = put(emitter, 'u') - w = 4 - } else { - ok = put(emitter, 'U') - w = 8 - } - for k := (w - 1) * 4; ok && k >= 0; k -= 4 { - digit := byte((v >> uint(k)) & 0x0F) - if digit < 10 { - ok = put(emitter, digit+'0') - } else { - ok = put(emitter, digit+'A'-10) - } - } - } - if !ok { - return false - } - spaces = false - } else if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { - if !yaml_emitter_write_indent(emitter) { - return false - } - if is_space(value, i+1) { - if !put(emitter, '\\') { - return false - } - } - i += width(value[i]) - } else if !write(emitter, value, &i) { - return false - } - spaces = true - } else { - if !write(emitter, value, &i) { - return false - } - spaces = false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { - if is_space(value, 0) || is_break(value, 0) { - indent_hint := []byte{'0' + byte(emitter.best_indent)} - if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { - return false - } - } - - emitter.open_ended = false - - var chomp_hint [1]byte - if len(value) == 0 { - chomp_hint[0] = '-' - } else { - i := len(value) - 1 - for value[i]&0xC0 == 0x80 { - i-- - } - if !is_break(value, i) { - chomp_hint[0] = '-' - } else if i == 0 { - chomp_hint[0] = '+' - emitter.open_ended = true - } else { - i-- - for value[i]&0xC0 == 0x80 { - i-- - } - if is_break(value, i) { - chomp_hint[0] = '+' - emitter.open_ended = true - } - } - } - if chomp_hint[0] != 0 { - if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { - return false - } - } - return true -} - -func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { - if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { - return false - } - if !yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - if !put_break(emitter) { - 
return false - } - //emitter.indention = true - emitter.whitespace = true - breaks := true - for i := 0; i < len(value); { - if is_break(value, i) { - if !write_break(emitter, value, &i) { - return false - } - //emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - breaks = false - } - } - - return true -} - -func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { - if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { - return false - } - if !yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - - if !put_break(emitter) { - return false - } - //emitter.indention = true - emitter.whitespace = true - - breaks := true - leading_spaces := true - for i := 0; i < len(value); { - if is_break(value, i) { - if !breaks && !leading_spaces && value[i] == '\n' { - k := 0 - for is_break(value, k) { - k += width(value[k]) - } - if !is_blankz(value, k) { - if !put_break(emitter) { - return false - } - } - } - if !write_break(emitter, value, &i) { - return false - } - //emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - leading_spaces = is_blank(value, i) - } - if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - emitter.indention = false - breaks = false - } - } - return true -} - -func yaml_emitter_write_comment(emitter *yaml_emitter_t, comment []byte) bool { - breaks := false - pound := false - for i := 0; i < len(comment); { - if is_break(comment, i) { - if !write_break(emitter, comment, &i) { - return false - } - //emitter.indention = true - breaks = true - pound = false - } else { - if breaks && !yaml_emitter_write_indent(emitter) { - return false - } - if !pound { - if comment[i] != '#' && (!put(emitter, '#') || !put(emitter, ' ')) { - return false - } - pound = true - } - if !write(emitter, comment, &i) { - return false - } - emitter.indention = false - breaks = false - } - } - if !breaks && !put_break(emitter) { - return false - } - - emitter.whitespace = true - //emitter.indention = true - return true -} diff --git a/vendor/gopkg.in/yaml.v3/encode.go b/vendor/gopkg.in/yaml.v3/encode.go deleted file mode 100644 index 1f37271..0000000 --- a/vendor/gopkg.in/yaml.v3/encode.go +++ /dev/null @@ -1,561 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package yaml - -import ( - "encoding" - "fmt" - "io" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -type encoder struct { - emitter yaml_emitter_t - event yaml_event_t - out []byte - flow bool - indent int - doneInit bool -} - -func newEncoder() *encoder { - e := &encoder{} - yaml_emitter_initialize(&e.emitter) - yaml_emitter_set_output_string(&e.emitter, &e.out) - yaml_emitter_set_unicode(&e.emitter, true) - return e -} - -func newEncoderWithWriter(w io.Writer) *encoder { - e := &encoder{} - yaml_emitter_initialize(&e.emitter) - yaml_emitter_set_output_writer(&e.emitter, w) - yaml_emitter_set_unicode(&e.emitter, true) - return e -} - -func (e *encoder) init() { - if e.doneInit { - return - } - if e.indent == 0 { - e.indent = 4 - } - e.emitter.best_indent = e.indent - yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) - e.emit() - e.doneInit = true -} - -func (e *encoder) finish() { - e.emitter.open_ended = false - yaml_stream_end_event_initialize(&e.event) - e.emit() -} - -func (e *encoder) destroy() { - yaml_emitter_delete(&e.emitter) -} - -func (e *encoder) emit() { - // This will internally delete the e.event value. - e.must(yaml_emitter_emit(&e.emitter, &e.event)) -} - -func (e *encoder) must(ok bool) { - if !ok { - msg := e.emitter.problem - if msg == "" { - msg = "unknown problem generating YAML content" - } - failf("%s", msg) - } -} - -func (e *encoder) marshalDoc(tag string, in reflect.Value) { - e.init() - var node *Node - if in.IsValid() { - node, _ = in.Interface().(*Node) - } - if node != nil && node.Kind == DocumentNode { - e.nodev(in) - } else { - yaml_document_start_event_initialize(&e.event, nil, nil, true) - e.emit() - e.marshal(tag, in) - yaml_document_end_event_initialize(&e.event, true) - e.emit() - } -} - -func (e *encoder) marshal(tag string, in reflect.Value) { - tag = shortTag(tag) - if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { - e.nilv() - return - } - iface := in.Interface() - switch value := iface.(type) { - case *Node: - e.nodev(in) - return - case time.Time: - e.timev(tag, in) - return - case *time.Time: - e.timev(tag, in.Elem()) - return - case time.Duration: - e.stringv(tag, reflect.ValueOf(value.String())) - return - case Marshaler: - v, err := value.MarshalYAML() - if err != nil { - fail(err) - } - if v == nil { - e.nilv() - return - } - e.marshal(tag, reflect.ValueOf(v)) - return - case encoding.TextMarshaler: - text, err := value.MarshalText() - if err != nil { - fail(err) - } - in = reflect.ValueOf(string(text)) - case nil: - e.nilv() - return - } - switch in.Kind() { - case reflect.Interface: - e.marshal(tag, in.Elem()) - case reflect.Map: - e.mapv(tag, in) - case reflect.Ptr: - e.marshal(tag, in.Elem()) - case reflect.Struct: - e.structv(tag, in) - case reflect.Slice, reflect.Array: - e.slicev(tag, in) - case reflect.String: - e.stringv(tag, in) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - e.intv(tag, in) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - e.uintv(tag, in) - case reflect.Float32, reflect.Float64: - e.floatv(tag, in) - case reflect.Bool: - e.boolv(tag, in) - default: - panic("cannot marshal type: " + in.Type().String()) - } -} - -func (e *encoder) mapv(tag string, in reflect.Value) { - e.mappingv(tag, func() { - keys := keyList(in.MapKeys()) - sort.Sort(keys) - for _, k := range keys { - e.marshal("", k) - e.marshal("", in.MapIndex(k)) - } - }) -} - -func (e *encoder) 
fieldByIndex(v reflect.Value, index []int) (field reflect.Value) { - for _, num := range index { - for { - if v.Kind() == reflect.Ptr { - if v.IsNil() { - return reflect.Value{} - } - v = v.Elem() - continue - } - break - } - v = v.Field(num) - } - return v -} - -func (e *encoder) structv(tag string, in reflect.Value) { - sinfo, err := getStructInfo(in.Type()) - if err != nil { - panic(err) - } - e.mappingv(tag, func() { - for _, info := range sinfo.FieldsList { - var value reflect.Value - if info.Inline == nil { - value = in.Field(info.Num) - } else { - value = e.fieldByIndex(in, info.Inline) - if !value.IsValid() { - continue - } - } - if info.OmitEmpty && isZero(value) { - continue - } - e.marshal("", reflect.ValueOf(info.Key)) - e.flow = info.Flow - e.marshal("", value) - } - if sinfo.InlineMap >= 0 { - m := in.Field(sinfo.InlineMap) - if m.Len() > 0 { - e.flow = false - keys := keyList(m.MapKeys()) - sort.Sort(keys) - for _, k := range keys { - if _, found := sinfo.FieldsMap[k.String()]; found { - panic(fmt.Sprintf("cannot have key %q in inlined map: conflicts with struct field", k.String())) - } - e.marshal("", k) - e.flow = false - e.marshal("", m.MapIndex(k)) - } - } - } - }) -} - -func (e *encoder) mappingv(tag string, f func()) { - implicit := tag == "" - style := yaml_BLOCK_MAPPING_STYLE - if e.flow { - e.flow = false - style = yaml_FLOW_MAPPING_STYLE - } - yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) - e.emit() - f() - yaml_mapping_end_event_initialize(&e.event) - e.emit() -} - -func (e *encoder) slicev(tag string, in reflect.Value) { - implicit := tag == "" - style := yaml_BLOCK_SEQUENCE_STYLE - if e.flow { - e.flow = false - style = yaml_FLOW_SEQUENCE_STYLE - } - e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) - e.emit() - n := in.Len() - for i := 0; i < n; i++ { - e.marshal("", in.Index(i)) - } - e.must(yaml_sequence_end_event_initialize(&e.event)) - e.emit() -} - -// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1. -// -// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported -// in YAML 1.2 and by this package, but these should be marshalled quoted for -// the time being for compatibility with other parsers. -func isBase60Float(s string) (result bool) { - // Fast path. - if s == "" { - return false - } - c := s[0] - if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { - return false - } - // Do the full match. - return base60float.MatchString(s) -} - -// From http://yaml.org/type/float.html, except the regular expression there -// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. -var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) - -// isOldBool returns whether s is bool notation as defined in YAML 1.1. -// -// We continue to force strings that YAML 1.1 would interpret as booleans to be -// rendered as quotes strings so that the marshalled output valid for YAML 1.1 -// parsing. 
-func isOldBool(s string) (result bool) { - switch s { - case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON", - "n", "N", "no", "No", "NO", "off", "Off", "OFF": - return true - default: - return false - } -} - -func (e *encoder) stringv(tag string, in reflect.Value) { - var style yaml_scalar_style_t - s := in.String() - canUsePlain := true - switch { - case !utf8.ValidString(s): - if tag == binaryTag { - failf("explicitly tagged !!binary data must be base64-encoded") - } - if tag != "" { - failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) - } - // It can't be encoded directly as YAML so use a binary tag - // and encode it as base64. - tag = binaryTag - s = encodeBase64(s) - case tag == "": - // Check to see if it would resolve to a specific - // tag when encoded unquoted. If it doesn't, - // there's no need to quote it. - rtag, _ := resolve("", s) - canUsePlain = rtag == strTag && !(isBase60Float(s) || isOldBool(s)) - } - // Note: it's possible for user code to emit invalid YAML - // if they explicitly specify a tag and a string containing - // text that's incompatible with that tag. - switch { - case strings.Contains(s, "\n"): - if e.flow { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } else { - style = yaml_LITERAL_SCALAR_STYLE - } - case canUsePlain: - style = yaml_PLAIN_SCALAR_STYLE - default: - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - e.emitScalar(s, "", tag, style, nil, nil, nil, nil) -} - -func (e *encoder) boolv(tag string, in reflect.Value) { - var s string - if in.Bool() { - s = "true" - } else { - s = "false" - } - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) -} - -func (e *encoder) intv(tag string, in reflect.Value) { - s := strconv.FormatInt(in.Int(), 10) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) -} - -func (e *encoder) uintv(tag string, in reflect.Value) { - s := strconv.FormatUint(in.Uint(), 10) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) -} - -func (e *encoder) timev(tag string, in reflect.Value) { - t := in.Interface().(time.Time) - s := t.Format(time.RFC3339Nano) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) -} - -func (e *encoder) floatv(tag string, in reflect.Value) { - // Issue #352: When formatting, use the precision of the underlying value - precision := 64 - if in.Kind() == reflect.Float32 { - precision = 32 - } - - s := strconv.FormatFloat(in.Float(), 'g', -1, precision) - switch s { - case "+Inf": - s = ".inf" - case "-Inf": - s = "-.inf" - case "NaN": - s = ".nan" - } - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) -} - -func (e *encoder) nilv() { - e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) -} - -func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t, head, line, foot, tail []byte) { - // TODO Kill this function. Replace all initialize calls by their underlining Go literals. 
- implicit := tag == "" - if !implicit { - tag = longTag(tag) - } - e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) - e.event.head_comment = head - e.event.line_comment = line - e.event.foot_comment = foot - e.event.tail_comment = tail - e.emit() -} - -func (e *encoder) nodev(in reflect.Value) { - e.node(in.Interface().(*Node), "") -} - -func (e *encoder) node(node *Node, tail string) { - // If the tag was not explicitly requested, and dropping it won't change the - // implicit tag of the value, don't include it in the presentation. - var tag = node.Tag - var stag = shortTag(tag) - var rtag string - var forceQuoting bool - if tag != "" && node.Style&TaggedStyle == 0 { - if node.Kind == ScalarNode { - if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 { - tag = "" - } else { - rtag, _ = resolve("", node.Value) - if rtag == stag { - tag = "" - } else if stag == strTag { - tag = "" - forceQuoting = true - } - } - } else { - switch node.Kind { - case MappingNode: - rtag = mapTag - case SequenceNode: - rtag = seqTag - } - if rtag == stag { - tag = "" - } - } - } - - switch node.Kind { - case DocumentNode: - yaml_document_start_event_initialize(&e.event, nil, nil, true) - e.event.head_comment = []byte(node.HeadComment) - e.emit() - for _, node := range node.Content { - e.node(node, "") - } - yaml_document_end_event_initialize(&e.event, true) - e.event.foot_comment = []byte(node.FootComment) - e.emit() - - case SequenceNode: - style := yaml_BLOCK_SEQUENCE_STYLE - if node.Style&FlowStyle != 0 { - style = yaml_FLOW_SEQUENCE_STYLE - } - e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(tag), tag == "", style)) - e.event.head_comment = []byte(node.HeadComment) - e.emit() - for _, node := range node.Content { - e.node(node, "") - } - e.must(yaml_sequence_end_event_initialize(&e.event)) - e.event.line_comment = []byte(node.LineComment) - e.event.foot_comment = []byte(node.FootComment) - e.emit() - - case MappingNode: - style := yaml_BLOCK_MAPPING_STYLE - if node.Style&FlowStyle != 0 { - style = yaml_FLOW_MAPPING_STYLE - } - yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(tag), tag == "", style) - e.event.tail_comment = []byte(tail) - e.event.head_comment = []byte(node.HeadComment) - e.emit() - - // The tail logic below moves the foot comment of prior keys to the following key, - // since the value for each key may be a nested structure and the foot needs to be - // processed only the entirety of the value is streamed. The last tail is processed - // with the mapping end event. 
- var tail string - for i := 0; i+1 < len(node.Content); i += 2 { - k := node.Content[i] - foot := k.FootComment - if foot != "" { - kopy := *k - kopy.FootComment = "" - k = &kopy - } - e.node(k, tail) - tail = foot - - v := node.Content[i+1] - e.node(v, "") - } - - yaml_mapping_end_event_initialize(&e.event) - e.event.tail_comment = []byte(tail) - e.event.line_comment = []byte(node.LineComment) - e.event.foot_comment = []byte(node.FootComment) - e.emit() - - case AliasNode: - yaml_alias_event_initialize(&e.event, []byte(node.Value)) - e.event.head_comment = []byte(node.HeadComment) - e.event.line_comment = []byte(node.LineComment) - e.event.foot_comment = []byte(node.FootComment) - e.emit() - - case ScalarNode: - value := node.Value - if !utf8.ValidString(value) { - if tag == binaryTag { - failf("explicitly tagged !!binary data must be base64-encoded") - } - if tag != "" { - failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) - } - // It can't be encoded directly as YAML so use a binary tag - // and encode it as base64. - tag = binaryTag - value = encodeBase64(value) - } - - style := yaml_PLAIN_SCALAR_STYLE - switch { - case node.Style&DoubleQuotedStyle != 0: - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - case node.Style&SingleQuotedStyle != 0: - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - case node.Style&LiteralStyle != 0: - style = yaml_LITERAL_SCALAR_STYLE - case node.Style&FoldedStyle != 0: - style = yaml_FOLDED_SCALAR_STYLE - case strings.Contains(value, "\n"): - style = yaml_LITERAL_SCALAR_STYLE - case forceQuoting: - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - - e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail)) - } -} diff --git a/vendor/gopkg.in/yaml.v3/go.mod b/vendor/gopkg.in/yaml.v3/go.mod deleted file mode 100644 index f407ea3..0000000 --- a/vendor/gopkg.in/yaml.v3/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module "gopkg.in/yaml.v3" - -require ( - "gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405 -) diff --git a/vendor/gopkg.in/yaml.v3/parserc.go b/vendor/gopkg.in/yaml.v3/parserc.go deleted file mode 100644 index aea9050..0000000 --- a/vendor/gopkg.in/yaml.v3/parserc.go +++ /dev/null @@ -1,1229 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// Copyright (c) 2006-2010 Kirill Simonov -// -// Permission is hereby granted, free of charge, to any person obtaining a copy of -// this software and associated documentation files (the "Software"), to deal in -// the Software without restriction, including without limitation the rights to -// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -// of the Software, and to permit persons to whom the Software is furnished to do -// so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. 
- -package yaml - -import ( - "bytes" -) - -// The parser implements the following grammar: -// -// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END -// implicit_document ::= block_node DOCUMENT-END* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// block_node_or_indentless_sequence ::= -// ALIAS -// | properties (block_content | indentless_block_sequence)? -// | block_content -// | indentless_block_sequence -// block_node ::= ALIAS -// | properties block_content? -// | block_content -// flow_node ::= ALIAS -// | properties flow_content? -// | flow_content -// properties ::= TAG ANCHOR? | ANCHOR TAG? -// block_content ::= block_collection | flow_collection | SCALAR -// flow_content ::= flow_collection | SCALAR -// block_collection ::= block_sequence | block_mapping -// flow_collection ::= flow_sequence | flow_mapping -// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// block_mapping ::= BLOCK-MAPPING_START -// ((KEY block_node_or_indentless_sequence?)? -// (VALUE block_node_or_indentless_sequence?)?)* -// BLOCK-END -// flow_sequence ::= FLOW-SEQUENCE-START -// (flow_sequence_entry FLOW-ENTRY)* -// flow_sequence_entry? -// FLOW-SEQUENCE-END -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// flow_mapping ::= FLOW-MAPPING-START -// (flow_mapping_entry FLOW-ENTRY)* -// flow_mapping_entry? -// FLOW-MAPPING-END -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? - -// Peek the next token in the token queue. -func peek_token(parser *yaml_parser_t) *yaml_token_t { - if parser.token_available || yaml_parser_fetch_more_tokens(parser) { - token := &parser.tokens[parser.tokens_head] - yaml_parser_unfold_comments(parser, token) - return token - } - return nil -} - -// yaml_parser_unfold_comments walks through the comments queue and joins all -// comments behind the position of the provided token into the respective -// top-level comment slices in the parser. -func yaml_parser_unfold_comments(parser *yaml_parser_t, token *yaml_token_t) { - for parser.comments_head < len(parser.comments) && token.start_mark.index >= parser.comments[parser.comments_head].token_mark.index { - comment := &parser.comments[parser.comments_head] - if len(comment.head) > 0 { - if token.typ == yaml_BLOCK_END_TOKEN { - // No heads on ends, so keep comment.head for a follow up token. - break - } - if len(parser.head_comment) > 0 { - parser.head_comment = append(parser.head_comment, '\n') - } - parser.head_comment = append(parser.head_comment, comment.head...) - } - if len(comment.foot) > 0 { - if len(parser.foot_comment) > 0 { - parser.foot_comment = append(parser.foot_comment, '\n') - } - parser.foot_comment = append(parser.foot_comment, comment.foot...) - } - if len(comment.line) > 0 { - if len(parser.line_comment) > 0 { - parser.line_comment = append(parser.line_comment, '\n') - } - parser.line_comment = append(parser.line_comment, comment.line...) - } - *comment = yaml_comment_t{} - parser.comments_head++ - } -} - -// Remove the next token from the queue (must be called after peek_token). -func skip_token(parser *yaml_parser_t) { - parser.token_available = false - parser.tokens_parsed++ - parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN - parser.tokens_head++ -} - -// Get the next event. -func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { - // Erase the event object. 
- *event = yaml_event_t{} - - // No events after the end of the stream or error. - if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { - return true - } - - // Generate the next event. - return yaml_parser_state_machine(parser, event) -} - -// Set parser error. -func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.problem = problem - parser.problem_mark = problem_mark - return false -} - -func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = problem_mark - return false -} - -// State dispatcher. -func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { - //trace("yaml_parser_state_machine", "state:", parser.state.String()) - - switch parser.state { - case yaml_PARSE_STREAM_START_STATE: - return yaml_parser_parse_stream_start(parser, event) - - case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, true) - - case yaml_PARSE_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, false) - - case yaml_PARSE_DOCUMENT_CONTENT_STATE: - return yaml_parser_parse_document_content(parser, event) - - case yaml_PARSE_DOCUMENT_END_STATE: - return yaml_parser_parse_document_end(parser, event) - - case yaml_PARSE_BLOCK_NODE_STATE: - return yaml_parser_parse_node(parser, event, true, false) - - case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: - return yaml_parser_parse_node(parser, event, true, true) - - case yaml_PARSE_FLOW_NODE_STATE: - return yaml_parser_parse_node(parser, event, false, false) - - case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, true) - - case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, false) - - case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_indentless_sequence_entry(parser, event) - - case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, true) - - case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, false) - - case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: - return yaml_parser_parse_block_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, true) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, false) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) - - case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, true) - - case yaml_PARSE_FLOW_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, false) - - case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: - return 
yaml_parser_parse_flow_mapping_value(parser, event, false) - - case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: - return yaml_parser_parse_flow_mapping_value(parser, event, true) - - default: - panic("invalid parser state") - } -} - -// Parse the production: -// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END -// ************ -func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_STREAM_START_TOKEN { - return yaml_parser_set_parser_error(parser, "did not find expected ", token.start_mark) - } - parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE - *event = yaml_event_t{ - typ: yaml_STREAM_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - encoding: token.encoding, - } - skip_token(parser) - return true -} - -// Parse the productions: -// implicit_document ::= block_node DOCUMENT-END* -// * -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// ************************* -func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { - - token := peek_token(parser) - if token == nil { - return false - } - - // Parse extra document end indicators. - if !implicit { - for token.typ == yaml_DOCUMENT_END_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } - - if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && - token.typ != yaml_TAG_DIRECTIVE_TOKEN && - token.typ != yaml_DOCUMENT_START_TOKEN && - token.typ != yaml_STREAM_END_TOKEN { - // Parse an implicit document. - if !yaml_parser_process_directives(parser, nil, nil) { - return false - } - parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) - parser.state = yaml_PARSE_BLOCK_NODE_STATE - - var head_comment []byte - if len(parser.head_comment) > 0 { - // [Go] Scan the header comment backwards, and if an empty line is found, break - // the header so the part before the last empty line goes into the - // document header, while the bottom of it goes into a follow up event. - for i := len(parser.head_comment) - 1; i > 0; i-- { - if parser.head_comment[i] == '\n' { - if i == len(parser.head_comment)-1 { - head_comment = parser.head_comment[:i] - parser.head_comment = parser.head_comment[i+1:] - break - } else if parser.head_comment[i-1] == '\n' { - head_comment = parser.head_comment[:i-1] - parser.head_comment = parser.head_comment[i+1:] - break - } - } - } - } - - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - - head_comment: head_comment, - } - - } else if token.typ != yaml_STREAM_END_TOKEN { - // Parse an explicit document. 
- var version_directive *yaml_version_directive_t - var tag_directives []yaml_tag_directive_t - start_mark := token.start_mark - if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { - return false - } - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_DOCUMENT_START_TOKEN { - yaml_parser_set_parser_error(parser, - "did not find expected ", token.start_mark) - return false - } - parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) - parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE - end_mark := token.end_mark - - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - version_directive: version_directive, - tag_directives: tag_directives, - implicit: false, - } - skip_token(parser) - - } else { - // Parse the stream end. - parser.state = yaml_PARSE_END_STATE - *event = yaml_event_t{ - typ: yaml_STREAM_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - } - - return true -} - -// Parse the productions: -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// *********** -// -func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || - token.typ == yaml_TAG_DIRECTIVE_TOKEN || - token.typ == yaml_DOCUMENT_START_TOKEN || - token.typ == yaml_DOCUMENT_END_TOKEN || - token.typ == yaml_STREAM_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - return yaml_parser_process_empty_scalar(parser, event, - token.start_mark) - } - return yaml_parser_parse_node(parser, event, true, false) -} - -// Parse the productions: -// implicit_document ::= block_node DOCUMENT-END* -// ************* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// -func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - start_mark := token.start_mark - end_mark := token.start_mark - - implicit := true - if token.typ == yaml_DOCUMENT_END_TOKEN { - end_mark = token.end_mark - skip_token(parser) - implicit = false - } - - parser.tag_directives = parser.tag_directives[:0] - - parser.state = yaml_PARSE_DOCUMENT_START_STATE - *event = yaml_event_t{ - typ: yaml_DOCUMENT_END_EVENT, - start_mark: start_mark, - end_mark: end_mark, - implicit: implicit, - } - yaml_parser_set_event_comments(parser, event) - if len(event.head_comment) > 0 && len(event.foot_comment) == 0 { - event.foot_comment = event.head_comment - event.head_comment = nil - } - return true -} - -func yaml_parser_set_event_comments(parser *yaml_parser_t, event *yaml_event_t) { - event.head_comment = parser.head_comment - event.line_comment = parser.line_comment - event.foot_comment = parser.foot_comment - parser.head_comment = nil - parser.line_comment = nil - parser.foot_comment = nil - parser.tail_comment = nil - parser.stem_comment = nil -} - -// Parse the productions: -// block_node_or_indentless_sequence ::= -// ALIAS -// ***** -// | properties (block_content | indentless_block_sequence)? -// ********** * -// | block_content | indentless_block_sequence -// * -// block_node ::= ALIAS -// ***** -// | properties block_content? 
-// ********** * -// | block_content -// * -// flow_node ::= ALIAS -// ***** -// | properties flow_content? -// ********** * -// | flow_content -// * -// properties ::= TAG ANCHOR? | ANCHOR TAG? -// ************************* -// block_content ::= block_collection | flow_collection | SCALAR -// ****** -// flow_content ::= flow_collection | SCALAR -// ****** -func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { - //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_ALIAS_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - *event = yaml_event_t{ - typ: yaml_ALIAS_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - anchor: token.value, - } - yaml_parser_set_event_comments(parser, event) - skip_token(parser) - return true - } - - start_mark := token.start_mark - end_mark := token.start_mark - - var tag_token bool - var tag_handle, tag_suffix, anchor []byte - var tag_mark yaml_mark_t - if token.typ == yaml_ANCHOR_TOKEN { - anchor = token.value - start_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_TAG_TOKEN { - tag_token = true - tag_handle = token.value - tag_suffix = token.suffix - tag_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } else if token.typ == yaml_TAG_TOKEN { - tag_token = true - tag_handle = token.value - tag_suffix = token.suffix - start_mark = token.start_mark - tag_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_ANCHOR_TOKEN { - anchor = token.value - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } - - var tag []byte - if tag_token { - if len(tag_handle) == 0 { - tag = tag_suffix - tag_suffix = nil - } else { - for i := range parser.tag_directives { - if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { - tag = append([]byte(nil), parser.tag_directives[i].prefix...) - tag = append(tag, tag_suffix...) 
- break - } - } - if len(tag) == 0 { - yaml_parser_set_parser_error_context(parser, - "while parsing a node", start_mark, - "found undefined tag handle", tag_mark) - return false - } - } - } - - implicit := len(tag) == 0 - if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - return true - } - if token.typ == yaml_SCALAR_TOKEN { - var plain_implicit, quoted_implicit bool - end_mark = token.end_mark - if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { - plain_implicit = true - } else if len(tag) == 0 { - quoted_implicit = true - } - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - value: token.value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(token.style), - } - yaml_parser_set_event_comments(parser, event) - skip_token(parser) - return true - } - if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { - // [Go] Some of the events below can be merged as they differ only on style. - end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), - } - yaml_parser_set_event_comments(parser, event) - return true - } - if token.typ == yaml_FLOW_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - yaml_parser_set_event_comments(parser, event) - return true - } - if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - if parser.stem_comment != nil { - event.head_comment = parser.stem_comment - parser.stem_comment = nil - } - return true - } - if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), - } - return true - } - if len(anchor) > 0 || len(tag) > 0 { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - quoted_implicit: false, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - return true - } - - context := "while parsing a flow 
node" - if block { - context = "while parsing a block node" - } - yaml_parser_set_parser_error_context(parser, context, start_mark, - "did not find expected node content", token.start_mark) - return false -} - -// Parse the productions: -// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// ******************** *********** * ********* -// -func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - prior_head := len(parser.head_comment) - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if prior_head > 0 && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { - // [Go] It's a sequence under a sequence entry, so the former head comment - // is for the list itself, not the first list item under it. - parser.stem_comment = parser.head_comment[:prior_head] - if len(parser.head_comment) == prior_head { - parser.head_comment = nil - } else { - // Copy suffix to prevent very strange bugs if someone ever appends - // further bytes to the prefix in the stem_comment slice above. - parser.head_comment = append([]byte(nil), parser.head_comment[prior_head+1:]...) - } - - } - if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } else { - parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } - if token.typ == yaml_BLOCK_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - skip_token(parser) - return true - } - - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a block collection", context_mark, - "did not find expected '-' indicator", token.start_mark) -} - -// Parse the productions: -// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// *********** * -func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_BLOCK_ENTRY_TOKEN && - token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, // [Go] Shouldn't 
this be token.end_mark? - } - return true -} - -// Parse the productions: -// block_mapping ::= BLOCK-MAPPING_START -// ******************* -// ((KEY block_node_or_indentless_sequence?)? -// *** * -// (VALUE block_node_or_indentless_sequence?)?)* -// -// BLOCK-END -// ********* -// -func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - // [Go] A tail comment was left from the prior mapping value processed. Emit an event - // as it needs to be processed with that value and not the following key. - if len(parser.tail_comment) > 0 { - *event = yaml_event_t{ - typ: yaml_TAIL_COMMENT_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - foot_comment: parser.tail_comment, - } - parser.tail_comment = nil - return true - } - - if token.typ == yaml_KEY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } else { - parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } else if token.typ == yaml_BLOCK_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - yaml_parser_set_event_comments(parser, event) - skip_token(parser) - return true - } - - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a block mapping", context_mark, - "did not find expected key", token.start_mark) -} - -// Parse the productions: -// block_mapping ::= BLOCK-MAPPING_START -// -// ((KEY block_node_or_indentless_sequence?)? -// -// (VALUE block_node_or_indentless_sequence?)?)* -// ***** * -// BLOCK-END -// -// -func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VALUE_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Parse the productions: -// flow_sequence ::= FLOW-SEQUENCE-START -// ******************* -// (flow_sequence_entry FLOW-ENTRY)* -// * ********** -// flow_sequence_entry? -// * -// FLOW-SEQUENCE-END -// ***************** -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// * -// -func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - if !first { - if token.typ == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow sequence", context_mark, - "did not find expected ',' or ']'", token.start_mark) - } - } - - if token.typ == yaml_KEY_TOKEN { - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - implicit: true, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - skip_token(parser) - return true - } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - yaml_parser_set_event_comments(parser, event) - - skip_token(parser) - return true -} - -// -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// *** * -// -func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_FLOW_ENTRY_TOKEN && - token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - mark := token.end_mark - skip_token(parser) - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) -} - -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// ***** * -// -func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VALUE_TOKEN { - skip_token(parser) - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// * -// -func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? - } - return true -} - -// Parse the productions: -// flow_mapping ::= FLOW-MAPPING-START -// ****************** -// (flow_mapping_entry FLOW-ENTRY)* -// * ********** -// flow_mapping_entry? -// ****************** -// FLOW-MAPPING-END -// **************** -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * *** * -// -func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ != yaml_FLOW_MAPPING_END_TOKEN { - if !first { - if token.typ == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow mapping", context_mark, - "did not find expected ',' or '}'", token.start_mark) - } - } - - if token.typ == yaml_KEY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_FLOW_ENTRY_TOKEN && - token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } else { - parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) - } - } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - yaml_parser_set_event_comments(parser, event) - skip_token(parser) - return true -} - -// Parse the productions: -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// * ***** * -// -func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { - token := peek_token(parser) - if token == nil { - return false - } - if empty { - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) - } - if token.typ == yaml_VALUE_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Generate an empty scalar event. -func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: mark, - end_mark: mark, - value: nil, // Empty - implicit: true, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - return true -} - -var default_tag_directives = []yaml_tag_directive_t{ - {[]byte("!"), []byte("!")}, - {[]byte("!!"), []byte("tag:yaml.org,2002:")}, -} - -// Parse directives. -func yaml_parser_process_directives(parser *yaml_parser_t, - version_directive_ref **yaml_version_directive_t, - tag_directives_ref *[]yaml_tag_directive_t) bool { - - var version_directive *yaml_version_directive_t - var tag_directives []yaml_tag_directive_t - - token := peek_token(parser) - if token == nil { - return false - } - - for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { - if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { - if version_directive != nil { - yaml_parser_set_parser_error(parser, - "found duplicate %YAML directive", token.start_mark) - return false - } - if token.major != 1 || token.minor != 1 { - yaml_parser_set_parser_error(parser, - "found incompatible YAML document", token.start_mark) - return false - } - version_directive = &yaml_version_directive_t{ - major: token.major, - minor: token.minor, - } - } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { - value := yaml_tag_directive_t{ - handle: token.value, - prefix: token.prefix, - } - if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { - return false - } - tag_directives = append(tag_directives, value) - } - - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - - for i := range default_tag_directives { - if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { - return false - } - } - - if version_directive_ref != nil { - *version_directive_ref = version_directive - } - if tag_directives_ref != nil { - *tag_directives_ref = tag_directives - } - return true -} - -// Append a tag directive to the directives stack. -func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { - for i := range parser.tag_directives { - if bytes.Equal(value.handle, parser.tag_directives[i].handle) { - if allow_duplicates { - return true - } - return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) - } - } - - // [Go] I suspect the copy is unnecessary. This was likely done - // because there was no way to track ownership of the data. 
- value_copy := yaml_tag_directive_t{ - handle: make([]byte, len(value.handle)), - prefix: make([]byte, len(value.prefix)), - } - copy(value_copy.handle, value.handle) - copy(value_copy.prefix, value.prefix) - parser.tag_directives = append(parser.tag_directives, value_copy) - return true -} diff --git a/vendor/gopkg.in/yaml.v3/readerc.go b/vendor/gopkg.in/yaml.v3/readerc.go deleted file mode 100644 index b7de0a8..0000000 --- a/vendor/gopkg.in/yaml.v3/readerc.go +++ /dev/null @@ -1,434 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// Copyright (c) 2006-2010 Kirill Simonov -// -// Permission is hereby granted, free of charge, to any person obtaining a copy of -// this software and associated documentation files (the "Software"), to deal in -// the Software without restriction, including without limitation the rights to -// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -// of the Software, and to permit persons to whom the Software is furnished to do -// so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -package yaml - -import ( - "io" -) - -// Set the reader error and return 0. -func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { - parser.error = yaml_READER_ERROR - parser.problem = problem - parser.problem_offset = offset - parser.problem_value = value - return false -} - -// Byte order marks. -const ( - bom_UTF8 = "\xef\xbb\xbf" - bom_UTF16LE = "\xff\xfe" - bom_UTF16BE = "\xfe\xff" -) - -// Determine the input stream encoding by checking the BOM symbol. If no BOM is -// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. -func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { - // Ensure that we had enough bytes in the raw buffer. - for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { - if !yaml_parser_update_raw_buffer(parser) { - return false - } - } - - // Determine the encoding. - buf := parser.raw_buffer - pos := parser.raw_buffer_pos - avail := len(buf) - pos - if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { - parser.encoding = yaml_UTF16LE_ENCODING - parser.raw_buffer_pos += 2 - parser.offset += 2 - } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { - parser.encoding = yaml_UTF16BE_ENCODING - parser.raw_buffer_pos += 2 - parser.offset += 2 - } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { - parser.encoding = yaml_UTF8_ENCODING - parser.raw_buffer_pos += 3 - parser.offset += 3 - } else { - parser.encoding = yaml_UTF8_ENCODING - } - return true -} - -// Update the raw buffer. -func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { - size_read := 0 - - // Return if the raw buffer is full. 
- if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { - return true - } - - // Return on EOF. - if parser.eof { - return true - } - - // Move the remaining bytes in the raw buffer to the beginning. - if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { - copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) - } - parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] - parser.raw_buffer_pos = 0 - - // Call the read handler to fill the buffer. - size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) - parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] - if err == io.EOF { - parser.eof = true - } else if err != nil { - return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) - } - return true -} - -// Ensure that the buffer contains at least `length` characters. -// Return true on success, false on failure. -// -// The length is supposed to be significantly less that the buffer size. -func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { - if parser.read_handler == nil { - panic("read handler must be set") - } - - // [Go] This function was changed to guarantee the requested length size at EOF. - // The fact we need to do this is pretty awful, but the description above implies - // for that to be the case, and there are tests - - // If the EOF flag is set and the raw buffer is empty, do nothing. - if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { - // [Go] ACTUALLY! Read the documentation of this function above. - // This is just broken. To return true, we need to have the - // given length in the buffer. Not doing that means every single - // check that calls this function to make sure the buffer has a - // given length is Go) panicking; or C) accessing invalid memory. - //return true - } - - // Return if the buffer contains enough characters. - if parser.unread >= length { - return true - } - - // Determine the input encoding if it is not known yet. - if parser.encoding == yaml_ANY_ENCODING { - if !yaml_parser_determine_encoding(parser) { - return false - } - } - - // Move the unread characters to the beginning of the buffer. - buffer_len := len(parser.buffer) - if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { - copy(parser.buffer, parser.buffer[parser.buffer_pos:]) - buffer_len -= parser.buffer_pos - parser.buffer_pos = 0 - } else if parser.buffer_pos == buffer_len { - buffer_len = 0 - parser.buffer_pos = 0 - } - - // Open the whole buffer for writing, and cut it before returning. - parser.buffer = parser.buffer[:cap(parser.buffer)] - - // Fill the buffer until it has enough characters. - first := true - for parser.unread < length { - - // Fill the raw buffer if necessary. - if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { - if !yaml_parser_update_raw_buffer(parser) { - parser.buffer = parser.buffer[:buffer_len] - return false - } - } - first = false - - // Decode the raw buffer. - inner: - for parser.raw_buffer_pos != len(parser.raw_buffer) { - var value rune - var width int - - raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos - - // Decode the next character. - switch parser.encoding { - case yaml_UTF8_ENCODING: - // Decode a UTF-8 character. Check RFC 3629 - // (http://www.ietf.org/rfc/rfc3629.txt) for more details. - // - // The following table (taken from the RFC) is used for - // decoding. 
- // - // Char. number range | UTF-8 octet sequence - // (hexadecimal) | (binary) - // --------------------+------------------------------------ - // 0000 0000-0000 007F | 0xxxxxxx - // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx - // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx - // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - // - // Additionally, the characters in the range 0xD800-0xDFFF - // are prohibited as they are reserved for use with UTF-16 - // surrogate pairs. - - // Determine the length of the UTF-8 sequence. - octet := parser.raw_buffer[parser.raw_buffer_pos] - switch { - case octet&0x80 == 0x00: - width = 1 - case octet&0xE0 == 0xC0: - width = 2 - case octet&0xF0 == 0xE0: - width = 3 - case octet&0xF8 == 0xF0: - width = 4 - default: - // The leading octet is invalid. - return yaml_parser_set_reader_error(parser, - "invalid leading UTF-8 octet", - parser.offset, int(octet)) - } - - // Check if the raw buffer contains an incomplete character. - if width > raw_unread { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-8 octet sequence", - parser.offset, -1) - } - break inner - } - - // Decode the leading octet. - switch { - case octet&0x80 == 0x00: - value = rune(octet & 0x7F) - case octet&0xE0 == 0xC0: - value = rune(octet & 0x1F) - case octet&0xF0 == 0xE0: - value = rune(octet & 0x0F) - case octet&0xF8 == 0xF0: - value = rune(octet & 0x07) - default: - value = 0 - } - - // Check and decode the trailing octets. - for k := 1; k < width; k++ { - octet = parser.raw_buffer[parser.raw_buffer_pos+k] - - // Check if the octet is valid. - if (octet & 0xC0) != 0x80 { - return yaml_parser_set_reader_error(parser, - "invalid trailing UTF-8 octet", - parser.offset+k, int(octet)) - } - - // Decode the octet. - value = (value << 6) + rune(octet&0x3F) - } - - // Check the length of the sequence against the value. - switch { - case width == 1: - case width == 2 && value >= 0x80: - case width == 3 && value >= 0x800: - case width == 4 && value >= 0x10000: - default: - return yaml_parser_set_reader_error(parser, - "invalid length of a UTF-8 sequence", - parser.offset, -1) - } - - // Check the range of the value. - if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { - return yaml_parser_set_reader_error(parser, - "invalid Unicode character", - parser.offset, int(value)) - } - - case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: - var low, high int - if parser.encoding == yaml_UTF16LE_ENCODING { - low, high = 0, 1 - } else { - low, high = 1, 0 - } - - // The UTF-16 encoding is not as simple as one might - // naively think. Check RFC 2781 - // (http://www.ietf.org/rfc/rfc2781.txt). - // - // Normally, two subsequent bytes describe a Unicode - // character. However a special technique (called a - // surrogate pair) is used for specifying character - // values larger than 0xFFFF. - // - // A surrogate pair consists of two pseudo-characters: - // high surrogate area (0xD800-0xDBFF) - // low surrogate area (0xDC00-0xDFFF) - // - // The following formulas are used for decoding - // and encoding characters using surrogate pairs: - // - // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) - // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) - // W1 = 110110yyyyyyyyyy - // W2 = 110111xxxxxxxxxx - // - // where U is the character value, W1 is the high surrogate - // area, W2 is the low surrogate area. - - // Check for incomplete UTF-16 character. 
- if raw_unread < 2 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 character", - parser.offset, -1) - } - break inner - } - - // Get the character. - value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) - - // Check for unexpected low surrogate area. - if value&0xFC00 == 0xDC00 { - return yaml_parser_set_reader_error(parser, - "unexpected low surrogate area", - parser.offset, int(value)) - } - - // Check for a high surrogate area. - if value&0xFC00 == 0xD800 { - width = 4 - - // Check for incomplete surrogate pair. - if raw_unread < 4 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 surrogate pair", - parser.offset, -1) - } - break inner - } - - // Get the next character. - value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) - - // Check for a low surrogate area. - if value2&0xFC00 != 0xDC00 { - return yaml_parser_set_reader_error(parser, - "expected low surrogate area", - parser.offset+2, int(value2)) - } - - // Generate the value of the surrogate pair. - value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) - } else { - width = 2 - } - - default: - panic("impossible") - } - - // Check if the character is in the allowed range: - // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) - // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) - // | [#x10000-#x10FFFF] (32 bit) - switch { - case value == 0x09: - case value == 0x0A: - case value == 0x0D: - case value >= 0x20 && value <= 0x7E: - case value == 0x85: - case value >= 0xA0 && value <= 0xD7FF: - case value >= 0xE000 && value <= 0xFFFD: - case value >= 0x10000 && value <= 0x10FFFF: - default: - return yaml_parser_set_reader_error(parser, - "control characters are not allowed", - parser.offset, int(value)) - } - - // Move the raw pointers. - parser.raw_buffer_pos += width - parser.offset += width - - // Finally put the character into the buffer. - if value <= 0x7F { - // 0000 0000-0000 007F . 0xxxxxxx - parser.buffer[buffer_len+0] = byte(value) - buffer_len += 1 - } else if value <= 0x7FF { - // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) - parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) - buffer_len += 2 - } else if value <= 0xFFFF { - // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) - parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) - buffer_len += 3 - } else { - // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) - parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) - parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) - buffer_len += 4 - } - - parser.unread++ - } - - // On EOF, put NUL into the buffer and return. - if parser.eof { - parser.buffer[buffer_len] = 0 - buffer_len++ - parser.unread++ - break - } - } - // [Go] Read the documentation of this function above. To return true, - // we need to have the given length in the buffer. Not doing that means - // every single check that calls this function to make sure the buffer - // has a given length is Go) panicking; or C) accessing invalid memory. - // This happens here due to the EOF above breaking early. 
- for buffer_len < length { - parser.buffer[buffer_len] = 0 - buffer_len++ - } - parser.buffer = parser.buffer[:buffer_len] - return true -} diff --git a/vendor/gopkg.in/yaml.v3/resolve.go b/vendor/gopkg.in/yaml.v3/resolve.go deleted file mode 100644 index 64ae888..0000000 --- a/vendor/gopkg.in/yaml.v3/resolve.go +++ /dev/null @@ -1,326 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package yaml - -import ( - "encoding/base64" - "math" - "regexp" - "strconv" - "strings" - "time" -) - -type resolveMapItem struct { - value interface{} - tag string -} - -var resolveTable = make([]byte, 256) -var resolveMap = make(map[string]resolveMapItem) - -func init() { - t := resolveTable - t[int('+')] = 'S' // Sign - t[int('-')] = 'S' - for _, c := range "0123456789" { - t[int(c)] = 'D' // Digit - } - for _, c := range "yYnNtTfFoO~" { - t[int(c)] = 'M' // In map - } - t[int('.')] = '.' // Float (potentially in map) - - var resolveMapList = []struct { - v interface{} - tag string - l []string - }{ - {true, boolTag, []string{"true", "True", "TRUE"}}, - {false, boolTag, []string{"false", "False", "FALSE"}}, - {nil, nullTag, []string{"", "~", "null", "Null", "NULL"}}, - {math.NaN(), floatTag, []string{".nan", ".NaN", ".NAN"}}, - {math.Inf(+1), floatTag, []string{".inf", ".Inf", ".INF"}}, - {math.Inf(+1), floatTag, []string{"+.inf", "+.Inf", "+.INF"}}, - {math.Inf(-1), floatTag, []string{"-.inf", "-.Inf", "-.INF"}}, - {"<<", mergeTag, []string{"<<"}}, - } - - m := resolveMap - for _, item := range resolveMapList { - for _, s := range item.l { - m[s] = resolveMapItem{item.v, item.tag} - } - } -} - -const ( - nullTag = "!!null" - boolTag = "!!bool" - strTag = "!!str" - intTag = "!!int" - floatTag = "!!float" - timestampTag = "!!timestamp" - seqTag = "!!seq" - mapTag = "!!map" - binaryTag = "!!binary" - mergeTag = "!!merge" -) - -var longTags = make(map[string]string) -var shortTags = make(map[string]string) - -func init() { - for _, stag := range []string{nullTag, boolTag, strTag, intTag, floatTag, timestampTag, seqTag, mapTag, binaryTag, mergeTag} { - ltag := longTag(stag) - longTags[stag] = ltag - shortTags[ltag] = stag - } -} - -const longTagPrefix = "tag:yaml.org,2002:" - -func shortTag(tag string) string { - if strings.HasPrefix(tag, longTagPrefix) { - if stag, ok := shortTags[tag]; ok { - return stag - } - return "!!" 
+ tag[len(longTagPrefix):] - } - return tag -} - -func longTag(tag string) string { - if strings.HasPrefix(tag, "!!") { - if ltag, ok := longTags[tag]; ok { - return ltag - } - return longTagPrefix + tag[2:] - } - return tag -} - -func resolvableTag(tag string) bool { - switch tag { - case "", strTag, boolTag, intTag, floatTag, nullTag, timestampTag: - return true - } - return false -} - -var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) - -func resolve(tag string, in string) (rtag string, out interface{}) { - tag = shortTag(tag) - if !resolvableTag(tag) { - return tag, in - } - - defer func() { - switch tag { - case "", rtag, strTag, binaryTag: - return - case floatTag: - if rtag == intTag { - switch v := out.(type) { - case int64: - rtag = floatTag - out = float64(v) - return - case int: - rtag = floatTag - out = float64(v) - return - } - } - } - failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) - }() - - // Any data is accepted as a !!str or !!binary. - // Otherwise, the prefix is enough of a hint about what it might be. - hint := byte('N') - if in != "" { - hint = resolveTable[in[0]] - } - if hint != 0 && tag != strTag && tag != binaryTag { - // Handle things we can lookup in a map. - if item, ok := resolveMap[in]; ok { - return item.tag, item.value - } - - // Base 60 floats are a bad idea, were dropped in YAML 1.2, and - // are purposefully unsupported here. They're still quoted on - // the way out for compatibility with other parser, though. - - switch hint { - case 'M': - // We've already checked the map above. - - case '.': - // Not in the map, so maybe a normal float. - floatv, err := strconv.ParseFloat(in, 64) - if err == nil { - return floatTag, floatv - } - - case 'D', 'S': - // Int, float, or timestamp. - // Only try values as a timestamp if the value is unquoted or there's an explicit - // !!timestamp tag. - if tag == "" || tag == timestampTag { - t, ok := parseTimestamp(in) - if ok { - return timestampTag, t - } - } - - plain := strings.Replace(in, "_", "", -1) - intv, err := strconv.ParseInt(plain, 0, 64) - if err == nil { - if intv == int64(int(intv)) { - return intTag, int(intv) - } else { - return intTag, intv - } - } - uintv, err := strconv.ParseUint(plain, 0, 64) - if err == nil { - return intTag, uintv - } - if yamlStyleFloat.MatchString(plain) { - floatv, err := strconv.ParseFloat(plain, 64) - if err == nil { - return floatTag, floatv - } - } - if strings.HasPrefix(plain, "0b") { - intv, err := strconv.ParseInt(plain[2:], 2, 64) - if err == nil { - if intv == int64(int(intv)) { - return intTag, int(intv) - } else { - return intTag, intv - } - } - uintv, err := strconv.ParseUint(plain[2:], 2, 64) - if err == nil { - return intTag, uintv - } - } else if strings.HasPrefix(plain, "-0b") { - intv, err := strconv.ParseInt("-"+plain[3:], 2, 64) - if err == nil { - if true || intv == int64(int(intv)) { - return intTag, int(intv) - } else { - return intTag, intv - } - } - } - // Octals as introduced in version 1.2 of the spec. - // Octals from the 1.1 spec, spelled as 0777, are still - // decoded by default in v3 as well for compatibility. - // May be dropped in v4 depending on how usage evolves. 
- if strings.HasPrefix(plain, "0o") { - intv, err := strconv.ParseInt(plain[2:], 8, 64) - if err == nil { - if intv == int64(int(intv)) { - return intTag, int(intv) - } else { - return intTag, intv - } - } - uintv, err := strconv.ParseUint(plain[2:], 8, 64) - if err == nil { - return intTag, uintv - } - } else if strings.HasPrefix(plain, "-0o") { - intv, err := strconv.ParseInt("-"+plain[3:], 8, 64) - if err == nil { - if true || intv == int64(int(intv)) { - return intTag, int(intv) - } else { - return intTag, intv - } - } - } - default: - panic("internal error: missing handler for resolver table: " + string(rune(hint)) + " (with " + in + ")") - } - } - return strTag, in -} - -// encodeBase64 encodes s as base64 that is broken up into multiple lines -// as appropriate for the resulting length. -func encodeBase64(s string) string { - const lineLen = 70 - encLen := base64.StdEncoding.EncodedLen(len(s)) - lines := encLen/lineLen + 1 - buf := make([]byte, encLen*2+lines) - in := buf[0:encLen] - out := buf[encLen:] - base64.StdEncoding.Encode(in, []byte(s)) - k := 0 - for i := 0; i < len(in); i += lineLen { - j := i + lineLen - if j > len(in) { - j = len(in) - } - k += copy(out[k:], in[i:j]) - if lines > 1 { - out[k] = '\n' - k++ - } - } - return string(out[:k]) -} - -// This is a subset of the formats allowed by the regular expression -// defined at http://yaml.org/type/timestamp.html. -var allowedTimestampFormats = []string{ - "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. - "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". - "2006-1-2 15:4:5.999999999", // space separated with no time zone - "2006-1-2", // date only - // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" - // from the set of examples. -} - -// parseTimestamp parses s as a timestamp string and -// returns the timestamp and reports whether it succeeded. -// Timestamp formats are defined at http://yaml.org/type/timestamp.html -func parseTimestamp(s string) (time.Time, bool) { - // TODO write code to check all the formats supported by - // http://yaml.org/type/timestamp.html instead of using time.Parse. - - // Quick check: all date formats start with YYYY-. - i := 0 - for ; i < len(s); i++ { - if c := s[i]; c < '0' || c > '9' { - break - } - } - if i != 4 || i == len(s) || s[i] != '-' { - return time.Time{}, false - } - for _, format := range allowedTimestampFormats { - if t, err := time.Parse(format, s); err == nil { - return t, true - } - } - return time.Time{}, false -} diff --git a/vendor/gopkg.in/yaml.v3/scannerc.go b/vendor/gopkg.in/yaml.v3/scannerc.go deleted file mode 100644 index 57e954c..0000000 --- a/vendor/gopkg.in/yaml.v3/scannerc.go +++ /dev/null @@ -1,3025 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// Copyright (c) 2006-2010 Kirill Simonov -// -// Permission is hereby granted, free of charge, to any person obtaining a copy of -// this software and associated documentation files (the "Software"), to deal in -// the Software without restriction, including without limitation the rights to -// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -// of the Software, and to permit persons to whom the Software is furnished to do -// so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -package yaml - -import ( - "bytes" - "fmt" -) - -// Introduction -// ************ -// -// The following notes assume that you are familiar with the YAML specification -// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in -// some cases we are less restrictive that it requires. -// -// The process of transforming a YAML stream into a sequence of events is -// divided on two steps: Scanning and Parsing. -// -// The Scanner transforms the input stream into a sequence of tokens, while the -// parser transform the sequence of tokens produced by the Scanner into a -// sequence of parsing events. -// -// The Scanner is rather clever and complicated. The Parser, on the contrary, -// is a straightforward implementation of a recursive-descendant parser (or, -// LL(1) parser, as it is usually called). -// -// Actually there are two issues of Scanning that might be called "clever", the -// rest is quite straightforward. The issues are "block collection start" and -// "simple keys". Both issues are explained below in details. -// -// Here the Scanning step is explained and implemented. We start with the list -// of all the tokens produced by the Scanner together with short descriptions. -// -// Now, tokens: -// -// STREAM-START(encoding) # The stream start. -// STREAM-END # The stream end. -// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. -// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. -// DOCUMENT-START # '---' -// DOCUMENT-END # '...' -// BLOCK-SEQUENCE-START # Indentation increase denoting a block -// BLOCK-MAPPING-START # sequence or a block mapping. -// BLOCK-END # Indentation decrease. -// FLOW-SEQUENCE-START # '[' -// FLOW-SEQUENCE-END # ']' -// BLOCK-SEQUENCE-START # '{' -// BLOCK-SEQUENCE-END # '}' -// BLOCK-ENTRY # '-' -// FLOW-ENTRY # ',' -// KEY # '?' or nothing (simple keys). -// VALUE # ':' -// ALIAS(anchor) # '*anchor' -// ANCHOR(anchor) # '&anchor' -// TAG(handle,suffix) # '!handle!suffix' -// SCALAR(value,style) # A scalar. -// -// The following two tokens are "virtual" tokens denoting the beginning and the -// end of the stream: -// -// STREAM-START(encoding) -// STREAM-END -// -// We pass the information about the input stream encoding with the -// STREAM-START token. -// -// The next two tokens are responsible for tags: -// -// VERSION-DIRECTIVE(major,minor) -// TAG-DIRECTIVE(handle,prefix) -// -// Example: -// -// %YAML 1.1 -// %TAG ! !foo -// %TAG !yaml! tag:yaml.org,2002: -// --- -// -// The correspoding sequence of tokens: -// -// STREAM-START(utf-8) -// VERSION-DIRECTIVE(1,1) -// TAG-DIRECTIVE("!","!foo") -// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") -// DOCUMENT-START -// STREAM-END -// -// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole -// line. -// -// The document start and end indicators are represented by: -// -// DOCUMENT-START -// DOCUMENT-END -// -// Note that if a YAML stream contains an implicit document (without '---' -// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be -// produced. 
-// -// In the following examples, we present whole documents together with the -// produced tokens. -// -// 1. An implicit document: -// -// 'a scalar' -// -// Tokens: -// -// STREAM-START(utf-8) -// SCALAR("a scalar",single-quoted) -// STREAM-END -// -// 2. An explicit document: -// -// --- -// 'a scalar' -// ... -// -// Tokens: -// -// STREAM-START(utf-8) -// DOCUMENT-START -// SCALAR("a scalar",single-quoted) -// DOCUMENT-END -// STREAM-END -// -// 3. Several documents in a stream: -// -// 'a scalar' -// --- -// 'another scalar' -// --- -// 'yet another scalar' -// -// Tokens: -// -// STREAM-START(utf-8) -// SCALAR("a scalar",single-quoted) -// DOCUMENT-START -// SCALAR("another scalar",single-quoted) -// DOCUMENT-START -// SCALAR("yet another scalar",single-quoted) -// STREAM-END -// -// We have already introduced the SCALAR token above. The following tokens are -// used to describe aliases, anchors, tag, and scalars: -// -// ALIAS(anchor) -// ANCHOR(anchor) -// TAG(handle,suffix) -// SCALAR(value,style) -// -// The following series of examples illustrate the usage of these tokens: -// -// 1. A recursive sequence: -// -// &A [ *A ] -// -// Tokens: -// -// STREAM-START(utf-8) -// ANCHOR("A") -// FLOW-SEQUENCE-START -// ALIAS("A") -// FLOW-SEQUENCE-END -// STREAM-END -// -// 2. A tagged scalar: -// -// !!float "3.14" # A good approximation. -// -// Tokens: -// -// STREAM-START(utf-8) -// TAG("!!","float") -// SCALAR("3.14",double-quoted) -// STREAM-END -// -// 3. Various scalar styles: -// -// --- # Implicit empty plain scalars do not produce tokens. -// --- a plain scalar -// --- 'a single-quoted scalar' -// --- "a double-quoted scalar" -// --- |- -// a literal scalar -// --- >- -// a folded -// scalar -// -// Tokens: -// -// STREAM-START(utf-8) -// DOCUMENT-START -// DOCUMENT-START -// SCALAR("a plain scalar",plain) -// DOCUMENT-START -// SCALAR("a single-quoted scalar",single-quoted) -// DOCUMENT-START -// SCALAR("a double-quoted scalar",double-quoted) -// DOCUMENT-START -// SCALAR("a literal scalar",literal) -// DOCUMENT-START -// SCALAR("a folded scalar",folded) -// STREAM-END -// -// Now it's time to review collection-related tokens. We will start with -// flow collections: -// -// FLOW-SEQUENCE-START -// FLOW-SEQUENCE-END -// FLOW-MAPPING-START -// FLOW-MAPPING-END -// FLOW-ENTRY -// KEY -// VALUE -// -// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and -// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' -// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the -// indicators '?' and ':', which are used for denoting mapping keys and values, -// are represented by the KEY and VALUE tokens. -// -// The following examples show flow collections: -// -// 1. A flow sequence: -// -// [item 1, item 2, item 3] -// -// Tokens: -// -// STREAM-START(utf-8) -// FLOW-SEQUENCE-START -// SCALAR("item 1",plain) -// FLOW-ENTRY -// SCALAR("item 2",plain) -// FLOW-ENTRY -// SCALAR("item 3",plain) -// FLOW-SEQUENCE-END -// STREAM-END -// -// 2. A flow mapping: -// -// { -// a simple key: a value, # Note that the KEY token is produced. -// ? a complex key: another value, -// } -// -// Tokens: -// -// STREAM-START(utf-8) -// FLOW-MAPPING-START -// KEY -// SCALAR("a simple key",plain) -// VALUE -// SCALAR("a value",plain) -// FLOW-ENTRY -// KEY -// SCALAR("a complex key",plain) -// VALUE -// SCALAR("another value",plain) -// FLOW-ENTRY -// FLOW-MAPPING-END -// STREAM-END -// -// A simple key is a key which is not denoted by the '?' indicator. 
Note that -// the Scanner still produce the KEY token whenever it encounters a simple key. -// -// For scanning block collections, the following tokens are used (note that we -// repeat KEY and VALUE here): -// -// BLOCK-SEQUENCE-START -// BLOCK-MAPPING-START -// BLOCK-END -// BLOCK-ENTRY -// KEY -// VALUE -// -// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation -// increase that precedes a block collection (cf. the INDENT token in Python). -// The token BLOCK-END denote indentation decrease that ends a block collection -// (cf. the DEDENT token in Python). However YAML has some syntax pecularities -// that makes detections of these tokens more complex. -// -// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators -// '-', '?', and ':' correspondingly. -// -// The following examples show how the tokens BLOCK-SEQUENCE-START, -// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: -// -// 1. Block sequences: -// -// - item 1 -// - item 2 -// - -// - item 3.1 -// - item 3.2 -// - -// key 1: value 1 -// key 2: value 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-ENTRY -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 3.1",plain) -// BLOCK-ENTRY -// SCALAR("item 3.2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// 2. Block mappings: -// -// a simple key: a value # The KEY token is produced here. -// ? a complex key -// : another value -// a mapping: -// key 1: value 1 -// key 2: value 2 -// a sequence: -// - item 1 -// - item 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("a simple key",plain) -// VALUE -// SCALAR("a value",plain) -// KEY -// SCALAR("a complex key",plain) -// VALUE -// SCALAR("another value",plain) -// KEY -// SCALAR("a mapping",plain) -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// KEY -// SCALAR("a sequence",plain) -// VALUE -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// YAML does not always require to start a new block collection from a new -// line. If the current line contains only '-', '?', and ':' indicators, a new -// block collection may start at the current line. The following examples -// illustrate this case: -// -// 1. Collections in a sequence: -// -// - - item 1 -// - item 2 -// - key 1: value 1 -// key 2: value 2 -// - ? complex key -// : complex value -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("complex key") -// VALUE -// SCALAR("complex value") -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// 2. Collections in a mapping: -// -// ? 
a sequence -// : - item 1 -// - item 2 -// ? a mapping -// : key 1: value 1 -// key 2: value 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("a sequence",plain) -// VALUE -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// KEY -// SCALAR("a mapping",plain) -// VALUE -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// YAML also permits non-indented sequences if they are included into a block -// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: -// -// key: -// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. -// - item 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("key",plain) -// VALUE -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// - -// Ensure that the buffer contains the required number of characters. -// Return true on success, false on failure (reader error or memory error). -func cache(parser *yaml_parser_t, length int) bool { - // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) - return parser.unread >= length || yaml_parser_update_buffer(parser, length) -} - -// Advance the buffer pointer. -func skip(parser *yaml_parser_t) { - if !is_blank(parser.buffer, parser.buffer_pos) { - parser.newlines = 0 - } - parser.mark.index++ - parser.mark.column++ - parser.unread-- - parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) -} - -func skip_line(parser *yaml_parser_t) { - if is_crlf(parser.buffer, parser.buffer_pos) { - parser.mark.index += 2 - parser.mark.column = 0 - parser.mark.line++ - parser.unread -= 2 - parser.buffer_pos += 2 - parser.newlines++ - } else if is_break(parser.buffer, parser.buffer_pos) { - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) - parser.newlines++ - } -} - -// Copy a character to a string buffer and advance pointers. -func read(parser *yaml_parser_t, s []byte) []byte { - if !is_blank(parser.buffer, parser.buffer_pos) { - parser.newlines = 0 - } - w := width(parser.buffer[parser.buffer_pos]) - if w == 0 { - panic("invalid character sequence") - } - if len(s) == 0 { - s = make([]byte, 0, 32) - } - if w == 1 && len(s)+w <= cap(s) { - s = s[:len(s)+1] - s[len(s)-1] = parser.buffer[parser.buffer_pos] - parser.buffer_pos++ - } else { - s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) - parser.buffer_pos += w - } - parser.mark.index++ - parser.mark.column++ - parser.unread-- - return s -} - -// Copy a line break character to a string buffer and advance pointers. -func read_line(parser *yaml_parser_t, s []byte) []byte { - buf := parser.buffer - pos := parser.buffer_pos - switch { - case buf[pos] == '\r' && buf[pos+1] == '\n': - // CR LF . LF - s = append(s, '\n') - parser.buffer_pos += 2 - parser.mark.index++ - parser.unread-- - case buf[pos] == '\r' || buf[pos] == '\n': - // CR|LF . LF - s = append(s, '\n') - parser.buffer_pos += 1 - case buf[pos] == '\xC2' && buf[pos+1] == '\x85': - // NEL . LF - s = append(s, '\n') - parser.buffer_pos += 2 - case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): - // LS|PS . LS|PS - s = append(s, buf[parser.buffer_pos:pos+3]...) 
- parser.buffer_pos += 3 - default: - return s - } - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - parser.newlines++ - return s -} - -// Get the next token. -func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { - // Erase the token object. - *token = yaml_token_t{} // [Go] Is this necessary? - - // No tokens after STREAM-END or error. - if parser.stream_end_produced || parser.error != yaml_NO_ERROR { - return true - } - - // Ensure that the tokens queue contains enough tokens. - if !parser.token_available { - if !yaml_parser_fetch_more_tokens(parser) { - return false - } - } - - // Fetch the next token from the queue. - *token = parser.tokens[parser.tokens_head] - parser.tokens_head++ - parser.tokens_parsed++ - parser.token_available = false - - if token.typ == yaml_STREAM_END_TOKEN { - parser.stream_end_produced = true - } - return true -} - -// Set the scanner error and return false. -func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { - parser.error = yaml_SCANNER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = parser.mark - return false -} - -func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { - context := "while parsing a tag" - if directive { - context = "while parsing a %TAG directive" - } - return yaml_parser_set_scanner_error(parser, context, context_mark, problem) -} - -func trace(args ...interface{}) func() { - pargs := append([]interface{}{"+++"}, args...) - fmt.Println(pargs...) - pargs = append([]interface{}{"---"}, args...) - return func() { fmt.Println(pargs...) } -} - -// Ensure that the tokens queue contains at least one token which can be -// returned to the Parser. -func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { - // While we need more tokens to fetch, do it. - for { - // [Go] The comment parsing logic requires a lookahead of two tokens - // so that foot comments may be parsed in time of associating them - // with the tokens that are parsed before them, and also for line - // comments to be transformed into head comments in some edge cases. - if parser.tokens_head < len(parser.tokens)-2 { - // If a potential simple key is at the head position, we need to fetch - // the next token to disambiguate it. - head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] - if !ok { - break - } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok { - return false - } else if !valid { - break - } - } - // Fetch the next token. - if !yaml_parser_fetch_next_token(parser) { - return false - } - } - - parser.token_available = true - return true -} - -// The dispatcher for token fetchers. -func yaml_parser_fetch_next_token(parser *yaml_parser_t) (ok bool) { - // Ensure that the buffer is initialized. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check if we just started scanning. Fetch STREAM-START then. - if !parser.stream_start_produced { - return yaml_parser_fetch_stream_start(parser) - } - - scan_mark := parser.mark - - // Eat whitespaces and comments until we reach the next token. 
- if !yaml_parser_scan_to_next_token(parser) { - return false - } - - // [Go] While unrolling indents, transform the head comments of prior - // indentation levels observed after scan_start into foot comments at - // the respective indexes. - - // Check the indentation level against the current column. - if !yaml_parser_unroll_indent(parser, parser.mark.column, scan_mark) { - return false - } - - // Ensure that the buffer contains at least 4 characters. 4 is the length - // of the longest indicators ('--- ' and '... '). - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - - // Is it the end of the stream? - if is_z(parser.buffer, parser.buffer_pos) { - return yaml_parser_fetch_stream_end(parser) - } - - // Is it a directive? - if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { - return yaml_parser_fetch_directive(parser) - } - - buf := parser.buffer - pos := parser.buffer_pos - - // Is it the document start indicator? - if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) - } - - // Is it the document end indicator? - if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) - } - - comment_mark := parser.mark - if len(parser.tokens) > 0 && (parser.flow_level == 0 && buf[pos] == ':' || parser.flow_level > 0 && buf[pos] == ',') { - // Associate any following comments with the prior token. - comment_mark = parser.tokens[len(parser.tokens)-1].start_mark - } - defer func() { - if !ok { - return - } - if !yaml_parser_scan_line_comment(parser, comment_mark) { - ok = false - return - } - }() - - // Is it the flow sequence start indicator? - if buf[pos] == '[' { - return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) - } - - // Is it the flow mapping start indicator? - if parser.buffer[parser.buffer_pos] == '{' { - return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) - } - - // Is it the flow sequence end indicator? - if parser.buffer[parser.buffer_pos] == ']' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_SEQUENCE_END_TOKEN) - } - - // Is it the flow mapping end indicator? - if parser.buffer[parser.buffer_pos] == '}' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_MAPPING_END_TOKEN) - } - - // Is it the flow entry indicator? - if parser.buffer[parser.buffer_pos] == ',' { - return yaml_parser_fetch_flow_entry(parser) - } - - // Is it the block entry indicator? - if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { - return yaml_parser_fetch_block_entry(parser) - } - - // Is it the key indicator? - if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_key(parser) - } - - // Is it the value indicator? - if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_value(parser) - } - - // Is it an alias? - if parser.buffer[parser.buffer_pos] == '*' { - return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) - } - - // Is it an anchor? 
- if parser.buffer[parser.buffer_pos] == '&' { - return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) - } - - // Is it a tag? - if parser.buffer[parser.buffer_pos] == '!' { - return yaml_parser_fetch_tag(parser) - } - - // Is it a literal scalar? - if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, true) - } - - // Is it a folded scalar? - if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, false) - } - - // Is it a single-quoted scalar? - if parser.buffer[parser.buffer_pos] == '\'' { - return yaml_parser_fetch_flow_scalar(parser, true) - } - - // Is it a double-quoted scalar? - if parser.buffer[parser.buffer_pos] == '"' { - return yaml_parser_fetch_flow_scalar(parser, false) - } - - // Is it a plain scalar? - // - // A plain scalar may start with any non-blank characters except - // - // '-', '?', ':', ',', '[', ']', '{', '}', - // '#', '&', '*', '!', '|', '>', '\'', '\"', - // '%', '@', '`'. - // - // In the block context (and, for the '-' indicator, in the flow context - // too), it may also start with the characters - // - // '-', '?', ':' - // - // if it is followed by a non-space character. - // - // The last rule is more restrictive than the specification requires. - // [Go] TODO Make this logic more reasonable. - //switch parser.buffer[parser.buffer_pos] { - //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': - //} - if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || - parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || - parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || - parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || - parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || - parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || - parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || - parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || - parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || - (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || - (parser.flow_level == 0 && - (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && - !is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_plain_scalar(parser) - } - - // If we don't determine the token type so far, it is an error. - return yaml_parser_set_scanner_error(parser, - "while scanning for the next token", parser.mark, - "found character that cannot start any token") -} - -func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) { - if !simple_key.possible { - return false, true - } - - // The 1.2 specification says: - // - // "If the ? indicator is omitted, parsing needs to see past the - // implicit key to recognize it as such. To limit the amount of - // lookahead required, the β€œ:” indicator must appear at most 1024 - // Unicode characters beyond the start of the key. In addition, the key - // is restricted to a single line." 
- // - if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index { - // Check if the potential simple key to be removed is required. - if simple_key.required { - return false, yaml_parser_set_scanner_error(parser, - "while scanning a simple key", simple_key.mark, - "could not find expected ':'") - } - simple_key.possible = false - return false, true - } - return true, true -} - -// Check if a simple key may start at the current position and add it if -// needed. -func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { - // A simple key is required at the current position if the scanner is in - // the block context and the current column coincides with the indentation - // level. - - required := parser.flow_level == 0 && parser.indent == parser.mark.column - - // - // If the current position may start a simple key, save it. - // - if parser.simple_key_allowed { - simple_key := yaml_simple_key_t{ - possible: true, - required: required, - token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), - mark: parser.mark, - } - - if !yaml_parser_remove_simple_key(parser) { - return false - } - parser.simple_keys[len(parser.simple_keys)-1] = simple_key - parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1 - } - return true -} - -// Remove a potential simple key at the current flow level. -func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { - i := len(parser.simple_keys) - 1 - if parser.simple_keys[i].possible { - // If the key is required, it is an error. - if parser.simple_keys[i].required { - return yaml_parser_set_scanner_error(parser, - "while scanning a simple key", parser.simple_keys[i].mark, - "could not find expected ':'") - } - // Remove the key from the stack. - parser.simple_keys[i].possible = false - delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number) - } - return true -} - -// max_flow_level limits the flow_level -const max_flow_level = 10000 - -// Increase the flow level and resize the simple key list if needed. -func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { - // Reset the simple key on the next level. - parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{ - possible: false, - required: false, - token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), - mark: parser.mark, - }) - - // Increase the flow level. - parser.flow_level++ - if parser.flow_level > max_flow_level { - return yaml_parser_set_scanner_error(parser, - "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark, - fmt.Sprintf("exceeded max depth of %d", max_flow_level)) - } - return true -} - -// Decrease the flow level. -func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { - if parser.flow_level > 0 { - parser.flow_level-- - last := len(parser.simple_keys) - 1 - delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number) - parser.simple_keys = parser.simple_keys[:last] - } - return true -} - -// max_indents limits the indents stack size -const max_indents = 10000 - -// Push the current indentation level to the stack and set the new level -// the current column is greater than the indentation level. In this case, -// append or insert the specified token into the token queue. -func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { - // In the flow context, do nothing. 
- if parser.flow_level > 0 { - return true - } - - if parser.indent < column { - // Push the current indentation level to the stack and set the new - // indentation level. - parser.indents = append(parser.indents, parser.indent) - parser.indent = column - if len(parser.indents) > max_indents { - return yaml_parser_set_scanner_error(parser, - "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark, - fmt.Sprintf("exceeded max depth of %d", max_indents)) - } - - // Create a token and insert it into the queue. - token := yaml_token_t{ - typ: typ, - start_mark: mark, - end_mark: mark, - } - if number > -1 { - number -= parser.tokens_parsed - } - yaml_insert_token(parser, number, &token) - } - return true -} - -// Pop indentation levels from the indents stack until the current level -// becomes less or equal to the column. For each indentation level, append -// the BLOCK-END token. -func yaml_parser_unroll_indent(parser *yaml_parser_t, column int, scan_mark yaml_mark_t) bool { - // In the flow context, do nothing. - if parser.flow_level > 0 { - return true - } - - block_mark := scan_mark - block_mark.index-- - - // Loop through the indentation levels in the stack. - for parser.indent > column { - - // [Go] Reposition the end token before potential following - // foot comments of parent blocks. For that, search - // backwards for recent comments that were at the same - // indent as the block that is ending now. - stop_index := block_mark.index - for i := len(parser.comments) - 1; i >= 0; i-- { - comment := &parser.comments[i] - - if comment.end_mark.index < stop_index { - // Don't go back beyond the start of the comment/whitespace scan, unless column < 0. - // If requested indent column is < 0, then the document is over and everything else - // is a foot anyway. - break - } - if comment.start_mark.column == parser.indent+1 { - // This is a good match. But maybe there's a former comment - // at that same indent level, so keep searching. - block_mark = comment.start_mark - } - - // While the end of the former comment matches with - // the start of the following one, we know there's - // nothing in between and scanning is still safe. - stop_index = comment.scan_mark.index - } - - // Create a token and append it to the queue. - token := yaml_token_t{ - typ: yaml_BLOCK_END_TOKEN, - start_mark: block_mark, - end_mark: block_mark, - } - yaml_insert_token(parser, -1, &token) - - // Pop the indentation level. - parser.indent = parser.indents[len(parser.indents)-1] - parser.indents = parser.indents[:len(parser.indents)-1] - } - return true -} - -// Initialize the scanner and produce the STREAM-START token. -func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { - - // Set the initial indentation. - parser.indent = -1 - - // Initialize the simple key stack. - parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) - - parser.simple_keys_by_tok = make(map[int]int) - - // A simple key is allowed at the beginning of the stream. - parser.simple_key_allowed = true - - // We have started. - parser.stream_start_produced = true - - // Create the STREAM-START token and append it to the queue. - token := yaml_token_t{ - typ: yaml_STREAM_START_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - encoding: parser.encoding, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the STREAM-END token and shut down the scanner. -func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { - - // Force new line. 
- if parser.mark.column != 0 { - parser.mark.column = 0 - parser.mark.line++ - } - - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1, parser.mark) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Create the STREAM-END token and append it to the queue. - token := yaml_token_t{ - typ: yaml_STREAM_END_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. -func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1, parser.mark) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. - token := yaml_token_t{} - if !yaml_parser_scan_directive(parser, &token) { - return false - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the DOCUMENT-START or DOCUMENT-END token. -func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1, parser.mark) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Consume the token. - start_mark := parser.mark - - skip(parser) - skip(parser) - skip(parser) - - end_mark := parser.mark - - // Create the DOCUMENT-START or DOCUMENT-END token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. -func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { - - // The indicators '[' and '{' may start a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // Increase the flow level. - if !yaml_parser_increase_flow_level(parser) { - return false - } - - // A simple key may follow the indicators '[' and '{'. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. -func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // Reset any potential simple key on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Decrease the flow level. - if !yaml_parser_decrease_flow_level(parser) { - return false - } - - // No simple keys after the indicators ']' and '}'. - parser.simple_key_allowed = false - - // Consume the token. - - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. 
- yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-ENTRY token. -func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after ','. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-ENTRY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_FLOW_ENTRY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the BLOCK-ENTRY token. -func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { - // Check if the scanner is in the block context. - if parser.flow_level == 0 { - // Check if we are allowed to start a new entry. - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "block sequence entries are not allowed in this context") - } - // Add the BLOCK-SEQUENCE-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { - return false - } - } else { - // It is an error for the '-' indicator to occur in the flow context, - // but we let the Parser detect and report about it because the Parser - // is able to point to the context. - } - - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after '-'. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the BLOCK-ENTRY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_BLOCK_ENTRY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the KEY token. -func yaml_parser_fetch_key(parser *yaml_parser_t) bool { - - // In the block context, additional checks are required. - if parser.flow_level == 0 { - // Check if we are allowed to start a new key (not nessesary simple). - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "mapping keys are not allowed in this context") - } - // Add the BLOCK-MAPPING-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { - return false - } - } - - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after '?' in the block context. - parser.simple_key_allowed = parser.flow_level == 0 - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the KEY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_KEY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the VALUE token. -func yaml_parser_fetch_value(parser *yaml_parser_t) bool { - - simple_key := &parser.simple_keys[len(parser.simple_keys)-1] - - // Have we found a simple key? - if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok { - return false - - } else if valid { - - // Create the KEY token and insert it into the queue. 
- token := yaml_token_t{ - typ: yaml_KEY_TOKEN, - start_mark: simple_key.mark, - end_mark: simple_key.mark, - } - yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) - - // In the block context, we may need to add the BLOCK-MAPPING-START token. - if !yaml_parser_roll_indent(parser, simple_key.mark.column, - simple_key.token_number, - yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { - return false - } - - // Remove the simple key. - simple_key.possible = false - delete(parser.simple_keys_by_tok, simple_key.token_number) - - // A simple key cannot follow another simple key. - parser.simple_key_allowed = false - - } else { - // The ':' indicator follows a complex key. - - // In the block context, extra checks are required. - if parser.flow_level == 0 { - - // Check if we are allowed to start a complex value. - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "mapping values are not allowed in this context") - } - - // Add the BLOCK-MAPPING-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { - return false - } - } - - // Simple keys after ':' are allowed in the block context. - parser.simple_key_allowed = parser.flow_level == 0 - } - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the VALUE token and append it to the queue. - token := yaml_token_t{ - typ: yaml_VALUE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the ALIAS or ANCHOR token. -func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // An anchor or an alias could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow an anchor or an alias. - parser.simple_key_allowed = false - - // Create the ALIAS or ANCHOR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_anchor(parser, &token, typ) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the TAG token. -func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { - // A tag could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a tag. - parser.simple_key_allowed = false - - // Create the TAG token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_tag(parser, &token) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. -func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { - // Remove any potential simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // A simple key may follow a block scalar. - parser.simple_key_allowed = true - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_block_scalar(parser, &token, literal) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. -func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { - // A plain scalar could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a flow scalar. 
- parser.simple_key_allowed = false - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_flow_scalar(parser, &token, single) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,plain) token. -func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { - // A plain scalar could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a flow scalar. - parser.simple_key_allowed = false - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_plain_scalar(parser, &token) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Eat whitespaces and comments until the next token is found. -func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { - - scan_mark := parser.mark - - // Until the next token is not found. - for { - // Allow the BOM mark to start a line. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { - skip(parser) - } - - // Eat whitespaces. - // Tabs are allowed: - // - in the flow context - // - in the block context, but not at the beginning of the line or - // after '-', '?', or ':' (complex value). - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if we just had a line comment under a sequence entry that - // looks more like a header to the following content. Similar to this: - // - // - # The comment - // - Some data - // - // If so, transform the line comment to a head comment and reposition. - if len(parser.comments) > 0 && len(parser.tokens) > 1 { - tokenA := parser.tokens[len(parser.tokens)-2] - tokenB := parser.tokens[len(parser.tokens)-1] - comment := &parser.comments[len(parser.comments)-1] - if tokenA.typ == yaml_BLOCK_SEQUENCE_START_TOKEN && tokenB.typ == yaml_BLOCK_ENTRY_TOKEN && len(comment.line) > 0 && !is_break(parser.buffer, parser.buffer_pos) { - // If it was in the prior line, reposition so it becomes a - // header of the follow up token. Otherwise, keep it in place - // so it becomes a header of the former. - comment.head = comment.line - comment.line = nil - if comment.start_mark.line == parser.mark.line-1 { - comment.token_mark = parser.mark - } - } - } - - // Eat a comment until a line break. - if parser.buffer[parser.buffer_pos] == '#' { - if !yaml_parser_scan_comments(parser, scan_mark) { - return false - } - } - - // If it is a line break, eat it. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - - // In the block context, a new line may start a simple key. - if parser.flow_level == 0 { - parser.simple_key_allowed = true - } - } else { - break // We have found a token. - } - } - - return true -} - -// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// %TAG !yaml! 
tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// -func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { - // Eat '%'. - start_mark := parser.mark - skip(parser) - - // Scan the directive name. - var name []byte - if !yaml_parser_scan_directive_name(parser, start_mark, &name) { - return false - } - - // Is it a YAML directive? - if bytes.Equal(name, []byte("YAML")) { - // Scan the VERSION directive value. - var major, minor int8 - if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { - return false - } - end_mark := parser.mark - - // Create a VERSION-DIRECTIVE token. - *token = yaml_token_t{ - typ: yaml_VERSION_DIRECTIVE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - major: major, - minor: minor, - } - - // Is it a TAG directive? - } else if bytes.Equal(name, []byte("TAG")) { - // Scan the TAG directive value. - var handle, prefix []byte - if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { - return false - } - end_mark := parser.mark - - // Create a TAG-DIRECTIVE token. - *token = yaml_token_t{ - typ: yaml_TAG_DIRECTIVE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: handle, - prefix: prefix, - } - - // Unknown directive. - } else { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "found unknown directive name") - return false - } - - // Eat the rest of the line including any comments. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - if parser.buffer[parser.buffer_pos] == '#' { - // [Go] Discard this inline comment for the time being. - //if !yaml_parser_scan_line_comment(parser, start_mark) { - // return false - //} - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // Check if we are at the end of the line. - if !is_breakz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "did not find expected comment or line break") - return false - } - - // Eat a line break. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - } - - return true -} - -// Scan the directive name. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^ -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^ -// -func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { - // Consume the directive name. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - var s []byte - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the name is empty. - if len(s) == 0 { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "could not find expected directive name") - return false - } - - // Check for an blank character after the name. 
- if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "found unexpected non-alphabetical character") - return false - } - *name = s - return true -} - -// Scan the value of VERSION-DIRECTIVE. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^ -func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { - // Eat whitespaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Consume the major version number. - if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { - return false - } - - // Eat '.'. - if parser.buffer[parser.buffer_pos] != '.' { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "did not find expected digit or '.' character") - } - - skip(parser) - - // Consume the minor version number. - if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { - return false - } - return true -} - -const max_number_length = 2 - -// Scan the version number of VERSION-DIRECTIVE. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^ -// %YAML 1.1 # a comment \n -// ^ -func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { - - // Repeat while the next character is digit. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - var value, length int8 - for is_digit(parser.buffer, parser.buffer_pos) { - // Check if the number is too long. - length++ - if length > max_number_length { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "found extremely long version number") - } - value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the number was present. - if length == 0 { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "did not find expected version number") - } - *number = value - return true -} - -// Scan the value of a TAG-DIRECTIVE token. -// -// Scope: -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// -func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { - var handle_value, prefix_value []byte - - // Eat whitespaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Scan a handle. - if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { - return false - } - - // Expect a whitespace. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blank(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace") - return false - } - - // Eat whitespaces. - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Scan a prefix. 
- if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { - return false - } - - // Expect a whitespace or line break. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace or line break") - return false - } - - *handle = handle_value - *prefix = prefix_value - return true -} - -func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { - var s []byte - - // Eat the indicator character. - start_mark := parser.mark - skip(parser) - - // Consume the value. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - end_mark := parser.mark - - /* - * Check if length of the anchor is greater than 0 and it is followed by - * a whitespace character or one of the indicators: - * - * '?', ':', ',', ']', '}', '%', '@', '`'. - */ - - if len(s) == 0 || - !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || - parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || - parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || - parser.buffer[parser.buffer_pos] == '`') { - context := "while scanning an alias" - if typ == yaml_ANCHOR_TOKEN { - context = "while scanning an anchor" - } - yaml_parser_set_scanner_error(parser, context, start_mark, - "did not find expected alphabetic or numeric character") - return false - } - - // Create a token. - *token = yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - value: s, - } - - return true -} - -/* - * Scan a TAG token. - */ - -func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { - var handle, suffix []byte - - start_mark := parser.mark - - // Check if the tag is in the canonical form. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - if parser.buffer[parser.buffer_pos+1] == '<' { - // Keep the handle as '' - - // Eat '!<' - skip(parser) - skip(parser) - - // Consume the tag value. - if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { - return false - } - - // Check for '>' and eat it. - if parser.buffer[parser.buffer_pos] != '>' { - yaml_parser_set_scanner_error(parser, "while scanning a tag", - start_mark, "did not find the expected '>'") - return false - } - - skip(parser) - } else { - // The tag has either the '!suffix' or the '!handle!suffix' form. - - // First, try to scan a handle. - if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { - return false - } - - // Check if it is, indeed, handle. - if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { - // Scan the suffix now. - if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { - return false - } - } else { - // It wasn't a handle after all. Scan the rest of the tag. - if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { - return false - } - - // Set the handle to '!'. - handle = []byte{'!'} - - // A special case: the '!' tag. Set the handle to '' and the - // suffix to '!'. 
- if len(suffix) == 0 { - handle, suffix = suffix, handle - } - } - } - - // Check the character which ends the tag. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a tag", - start_mark, "did not find expected whitespace or line break") - return false - } - - end_mark := parser.mark - - // Create a token. - *token = yaml_token_t{ - typ: yaml_TAG_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: handle, - suffix: suffix, - } - return true -} - -// Scan a tag handle. -func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { - // Check the initial '!' character. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.buffer[parser.buffer_pos] != '!' { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected '!'") - return false - } - - var s []byte - - // Copy the '!' character. - s = read(parser, s) - - // Copy all subsequent alphabetical and numerical characters. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the trailing character is '!' and copy it. - if parser.buffer[parser.buffer_pos] == '!' { - s = read(parser, s) - } else { - // It's either the '!' tag or not really a tag handle. If it's a %TAG - // directive, it's an error. If it's a tag token, it must be a part of URI. - if directive && string(s) != "!" { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected '!'") - return false - } - } - - *handle = s - return true -} - -// Scan a tag. -func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { - //size_t length = head ? strlen((char *)head) : 0 - var s []byte - hasTag := len(head) > 0 - - // Copy the head if needed. - // - // Note that we don't copy the leading '!' character. - if len(head) > 1 { - s = append(s, head[1:]...) - } - - // Scan the tag. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // The set of characters that may appear in URI is as follows: - // - // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', - // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', - // '%'. - // [Go] TODO Convert this into more reasonable logic. - for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || - parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || - parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || - parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || - parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || - parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || - parser.buffer[parser.buffer_pos] == '!' 
|| parser.buffer[parser.buffer_pos] == '~' || - parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || - parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || - parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || - parser.buffer[parser.buffer_pos] == '%' { - // Check if it is a URI-escape sequence. - if parser.buffer[parser.buffer_pos] == '%' { - if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { - return false - } - } else { - s = read(parser, s) - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - hasTag = true - } - - if !hasTag { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected tag URI") - return false - } - *uri = s - return true -} - -// Decode an URI-escape sequence corresponding to a single UTF-8 character. -func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { - - // Decode the required number of characters. - w := 1024 - for w > 0 { - // Check for a URI-escaped octet. - if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { - return false - } - - if !(parser.buffer[parser.buffer_pos] == '%' && - is_hex(parser.buffer, parser.buffer_pos+1) && - is_hex(parser.buffer, parser.buffer_pos+2)) { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find URI escaped octet") - } - - // Get the octet. - octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) - - // If it is the leading octet, determine the length of the UTF-8 sequence. - if w == 1024 { - w = width(octet) - if w == 0 { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "found an incorrect leading UTF-8 octet") - } - } else { - // Check if the trailing octet is correct. - if octet&0xC0 != 0x80 { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "found an incorrect trailing UTF-8 octet") - } - } - - // Copy the octet and move the pointers. - *s = append(*s, octet) - skip(parser) - skip(parser) - skip(parser) - w-- - } - return true -} - -// Scan a block scalar. -func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { - // Eat the indicator '|' or '>'. - start_mark := parser.mark - skip(parser) - - // Scan the additional block scalar indicators. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check for a chomping indicator. - var chomping, increment int - if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { - // Set the chomping method and eat the indicator. - if parser.buffer[parser.buffer_pos] == '+' { - chomping = +1 - } else { - chomping = -1 - } - skip(parser) - - // Check for an indentation indicator. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if is_digit(parser.buffer, parser.buffer_pos) { - // Check that the indentation is greater than 0. - if parser.buffer[parser.buffer_pos] == '0' { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an indentation indicator equal to 0") - return false - } - - // Get the indentation level and eat the indicator. 
- increment = as_digit(parser.buffer, parser.buffer_pos) - skip(parser) - } - - } else if is_digit(parser.buffer, parser.buffer_pos) { - // Do the same as above, but in the opposite order. - - if parser.buffer[parser.buffer_pos] == '0' { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an indentation indicator equal to 0") - return false - } - increment = as_digit(parser.buffer, parser.buffer_pos) - skip(parser) - - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { - if parser.buffer[parser.buffer_pos] == '+' { - chomping = +1 - } else { - chomping = -1 - } - skip(parser) - } - } - - // Eat whitespaces and comments to the end of the line. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - if parser.buffer[parser.buffer_pos] == '#' { - // TODO Test this and then re-enable it. - //if !yaml_parser_scan_line_comment(parser, start_mark) { - // return false - //} - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // Check if we are at the end of the line. - if !is_breakz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "did not find expected comment or line break") - return false - } - - // Eat a line break. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - } - - end_mark := parser.mark - - // Set the indentation level if it was specified. - var indent int - if increment > 0 { - if parser.indent >= 0 { - indent = parser.indent + increment - } else { - indent = increment - } - } - - // Scan the leading line breaks and determine the indentation level if needed. - var s, leading_break, trailing_breaks []byte - if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { - return false - } - - // Scan the block scalar content. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - var leading_blank, trailing_blank bool - for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { - // We are at the beginning of a non-empty line. - - // Is it a trailing whitespace? - trailing_blank = is_blank(parser.buffer, parser.buffer_pos) - - // Check if we need to fold the leading line break. - if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { - // Do we need to join the lines by space? - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } - } else { - s = append(s, leading_break...) - } - leading_break = leading_break[:0] - - // Append the remaining line breaks. - s = append(s, trailing_breaks...) - trailing_breaks = trailing_breaks[:0] - - // Is it a leading whitespace? - leading_blank = is_blank(parser.buffer, parser.buffer_pos) - - // Consume the current line. - for !is_breakz(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Consume the line break. 
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - leading_break = read_line(parser, leading_break) - - // Eat the following indentation spaces and line breaks. - if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { - return false - } - } - - // Chomp the tail. - if chomping != -1 { - s = append(s, leading_break...) - } - if chomping == 1 { - s = append(s, trailing_breaks...) - } - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_LITERAL_SCALAR_STYLE, - } - if !literal { - token.style = yaml_FOLDED_SCALAR_STYLE - } - return true -} - -// Scan indentation spaces and line breaks for a block scalar. Determine the -// indentation level if needed. -func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { - *end_mark = parser.mark - - // Eat the indentation spaces and line breaks. - max_indent := 0 - for { - // Eat the indentation spaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - if parser.mark.column > max_indent { - max_indent = parser.mark.column - } - - // Check for a tab character messing the indentation. - if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { - return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found a tab character where an indentation space is expected") - } - - // Have we found a non-empty line? - if !is_break(parser.buffer, parser.buffer_pos) { - break - } - - // Consume the line break. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - // [Go] Should really be returning breaks instead. - *breaks = read_line(parser, *breaks) - *end_mark = parser.mark - } - - // Determine the indentation level if needed. - if *indent == 0 { - *indent = max_indent - if *indent < parser.indent+1 { - *indent = parser.indent + 1 - } - if *indent < 1 { - *indent = 1 - } - } - return true -} - -// Scan a quoted scalar. -func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { - // Eat the left quote. - start_mark := parser.mark - skip(parser) - - // Consume the content of the quoted scalar. - var s, leading_break, trailing_breaks, whitespaces []byte - for { - // Check that there are no document indicators at the beginning of the line. - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - - if parser.mark.column == 0 && - ((parser.buffer[parser.buffer_pos+0] == '-' && - parser.buffer[parser.buffer_pos+1] == '-' && - parser.buffer[parser.buffer_pos+2] == '-') || - (parser.buffer[parser.buffer_pos+0] == '.' && - parser.buffer[parser.buffer_pos+1] == '.' && - parser.buffer[parser.buffer_pos+2] == '.')) && - is_blankz(parser.buffer, parser.buffer_pos+3) { - yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", - start_mark, "found unexpected document indicator") - return false - } - - // Check for EOF. 
- if is_z(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", - start_mark, "found unexpected end of stream") - return false - } - - // Consume non-blank characters. - leading_blanks := false - for !is_blankz(parser.buffer, parser.buffer_pos) { - if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { - // Is is an escaped single quote. - s = append(s, '\'') - skip(parser) - skip(parser) - - } else if single && parser.buffer[parser.buffer_pos] == '\'' { - // It is a right single quote. - break - } else if !single && parser.buffer[parser.buffer_pos] == '"' { - // It is a right double quote. - break - - } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { - // It is an escaped line break. - if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { - return false - } - skip(parser) - skip_line(parser) - leading_blanks = true - break - - } else if !single && parser.buffer[parser.buffer_pos] == '\\' { - // It is an escape sequence. - code_length := 0 - - // Check the escape character. - switch parser.buffer[parser.buffer_pos+1] { - case '0': - s = append(s, 0) - case 'a': - s = append(s, '\x07') - case 'b': - s = append(s, '\x08') - case 't', '\t': - s = append(s, '\x09') - case 'n': - s = append(s, '\x0A') - case 'v': - s = append(s, '\x0B') - case 'f': - s = append(s, '\x0C') - case 'r': - s = append(s, '\x0D') - case 'e': - s = append(s, '\x1B') - case ' ': - s = append(s, '\x20') - case '"': - s = append(s, '"') - case '\'': - s = append(s, '\'') - case '\\': - s = append(s, '\\') - case 'N': // NEL (#x85) - s = append(s, '\xC2') - s = append(s, '\x85') - case '_': // #xA0 - s = append(s, '\xC2') - s = append(s, '\xA0') - case 'L': // LS (#x2028) - s = append(s, '\xE2') - s = append(s, '\x80') - s = append(s, '\xA8') - case 'P': // PS (#x2029) - s = append(s, '\xE2') - s = append(s, '\x80') - s = append(s, '\xA9') - case 'x': - code_length = 2 - case 'u': - code_length = 4 - case 'U': - code_length = 8 - default: - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "found unknown escape character") - return false - } - - skip(parser) - skip(parser) - - // Consume an arbitrary escape code. - if code_length > 0 { - var value int - - // Scan the character value. - if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { - return false - } - for k := 0; k < code_length; k++ { - if !is_hex(parser.buffer, parser.buffer_pos+k) { - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "did not find expected hexdecimal number") - return false - } - value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) - } - - // Check the value and write the character. 
- if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "found invalid Unicode character escape code") - return false - } - if value <= 0x7F { - s = append(s, byte(value)) - } else if value <= 0x7FF { - s = append(s, byte(0xC0+(value>>6))) - s = append(s, byte(0x80+(value&0x3F))) - } else if value <= 0xFFFF { - s = append(s, byte(0xE0+(value>>12))) - s = append(s, byte(0x80+((value>>6)&0x3F))) - s = append(s, byte(0x80+(value&0x3F))) - } else { - s = append(s, byte(0xF0+(value>>18))) - s = append(s, byte(0x80+((value>>12)&0x3F))) - s = append(s, byte(0x80+((value>>6)&0x3F))) - s = append(s, byte(0x80+(value&0x3F))) - } - - // Advance the pointer. - for k := 0; k < code_length; k++ { - skip(parser) - } - } - } else { - // It is a non-escaped non-blank character. - s = read(parser, s) - } - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - } - - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check if we are at the end of the scalar. - if single { - if parser.buffer[parser.buffer_pos] == '\'' { - break - } - } else { - if parser.buffer[parser.buffer_pos] == '"' { - break - } - } - - // Consume blank characters. - for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { - if is_blank(parser.buffer, parser.buffer_pos) { - // Consume a space or a tab character. - if !leading_blanks { - whitespaces = read(parser, whitespaces) - } else { - skip(parser) - } - } else { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - // Check if it is a first line break. - if !leading_blanks { - whitespaces = whitespaces[:0] - leading_break = read_line(parser, leading_break) - leading_blanks = true - } else { - trailing_breaks = read_line(parser, trailing_breaks) - } - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Join the whitespaces or fold line breaks. - if leading_blanks { - // Do we need to fold line breaks? - if len(leading_break) > 0 && leading_break[0] == '\n' { - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailing_breaks...) - } - } else { - s = append(s, leading_break...) - s = append(s, trailing_breaks...) - } - trailing_breaks = trailing_breaks[:0] - leading_break = leading_break[:0] - } else { - s = append(s, whitespaces...) - whitespaces = whitespaces[:0] - } - } - - // Eat the right quote. - skip(parser) - end_mark := parser.mark - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_SINGLE_QUOTED_SCALAR_STYLE, - } - if !single { - token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - return true -} - -// Scan a plain scalar. -func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { - - var s, leading_break, trailing_breaks, whitespaces []byte - var leading_blanks bool - var indent = parser.indent + 1 - - start_mark := parser.mark - end_mark := parser.mark - - // Consume the content of the plain scalar. - for { - // Check for a document indicator. - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - if parser.mark.column == 0 && - ((parser.buffer[parser.buffer_pos+0] == '-' && - parser.buffer[parser.buffer_pos+1] == '-' && - parser.buffer[parser.buffer_pos+2] == '-') || - (parser.buffer[parser.buffer_pos+0] == '.' 
&& - parser.buffer[parser.buffer_pos+1] == '.' && - parser.buffer[parser.buffer_pos+2] == '.')) && - is_blankz(parser.buffer, parser.buffer_pos+3) { - break - } - - // Check for a comment. - if parser.buffer[parser.buffer_pos] == '#' { - break - } - - // Consume non-blank characters. - for !is_blankz(parser.buffer, parser.buffer_pos) { - - // Check for indicators that may end a plain scalar. - if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || - (parser.flow_level > 0 && - (parser.buffer[parser.buffer_pos] == ',' || - parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || - parser.buffer[parser.buffer_pos] == '}')) { - break - } - - // Check if we need to join whitespaces and breaks. - if leading_blanks || len(whitespaces) > 0 { - if leading_blanks { - // Do we need to fold line breaks? - if leading_break[0] == '\n' { - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailing_breaks...) - } - } else { - s = append(s, leading_break...) - s = append(s, trailing_breaks...) - } - trailing_breaks = trailing_breaks[:0] - leading_break = leading_break[:0] - leading_blanks = false - } else { - s = append(s, whitespaces...) - whitespaces = whitespaces[:0] - } - } - - // Copy the character. - s = read(parser, s) - - end_mark = parser.mark - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - } - - // Is it the end? - if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { - break - } - - // Consume blank characters. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { - if is_blank(parser.buffer, parser.buffer_pos) { - - // Check for tab characters that abuse indentation. - if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", - start_mark, "found a tab character that violates indentation") - return false - } - - // Consume a space or a tab character. - if !leading_blanks { - whitespaces = read(parser, whitespaces) - } else { - skip(parser) - } - } else { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - // Check if it is a first line break. - if !leading_blanks { - whitespaces = whitespaces[:0] - leading_break = read_line(parser, leading_break) - leading_blanks = true - } else { - trailing_breaks = read_line(parser, trailing_breaks) - } - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check indentation level. - if parser.flow_level == 0 && parser.mark.column < indent { - break - } - } - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_PLAIN_SCALAR_STYLE, - } - - // Note that we change the 'simple_key_allowed' flag. 
- if leading_blanks { - parser.simple_key_allowed = true - } - return true -} - -func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t) bool { - if parser.newlines > 0 { - return true - } - - var start_mark yaml_mark_t - var text []byte - - for peek := 0; peek < 512; peek++ { - if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { - break - } - if is_blank(parser.buffer, parser.buffer_pos+peek) { - continue - } - if parser.buffer[parser.buffer_pos+peek] == '#' { - seen := parser.mark.index+peek - for { - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if is_breakz(parser.buffer, parser.buffer_pos) { - if parser.mark.index >= seen { - break - } - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - } else { - if parser.mark.index >= seen { - if len(text) == 0 { - start_mark = parser.mark - } - text = append(text, parser.buffer[parser.buffer_pos]) - } - skip(parser) - } - } - } - break - } - if len(text) > 0 { - parser.comments = append(parser.comments, yaml_comment_t{ - token_mark: token_mark, - start_mark: start_mark, - line: text, - }) - } - return true -} - -func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) bool { - token := parser.tokens[len(parser.tokens)-1] - - if token.typ == yaml_FLOW_ENTRY_TOKEN && len(parser.tokens) > 1 { - token = parser.tokens[len(parser.tokens)-2] - } - - var token_mark = token.start_mark - var start_mark yaml_mark_t - - var recent_empty = false - var first_empty = parser.newlines <= 1 - - var line = parser.mark.line - var column = parser.mark.column - - var text []byte - - // The foot line is the place where a comment must start to - // still be considered as a foot of the prior content. - // If there's some content in the currently parsed line, then - // the foot is the line below it. - var foot_line = -1 - if scan_mark.line > 0 { - foot_line = parser.mark.line-parser.newlines+1 - if parser.newlines == 0 && parser.mark.column > 1 { - foot_line++ - } - } - - var peek = 0 - for ; peek < 512; peek++ { - if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { - break - } - column++ - if is_blank(parser.buffer, parser.buffer_pos+peek) { - continue - } - c := parser.buffer[parser.buffer_pos+peek] - if is_breakz(parser.buffer, parser.buffer_pos+peek) || parser.flow_level > 0 && (c == ']' || c == '}') { - // Got line break or terminator. - if !recent_empty { - if first_empty && (start_mark.line == foot_line || start_mark.column-1 < parser.indent) { - // This is the first empty line and there were no empty lines before, - // so this initial part of the comment is a foot of the prior token - // instead of being a head for the following one. Split it up. - if len(text) > 0 { - if start_mark.column-1 < parser.indent { - // If dedented it's unrelated to the prior token. 
[Vendored gopkg.in/yaml.v3 source files are removed along with the vendor directory; only the file-level deletion records and vendor/modules.txt are shown below.]
diff --git a/vendor/gopkg.in/yaml.v3/sorter.go b/vendor/gopkg.in/yaml.v3/sorter.go
deleted file mode 100644
index 9210ece..0000000
diff --git a/vendor/gopkg.in/yaml.v3/writerc.go b/vendor/gopkg.in/yaml.v3/writerc.go
deleted file mode 100644
index b8a116b..0000000
diff --git a/vendor/gopkg.in/yaml.v3/yaml.go b/vendor/gopkg.in/yaml.v3/yaml.go
deleted file mode 100644
index b5d35a5..0000000
diff --git a/vendor/gopkg.in/yaml.v3/yamlh.go b/vendor/gopkg.in/yaml.v3/yamlh.go
deleted file mode 100644
index 2719cfb..0000000
diff --git a/vendor/gopkg.in/yaml.v3/yamlprivateh.go b/vendor/gopkg.in/yaml.v3/yamlprivateh.go
deleted file mode 100644
index e88f9c5..0000000
diff --git a/vendor/modules.txt b/vendor/modules.txt
deleted file mode 100644
index 6499820..0000000
--- a/vendor/modules.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-# github.com/davecgh/go-spew v1.1.1
-github.com/davecgh/go-spew/spew
-# github.com/google/uuid v1.1.2
-## explicit
-github.com/google/uuid
-# github.com/hashicorp/go-version v1.0.0
-github.com/hashicorp/go-version
-# github.com/hashicorp/logutils v0.0.0-20150609070431-0dc08b1671f3
-github.com/hashicorp/logutils
-# github.com/pact-foundation/pact-go v1.5.0
-## explicit
-github.com/pact-foundation/pact-go/client
-github.com/pact-foundation/pact-go/dsl
-github.com/pact-foundation/pact-go/install
-github.com/pact-foundation/pact-go/proxy
-github.com/pact-foundation/pact-go/types
-github.com/pact-foundation/pact-go/utils
-# github.com/pmezard/go-difflib v1.0.0
-github.com/pmezard/go-difflib/difflib
-# github.com/stretchr/testify v1.6.1
-## explicit
-github.com/stretchr/testify/assert
-# gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c
-gopkg.in/yaml.v3